path
stringlengths
14
112
content
stringlengths
0
6.32M
size
int64
0
6.32M
max_lines
int64
1
100k
repo_name
stringclasses
2 values
autogenerated
bool
1 class
cosmopolitan/third_party/stb/stb_vorbis.h
#ifndef COSMOPOLITAN_THIRD_PARTY_STB_STB_VORBIS_H_ #define COSMOPOLITAN_THIRD_PARTY_STB_STB_VORBIS_H_ #include "libc/stdio/stdio.h" #if !(__ASSEMBLER__ + __LINKER__ + 0) COSMOPOLITAN_C_START_ enum STBVorbisError { VORBIS__no_error, VORBIS_need_more_data = 1, // not a real error VORBIS_invalid_api_mixing, // can't mix API modes VORBIS_outofmem, // not enough memory VORBIS_feature_not_supported, // uses floor 0 VORBIS_too_many_channels, // STB_VORBIS_MAX_CHANNELS is too small VORBIS_file_open_failure, // fopen() failed VORBIS_seek_without_length, // can't seek in unknown-length file VORBIS_unexpected_eof = 10, // file is truncated? VORBIS_seek_invalid, // seek past EOF VORBIS_invalid_setup = 20, // decoding errors VORBIS_invalid_stream, VORBIS_missing_capture_pattern = 30, // ogg errors VORBIS_invalid_stream_structure_version, VORBIS_continued_packet_flag_invalid, VORBIS_incorrect_stream_serial_number, VORBIS_invalid_first_page, VORBIS_bad_packet_type, VORBIS_cant_find_last_page, VORBIS_seek_failed, VORBIS_ogg_skeleton_not_supported }; typedef struct { char *alloc_buffer; int alloc_buffer_length_in_bytes; } stb_vorbis_alloc; typedef struct stb_vorbis stb_vorbis; typedef struct { unsigned int sample_rate; int channels; unsigned int setup_memory_required; unsigned int setup_temp_memory_required; unsigned int temp_memory_required; int max_frame_size; } stb_vorbis_info; // get general information about the file stb_vorbis_info stb_vorbis_get_info(stb_vorbis *f); // get the last error detected (clears it, too) int stb_vorbis_get_error(stb_vorbis *f); // close an ogg vorbis file and free all memory in use void stb_vorbis_close(stb_vorbis *f); // this function returns the offset (in samples) from the beginning of the // file that will be returned by the next decode, if it is known, or -1 // otherwise. after a flush_pushdata() call, this may take a while before // it becomes valid again. 
// NOT WORKING YET after a seek with PULLDATA API int stb_vorbis_get_sample_offset(stb_vorbis *f); // returns the current seek point within the file, or offset from the beginning // of the memory buffer. In pushdata mode it returns 0. unsigned int stb_vorbis_get_file_offset(stb_vorbis *f); //////////////////////////////////////////////////////////////////////////////// // PUSHDATA // this API allows you to get blocks of data from any source and hand // them to stb_vorbis. you have to buffer them; stb_vorbis will tell // you how much it used, and you have to give it the rest next time; // and stb_vorbis may not have enough data to work with and you will // need to give it the same data again PLUS more. Note that the Vorbis // specification does not bound the size of an individual frame. stb_vorbis *stb_vorbis_open_pushdata(const unsigned char *datablock, int datablock_length_in_bytes, int *datablock_memory_consumed_in_bytes, int *error, const stb_vorbis_alloc *alloc_buffer); // create a vorbis decoder by passing in the initial data block containing // the ogg&vorbis headers (you don't need to do parse them, just provide // the first N bytes of the file--you're told if it's not enough, see below) // on success, returns an stb_vorbis *, does not set error, returns the amount // of // data parsed/consumed on this call in *datablock_memory_consumed_in_bytes; // on failure, returns NULL on error and sets *error, does not change // *datablock_memory_consumed if returns NULL and *error is // VORBIS_need_more_data, then the input block was // incomplete and you need to pass in a larger block from the start of the // file int stb_vorbis_decode_frame_pushdata( stb_vorbis *f, const unsigned char *datablock, int datablock_length_in_bytes, int *channels, // place to write number of float * buffers float ***output, // place to write float ** array of float * buffers int *samples // place to write number of output samples ); // decode a frame of audio sample data if possible from 
the passed-in data block // // return value: number of bytes we used from datablock // // possible cases: // 0 bytes used, 0 samples output (need more data) // N bytes used, 0 samples output (resynching the stream, keep going) // N bytes used, M samples output (one frame of data) // note that after opening a file, you will ALWAYS get one N-bytes,0-sample // frame, because Vorbis always "discards" the first frame. // // Note that on resynch, stb_vorbis will rarely consume all of the buffer, // instead only datablock_length_in_bytes-3 or less. This is because it wants // to avoid missing parts of a page header if they cross a datablock boundary, // without writing state-machiney code to record a partial detection. // // The number of channels returned are stored in *channels (which can be // NULL--it is always the same as the number of channels reported by // get_info). *output will contain an array of float* buffers, one per // channel. In other words, (*output)[0][0] contains the first sample from // the first channel, and (*output)[1][0] contains the first sample from // the second channel. void stb_vorbis_flush_pushdata(stb_vorbis *f); // inform stb_vorbis that your next datablock will not be contiguous with // previous ones (e.g. you've seeked in the data); future attempts to decode // frames will cause stb_vorbis to resynchronize (as noted above), and // once it sees a valid Ogg page (typically 4-8KB, as large as 64KB), it // will begin decoding the _next_ frame. // // if you want to seek using pushdata, you need to seek in your file, then // call stb_vorbis_flush_pushdata(), then start calling decoding, then once // decoding is returning you data, call stb_vorbis_get_sample_offset, and // if you don't like the result, seek your file again and repeat. 
//////////////////////////////////////////////////////////////////////////////// // PULLING INPUT API // // This API assumes stb_vorbis is allowed to pull data from a source-- // either a block of memory containing the _entire_ vorbis stream, or a // FILE * that you or it create, or possibly some other reading mechanism // if you go modify the source to replace the FILE * case with some kind // of callback to your code. (But if you don't support seeking, you may // just want to go ahead and use pushdata.) int stb_vorbis_decode_filename(const char *filename, int *channels, int *sample_rate, short **output); // decode an entire file and output the data interleaved into a malloc()ed // buffer stored in *output. The return value is the number of samples // decoded, or -1 if the file could not be opened or was not an ogg vorbis file. // When you're done with it, just free() the pointer returned in *output. int stb_vorbis_decode_memory(const unsigned char *mem, int len, int *channels, int *sample_rate, short **output); // create an ogg vorbis decoder from an ogg vorbis stream in memory (note // this must be the entire stream!). on failure, returns NULL and sets *error stb_vorbis *stb_vorbis_open_memory(const unsigned char *data, int len, int *error, const stb_vorbis_alloc *alloc_buffer); // create an ogg vorbis decoder from a filename via fopen(). on failure, // returns NULL and sets *error (possibly to VORBIS_file_open_failure). stb_vorbis *stb_vorbis_open_filename(const char *filename, int *error, const stb_vorbis_alloc *alloc_buffer); // create an ogg vorbis decoder from an open FILE *, looking for a stream at // the _current_ seek point (ftell). on failure, returns NULL and sets *error. // note that stb_vorbis must "own" this stream; if you seek it in between // calls to stb_vorbis, it will become confused. Moreover, if you attempt to // perform stb_vorbis_seek_*() operations on this file, it will assume it // owns the _entire_ rest of the file after the start point. 
Use the next // function, stb_vorbis_open_file_section(), to limit it. stb_vorbis *stb_vorbis_open_file(FILE *f, int close_handle_on_close, int *error, const stb_vorbis_alloc *alloc_buffer); // create an ogg vorbis decoder from an open FILE *, looking for a stream at // the _current_ seek point (ftell); the stream will be of length 'len' bytes. // on failure, returns NULL and sets *error. note that stb_vorbis must "own" // this stream; if you seek it in between calls to stb_vorbis, it will become // confused. stb_vorbis *stb_vorbis_open_file_section(FILE *f, int close_handle_on_close, int *error, const stb_vorbis_alloc *alloc_buffer, unsigned int len); // these functions seek in the Vorbis file to (approximately) 'sample_number'. // after calling seek_frame(), the next call to get_frame_*() will include // the specified sample. after calling stb_vorbis_seek(), the next call to // stb_vorbis_get_samples_* will start with the specified sample. If you // do not need to seek to EXACTLY the target sample when using get_samples_*, // you can also use seek_frame(). int stb_vorbis_seek_frame(stb_vorbis *f, unsigned int sample_number); int stb_vorbis_seek(stb_vorbis *f, unsigned int sample_number); // this function is equivalent to stb_vorbis_seek(f,0) int stb_vorbis_seek_start(stb_vorbis *f); // these functions return the total length of the vorbis stream unsigned int stb_vorbis_stream_length_in_samples(stb_vorbis *f); float stb_vorbis_stream_length_in_seconds(stb_vorbis *f); // decode the next frame and return the number of samples. the number of // channels returned are stored in *channels (which can be NULL--it is always // the same as the number of channels reported by get_info). *output will // contain an array of float* buffers, one per channel. These outputs will // be overwritten on the next call to stb_vorbis_get_frame_*. // // You generally should not intermix calls to stb_vorbis_get_frame_*() // and stb_vorbis_get_samples_*(), since the latter calls the former. 
int stb_vorbis_get_frame_float(stb_vorbis *f, int *channels, float ***output); int stb_vorbis_get_frame_short_interleaved(stb_vorbis *f, int num_c, short *buffer, int num_shorts); // decode the next frame and return the number of *samples* per channel. // Note that for interleaved data, you pass in the number of shorts (the // size of your array), but the return value is the number of samples per // channel, not the total number of samples. // // The data is coerced to the number of channels you request according to the // channel coercion rules (see below). You must pass in the size of your // buffer(s) so that stb_vorbis will not overwrite the end of the buffer. // The maximum buffer size needed can be gotten from get_info(); however, // the Vorbis I specification implies an absolute maximum of 4096 samples // per channel. int stb_vorbis_get_frame_short(stb_vorbis *f, int num_c, short **buffer, int num_samples); // Channel coercion rules: // Let M be the number of channels requested, and N the number of channels // present, and Cn be the nth channel; let stereo L be the sum of all L and // center channels, and stereo R be the sum of all R and center channels // (channel assignment from the vorbis spec). // M N output // 1 k sum(Ck) for all k // 2 * stereo L, stereo R // k l k > l, the first l channels, then 0s // k l k <= l, the first k channels // Note that this is not _good_ surround etc. mixing at all! It's just so // you get something useful. int stb_vorbis_get_samples_float_interleaved(stb_vorbis *f, int channels, float *buffer, int num_floats); int stb_vorbis_get_samples_float(stb_vorbis *f, int channels, float **buffer, int num_samples); // gets num_samples samples, not necessarily on a frame boundary--this requires // buffering so you have to supply the buffers. DOES NOT APPLY THE COERCION // RULES. Returns the number of samples stored per channel; it may be less than // requested at the end of the file. 
If there are no more samples in the file, // returns 0. int stb_vorbis_get_samples_short_interleaved(stb_vorbis *f, int channels, short *buffer, int num_shorts); int stb_vorbis_get_samples_short(stb_vorbis *f, int channels, short **buffer, int num_samples); // gets num_samples samples, not necessarily on a frame boundary--this requires // buffering so you have to supply the buffers. Applies the coercion rules above // to produce 'channels' channels. Returns the number of samples stored per // channel; it may be less than requested at the end of the file. If there are // no more samples in the file, returns 0. COSMOPOLITAN_C_END_ #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_THIRD_PARTY_STB_STB_VORBIS_H_ */
13,382
265
jart/cosmopolitan
false
cosmopolitan/third_party/stb/stb_image.h
#ifndef COSMOPOLITAN_THIRD_PARTY_STB_STB_IMAGE_H_ #define COSMOPOLITAN_THIRD_PARTY_STB_STB_IMAGE_H_ #if !(__ASSEMBLER__ + __LINKER__ + 0) COSMOPOLITAN_C_START_ enum { STBI_default = 0, // only used for desired_channels STBI_grey = 1, STBI_grey_alpha = 2, STBI_rgb = 3, STBI_rgb_alpha = 4 }; struct FILE; typedef struct { int (*read)(void *user, char *data, int size); // fill 'data' with 'size' bytes. return number of // bytes actually read void (*skip)(void *user, int n); // skip the next 'n' bytes, or 'unget' the // last -n bytes if negative int (*eof)(void *user); // returns nonzero if we are at end of file/data } stbi_io_callbacks; // // 8-bits-per-channel interface // unsigned char *stbi_load_from_memory(unsigned char const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels) mallocesque; unsigned char *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); unsigned char *stbi_load(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); unsigned char *stbi_load_from_file(struct FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); // for stbi_load_from_file, file pointer is left pointing immediately after // image unsigned char *stbi_load_gif_from_memory(unsigned char const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp); // // 16-bits-per-channel interface // unsigned short *stbi_load_16_from_memory(unsigned char const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels); unsigned short *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels); unsigned short *stbi_load_16(char const *filename, int *x, int *y, int *channels_in_file, int desired_channels); unsigned short *stbi_load_from_file_16(struct FILE *f, int *x, int *y, int *channels_in_file, int desired_channels); // get a VERY 
brief reason for failure // NOT THREADSAFE const char *stbi_failure_reason(void); // free the loaded image -- this is just free() void stbi_image_free(void *retval_from_stbi_load); // get image dimensions & components without fully decoding int stbi_info_from_memory(unsigned char const *buffer, int len, int *x, int *y, int *comp); int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); int stbi_is_16_bit_from_memory(unsigned char const *buffer, int len); int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user); int stbi_info(char const *filename, int *x, int *y, int *comp); int stbi_info_from_file(struct FILE *f, int *x, int *y, int *comp); int stbi_is_16_bit(char const *filename); int stbi_is_16_bit_from_file(struct FILE *f); // for image formats that explicitly notate that they have premultiplied alpha, // we just return the colors as stored in the file. set this flag to force // unpremultiplication. results are undefined if the unpremultiply overflow. 
void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); // indicate whether we should process iphone images back to canonical format, // or just pass them through "as-is" void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); // flip the image vertically, so the first pixel in the output array is the // bottom left void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); // ZLIB client - used by PNG, available for other purposes char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); COSMOPOLITAN_C_END_ #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_THIRD_PARTY_STB_STB_IMAGE_H_ */
5,323
119
jart/cosmopolitan
false
cosmopolitan/third_party/stb/stb_truetype.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:3;tab-width:8;coding:utf-8 -*-│ │vi: set net ft=c ts=3 sts=3 sw=3 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ │ │ stb_truetype │ │ Copyright 2017 Sean Barrett │ │ │ │ Permission is hereby granted, free of charge, to any person obtaining │ │ a copy of this software and associated documentation files (the │ │ "Software"), to deal in the Software without restriction, including │ │ without limitation the rights to use, copy, modify, merge, publish, │ │ distribute, sublicense, and/or sell copies of the Software, and to │ │ permit persons to whom the Software is furnished to do so, subject to │ │ the following conditions: │ │ │ │ The above copyright notice and this permission notice shall be │ │ included in all copies or substantial portions of the Software. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, │ │ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF │ │ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. │ │ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY │ │ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, │ │ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE │ │ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
│ │ │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/assert.h" #include "libc/intrin/bits.h" #include "libc/intrin/likely.h" #include "libc/macros.internal.h" #include "libc/math.h" #include "libc/mem/mem.h" #include "libc/runtime/runtime.h" #include "libc/str/str.h" #include "third_party/stb/stb_rect_pack.h" #include "third_party/stb/stb_truetype.h" asm(".ident\t\"\\n\\n\ stb_truetype (MIT License)\\n\ Copyright 2017 Sean Barrett\""); asm(".include \"libc/disclaimer.inc\""); /* clang-format off */ // stb_truetype.h - v1.26 - public domain // authored from 2009-2021 by Sean Barrett / RAD Game Tools // // ======================================================================= // // NO SECURITY GUARANTEE -- DO NOT USE THIS ON UNTRUSTED FONT FILES // // This library does no range checking of the offsets found in the file, // meaning an attacker can use it to read arbitrary memory. // // ======================================================================= // // This library processes TrueType files: // parse files // extract glyph metrics // extract glyph shapes // render glyphs to one-channel bitmaps with antialiasing (box filter) // render glyphs to one-channel SDF bitmaps (signed-distance // field/function) // // Todo: // non-MS cmaps // crashproof on bad data // hinting? (no longer patented) // cleartype-style AA? // optimize: use simple memory allocator for intermediates // optimize: build edge-list directly from curves // optimize: rasterize directly from curves? 
// // ADDITIONAL CONTRIBUTORS // // Mikko Mononen: compound shape support, more cmap formats // Tor Andersson: kerning, subpixel rendering // Dougall Johnson: OpenType / Type 2 font handling // Daniel Ribeiro Maciel: basic GPOS-based kerning // // Misc other: // Ryan Gordon // Simon Glass // github:IntellectualKitty // Imanol Celaya // Daniel Ribeiro Maciel // // Bug/warning reports/fixes: // "Zer" on mollyrocket Fabian "ryg" Giesen github:NiLuJe // Cass Everitt Martins Mozeiko github:aloucks // stoiko (Haemimont Games) Cap Petschulat github:oyvindjam // Brian Hook Omar Cornut github:vassvik // Walter van Niftrik Ryan Griege // David Gow Peter LaValle // David Given Sergey Popov // Ivan-Assen Ivanov Giumo X. Clanjor // Anthony Pesch Higor Euripedes // Johan Duparc Thomas Fields // Hou Qiming Derek Vinyard // Rob Loach Cort Stratton // Kenney Phillis Jr. Brian Costabile // Ken Voskuil (kaesve) // // VERSION HISTORY // // 1.26 (2021-08-28) fix broken rasterizer // 1.25 (2021-07-11) many fixes // 1.24 (2020-02-05) fix warning // // 1.23 (2020-02-02) query SVG data for glyphs; query whole kerning // table (but only kern not GPOS) 1.22 (2019-08-11) minimize // missing-glyph duplication; fix kerning if both 'GPOS' and // 'kern' are defined 1.21 (2019-02-25) fix warning 1.20 // (2019-02-07) PackFontRange skips missing codepoints; // GetScaleFontVMetrics() 1.19 (2018-02-11) GPOS kerning, // STBTT_fmod 1.18 // // (2018-01-29) add missing function 1.17 (2017-07-23) make more // arguments const; doc fix 1.16 (2017-07-12) SDF support 1.15 // (2017-03-03) make more arguments const 1.14 (2017-01-16) // num-fonts-in-TTC function 1.13 // // (2017-01-02) support OpenType fonts, certain Apple fonts 1.12 // (2016-10-25) suppress warnings about casting away const with // -Wcast-qual 1.11 // // (2016-04-02) fix unused-variable warning 1.10 (2016-04-02) // user-defined fabs(); rare memory leak; remove duplicate typedef // 1.09 (2016-01-16) warning fix; avoid crash on outofmem; use // 
allocation userdata properly 1.08 // // (2015-09-13) document stbtt_Rasterize(); fixes for vertical & // horizontal edges 1.07 (2015-08-01) allow PackFontRanges to // accept arrays of sparse codepoints; variant PackFontRanges to // pack and render in separate phases; fix // stbtt_GetFontOFfsetForIndex (never worked for non-0 input?); // fixed an ASSERT() bug in the new rasterizer replace ASSERT() // with STBTT_ASSERT() in new rasterizer // // Full history can be found at the end of this file. // // LICENSE // // See end of file for license information. // // USAGE // // Include this file in whatever places need to refer to it. In ONE C/C++ // file, write: // #define STB_TRUETYPE_IMPLEMENTATION // before the #include of this file. This expands out the actual // implementation into that C/C++ file. // // To make the implementation private to the file that generates the // implementation, // #define STBTT_STATIC // // Simple 3D API (don't ship this, but it's fine for tools and quick start) // stbtt_BakeFontBitmap() -- bake a font to a bitmap for // use as texture stbtt_GetBakedQuad() -- compute quad // to draw for a given char // // Improved 3D API (more shippable): // #include "stb_rect_pack.h" -- optional, but you really // want it stbtt_PackBegin() stbtt_PackSetOversampling() -- // for improved quality on small fonts stbtt_PackFontRanges() -- pack // and renders stbtt_PackEnd() stbtt_GetPackedQuad() // // "Load" a font file from a memory buffer (you have to keep the buffer // loaded) // stbtt_InitFont() // stbtt_GetFontOffsetForIndex() -- indexing for TTC font // collections stbtt_GetNumberOfFonts() -- number of fonts // for TTC font collections // // Render a unicode codepoint to a bitmap // stbtt_GetCodepointBitmap() -- allocates and returns a // bitmap stbtt_MakeCodepointBitmap() -- renders into bitmap // you provide stbtt_GetCodepointBitmapBox() -- how big the // bitmap must be // // Character advance/positioning // stbtt_GetCodepointHMetrics() // 
stbtt_GetFontVMetrics() // stbtt_GetFontVMetricsOS2() // stbtt_GetCodepointKernAdvance() // // Starting with version 1.06, the rasterizer was replaced with a new, // faster and generally-more-precise rasterizer. The new rasterizer more // accurately measures pixel coverage for anti-aliasing, except in the case // where multiple shapes overlap, in which case it overestimates the AA pixel // coverage. Thus, anti-aliasing of intersecting shapes may look wrong. If // this turns out to be a problem, you can re-enable the old rasterizer with // #define STBTT_RASTERIZER_VERSION 1 // which will incur about a 15% speed hit. // // ADDITIONAL DOCUMENTATION // // Immediately after this block comment are a series of sample programs. // // After the sample programs is the "header file" section. This section // includes documentation for each API function. // // Some important concepts to understand to use this library: // // Codepoint // Characters are defined by unicode codepoints, e.g. 65 is // uppercase A, 231 is lowercase c with a cedilla, 0x7e30 is // the hiragana for "ma". // // Glyph // A visual character shape (every codepoint is rendered as // some glyph) // // Glyph index // A font-specific integer ID representing a glyph // // Baseline // Glyph shapes are defined relative to a baseline, which is the // bottom of uppercase characters. Characters extend both above // and below the baseline. // // Current Point // As you draw text to the screen, you keep track of a "current point" // which is the origin of each character. The current point's vertical // position is the baseline. Even "baked fonts" use this model. // // Vertical Font Metrics // The vertical qualities of the font, used to vertically position // and space the characters. See docs for stbtt_GetFontVMetrics. // // Font Size in Pixels or Points // The preferred interface for specifying font sizes in stb_truetype // is to specify how tall the font's vertical extent should be in // pixels. 
If that sounds good enough, skip the next paragraph. // // Most font APIs instead use "points", which are a common typographic // measurement for describing font size, defined as 72 points per inch. // stb_truetype provides a point API for compatibility. However, true // "per inch" conventions don't make much sense on computer displays // since different monitors have different number of pixels per // inch. For example, Windows traditionally uses a convention that // there are 96 pixels per inch, thus making 'inch' measurements have // nothing to do with inches, and thus effectively defining a point to // be 1.333 pixels. Additionally, the TrueType font data provides // an explicit scale factor to scale a given font's glyphs to points, // but the author has observed that this scale factor is often wrong // for non-commercial fonts, thus making fonts scaled in points // according to the TrueType spec incoherently sized in practice. // // DETAILED USAGE: // // Scale: // Select how high you want the font to be, in points or pixels. // Call ScaleForPixelHeight or ScaleForMappingEmToPixels to compute // a scale factor SF that will be used by all other functions. // // Baseline: // You need to select a y-coordinate that is the baseline of where // your text will appear. Call GetFontBoundingBox to get the // baseline-relative bounding box for all characters. SF*-y0 will be the // distance in pixels that the worst-case character could extend above the // baseline, so if you want the top edge of characters to appear at the top // of the screen where y=0, then you would set the baseline to SF*-y0. // // Current point: // Set the current point where the first character will appear. The // first character could extend left of the current point; this is font // dependent. 
You can either choose a current point that is the leftmost // point and hope, or add some padding, or check the bounding box or // left-side-bearing of the first character to be displayed and set // the current point based on that. // // Displaying a character: // Compute the bounding box of the character. It will contain signed values // relative to <current_point, baseline>. I.e. if it returns x0,y0,x1,y1, // then the character should be displayed in the rectangle from // <current_point+SF*x0, baseline+SF*y0> to // <current_point+SF*x1,baseline+SF*y1). // // Advancing for the next character: // Call GlyphHMetrics, and compute 'current_point += SF * advance'. // // // ADVANCED USAGE // // Quality: // // - Use the functions with Subpixel at the end to allow your characters // to have subpixel positioning. Since the font is anti-aliased, not // hinted, this is very import for quality. (This is not possible with // baked fonts.) // // - Kerning is now supported, and if you're supporting subpixel rendering // then kerning is worth using to give your text a polished look. // // Performance: // // - Convert Unicode codepoints to glyph indexes and operate on the glyphs; // if you don't do this, stb_truetype is forced to do the conversion on // every call. // // - There are a lot of memory allocations. We should modify it to take // a temp buffer and allocate from the temp buffer (without freeing), // should help performance a lot. // // NOTES // // The system uses the raw data found in the .ttf file without changing it // and without building auxiliary data structures. This is a bit inefficient // on little-endian systems (the data is big-endian), but assuming you're // caching the bitmaps or glyph shapes this shouldn't be a big deal. // // It appears to be very hard to programmatically determine what font a // given file is in a general way. I provide an API for this, but I don't // recommend it. 
// // // PERFORMANCE MEASUREMENTS FOR 1.06: // // 32-bit 64-bit // Previous release: 8.83 s 7.68 s // Pool allocations: 7.72 s 6.34 s // Inline sort : 6.54 s 5.65 s // New rasterizer : 5.63 s 5.00 s #define STBTT_MAX_OVERSAMPLE 8 #define STBTT_RASTERIZER_VERSION 2 #define STBTT__OVER_MASK (STBTT_MAX_OVERSAMPLE-1) #define STBTT_ifloor(x) ((int)floor(x)) #define STBTT_iceil(x) ((int)ceil(x)) #define STBTT_malloc(x,u) ((void)(u),malloc(x)) #define STBTT_free(x,u) ((void)(u),free(x)) #define stbtt__buf_get16(b) stbtt__buf_get((b), 2) #define stbtt__buf_get32(b) stbtt__buf_get((b), 4) #define ttBYTE(p) (*(uint8_t *)(p)) #define ttCHAR(p) (*(int8_t *)(p)) #define ttUSHORT(p) READ16BE(p) #define ttSHORT(p) ((int16_t)READ16BE(p)) #define ttULONG(p) READ32BE(p) #define ttLONG(p) ((int32_t)READ32BE(p)) #define stbtt_tag(p,s) (READ32LE(p)==READ32LE(s)) #define STBTT__CSCTX_INIT(bounds) {bounds,0, 0,0, 0,0, 0,0,0,0, NULL, 0} #define ASSERT(x) ((void)(LIKELY(x) || (stbtt__assert_fail(#x, __LINE__), 0))) typedef struct { float x; float y; } stbtt__point; typedef struct { int bounds; int started; float first_x, first_y; float x, y; int32_t min_x, max_x, min_y, max_y; stbtt_vertex *pvertices; int num_vertices; } stbtt__csctx; typedef struct stbtt__hheap_chunk { struct stbtt__hheap_chunk *next; } stbtt__hheap_chunk; typedef struct stbtt__hheap { struct stbtt__hheap_chunk *head; void *first_free; int num_remaining_in_head_chunk; } stbtt__hheap; typedef struct stbtt__edge { float x0,y0, x1,y1; int invert; } stbtt__edge; typedef struct stbtt__active_edge { struct stbtt__active_edge *next; #if STBTT_RASTERIZER_VERSION==1 int x,dx; float ey; int direction; #elif STBTT_RASTERIZER_VERSION==2 float fx,fdx,fdy; float direction; float sy; float ey; #else #error "Unrecognized value of STBTT_RASTERIZER_VERSION" #endif } stbtt__active_edge; typedef char stbtt__check_size32[sizeof(int32_t)==4 ? 1 : -1]; typedef char stbtt__check_size16[sizeof(int16_t)==2 ? 
1 : -1];
// Compile-time check: STBTT_MAX_OVERSAMPLE must be a power of two
// (STBTT__OVER_MASK relies on it being usable as a bit mask).
typedef int stbtt__test_oversample_pow2[(STBTT_MAX_OVERSAMPLE & (STBTT_MAX_OVERSAMPLE-1)) == 0 ? 1 : -1];

// Recovery point for ASSERT failures: when armed (first slot nonzero),
// stbtt__assert_fail longjmps here with the failing line number instead
// of aborting the process.
jmp_buf stbtt_jmpbuf;

// Assertion sink for the ASSERT macro: escape via stbtt_jmpbuf when it is
// armed, otherwise report through __assert_fail and abort.
static wontreturn void stbtt__assert_fail(const char *x, int line)
{
   if (stbtt_jmpbuf[0]) {
      longjmp(stbtt_jmpbuf, line);
   } else {
      __assert_fail(x, __FILE__, line);
      abort();
   }
}

// Read one byte and advance the cursor; returns 0 once past the end,
// so malformed input degrades to reading zeros rather than overrunning.
static uint8_t stbtt__buf_get8(stbtt__buf *b)
{
   if (b->cursor >= b->size)
      return 0;
   return b->data[b->cursor++];
}

// Peek the byte at the cursor without advancing; 0 at end of buffer.
static uint8_t stbtt__buf_peek8(stbtt__buf *b)
{
   if (b->cursor >= b->size)
      return 0;
   return b->data[b->cursor];
}

// Seek to absolute offset o. Out-of-range offsets trip ASSERT (which may
// longjmp when stbtt_jmpbuf is armed) and otherwise clamp to the end.
static void stbtt__buf_seek(stbtt__buf *b, int o)
{
   ASSERT(!(o > b->size || o < 0));
   b->cursor = (o > b->size || o < 0) ? b->size : o;
}

// Relative seek; o may be negative.
static void stbtt__buf_skip(stbtt__buf *b, int o)
{
   stbtt__buf_seek(b, b->cursor + o);
}

// Read an n-byte (1..4) big-endian unsigned integer at the cursor.
static uint32_t stbtt__buf_get(stbtt__buf *b, int n)
{
   uint32_t v = 0;
   int i;
   ASSERT(n >= 1 && n <= 4);
   for (i = 0; i < n; i++)
      v = (v << 8) | stbtt__buf_get8(b);
   return v;
}

// Wrap raw memory [p, p+size) in a stbtt__buf with the cursor at 0.
// Sizes must fit comfortably in the int-typed size field.
static stbtt__buf stbtt__new_buf(const void *p, size_t size)
{
   stbtt__buf r;
   ASSERT(size < 0x40000000);
   r.data = (uint8_t*) p;
   r.size = (int) size;
   r.cursor = 0;
   return r;
}

// Sub-buffer [o, o+s) of b; any range error yields an empty buffer
// rather than a partially-valid view.
static stbtt__buf stbtt__buf_range(const stbtt__buf *b, int o, int s)
{
   stbtt__buf r = stbtt__new_buf(NULL, 0);
   if (o < 0 || s < 0 || o > b->size || s > b->size - o)
      return r;
   r.data = b->data + o;
   r.size = s;
   return r;
}

// Consume one CFF INDEX structure starting at the cursor and return a
// buffer spanning exactly that INDEX (count, offsize, offset array, and
// object data). Leaves b's cursor just past the INDEX.
static stbtt__buf stbtt__cff_get_index(stbtt__buf *b)
{
   int count, start, offsize;
   start = b->cursor;
   count = stbtt__buf_get16(b);
   if (count) {
      offsize = stbtt__buf_get8(b);
      ASSERT(offsize >= 1 && offsize <= 4);
      // skip the offset array, then skip to the last offset's target
      // (offsets are 1-based, hence the -1)
      stbtt__buf_skip(b, offsize * count);
      stbtt__buf_skip(b, stbtt__buf_get(b, offsize) - 1);
   }
   return stbtt__buf_range(b, start, b->cursor - start);
}

// Decode a CFF DICT integer operand: 1-byte (32..246), 2-byte positive
// (247..250), 2-byte negative (251..254), 3-byte (28), or 5-byte (29)
// encodings per the CFF spec.
static uint32_t stbtt__cff_int(stbtt__buf *b)
{
   int b0 = stbtt__buf_get8(b);
   if (b0 >= 32 && b0 <= 246)       return b0 - 139;
   else if (b0 >= 247 && b0 <= 250) return (b0 - 247)*256 + stbtt__buf_get8(b) + 108;
   else if (b0 >= 251 && b0 <= 254) return -(b0 - 251)*256 - stbtt__buf_get8(b) - 108;
   else if (b0
== 28) return stbtt__buf_get16(b); else if (b0 == 29) return stbtt__buf_get32(b); ASSERT(0); return 0; } static void stbtt__cff_skip_operand(stbtt__buf *b) { int v, b0 = stbtt__buf_peek8(b); ASSERT(b0 >= 28); if (b0 == 30) { stbtt__buf_skip(b, 1); while (b->cursor < b->size) { v = stbtt__buf_get8(b); if ((v & 0xF) == 0xF || (v >> 4) == 0xF) break; } } else { stbtt__cff_int(b); } } static stbtt__buf stbtt__dict_get(stbtt__buf *b, int key) { stbtt__buf_seek(b, 0); while (b->cursor < b->size) { int start = b->cursor, end, op; while (stbtt__buf_peek8(b) >= 28) stbtt__cff_skip_operand(b); end = b->cursor; op = stbtt__buf_get8(b); if (op == 12) op = stbtt__buf_get8(b) | 0x100; if (op == key) return stbtt__buf_range(b, start, end-start); } return stbtt__buf_range(b, 0, 0); } static void stbtt__dict_get_ints(stbtt__buf *b, int key, int outcount, uint32_t *out) { int i; stbtt__buf operands = stbtt__dict_get(b, key); for (i = 0; i < outcount && operands.cursor < operands.size; i++) out[i] = stbtt__cff_int(&operands); } static int stbtt__cff_index_count(stbtt__buf *b) { stbtt__buf_seek(b, 0); return stbtt__buf_get16(b); } static stbtt__buf stbtt__cff_index_get(stbtt__buf b, int i) { int count, offsize, start, end; stbtt__buf_seek(&b, 0); count = stbtt__buf_get16(&b); offsize = stbtt__buf_get8(&b); ASSERT(i >= 0 && i < count); ASSERT(offsize >= 1 && offsize <= 4); stbtt__buf_skip(&b, i*offsize); start = stbtt__buf_get(&b, offsize); end = stbtt__buf_get(&b, offsize); return stbtt__buf_range(&b, 2+(count+1)*offsize+start, end - start); } ////////////////////////////////////////////////////////////////////////// // // accessors to parse data from file // // on platforms that don't allow misaligned reads, if we want to allow // truetype fonts that aren't padded to alignment, define ALLOW_UNALIGNED_TRUETYPE static int stbtt__isfont(uint8_t *font) { // check the version number if (stbtt_tag(font, "1\0\0\0")) return 1; // TrueType 1 if (stbtt_tag(font, "typ1")) return 1; // TrueType 
with type 1 font -- we don't support this! if (stbtt_tag(font, "OTTO")) return 1; // OpenType with CFF if (stbtt_tag(font, "\0\1\0\0")) return 1; // OpenType 1.0 if (stbtt_tag(font, "true")) return 1; // Apple specification for TrueType fonts return 0; } // @OPTIMIZE: binary search static uint32_t stbtt__find_table(uint8_t *data, int fontstart, const char *tag) { ASSERT(fontstart >= 0); int32_t num_tables = ttUSHORT(data+fontstart+4); uint32_t tabledir = fontstart + 12; int32_t i; for (i=0; i < num_tables; ++i) { uint32_t loc = tabledir + 16*i; if (stbtt_tag(data+loc+0, tag)) return ttULONG(data+loc+8); } return 0; } static int stbtt_GetFontOffsetForIndex_internal(unsigned char *font_collection, int index) { // if it's just a font, there's only one valid index if (stbtt__isfont(font_collection)) return index == 0 ? 0 : -1; // check if it's a TTC if (stbtt_tag(font_collection, "ttcf")) { // version 1? if (ttULONG(font_collection+4) == 0x00010000 || ttULONG(font_collection+4) == 0x00020000) { int32_t n = ttLONG(font_collection+8); if (index >= n) return -1; return ttULONG(font_collection+12+index*4); } } return -1; } static int stbtt_GetNumberOfFonts_internal(unsigned char *font_collection) { // if it's just a font, there's only one valid font if (stbtt__isfont(font_collection)) return 1; // check if it's a TTC if (stbtt_tag(font_collection, "ttcf")) { // version 1? 
if (ttULONG(font_collection+4) == 0x00010000 || ttULONG(font_collection+4) == 0x00020000) { return ttLONG(font_collection+8); } } return 0; } static stbtt__buf stbtt__get_subrs(stbtt__buf cff, stbtt__buf fontdict) { uint32_t subrsoff = 0, private_loc[2] = { 0, 0 }; stbtt__buf pdict; stbtt__dict_get_ints(&fontdict, 18, 2, private_loc); if (!private_loc[1] || !private_loc[0]) return stbtt__new_buf(NULL, 0); pdict = stbtt__buf_range(&cff, private_loc[1], private_loc[0]); stbtt__dict_get_ints(&pdict, 19, 1, &subrsoff); if (!subrsoff) return stbtt__new_buf(NULL, 0); stbtt__buf_seek(&cff, private_loc[1]+subrsoff); return stbtt__cff_get_index(&cff); } // since most people won't use this, find this table the first time it's needed static int stbtt__get_svg(stbtt_fontinfo *info) { uint32_t t, offset; if (info->svg < 0) { t = stbtt__find_table(info->data, info->fontstart, "SVG "); if (t) { offset = ttULONG(info->data + t + 2); info->svg = t + offset; } else { info->svg = 0; } } return info->svg; } static int stbtt_InitFont_internal(stbtt_fontinfo *info, unsigned char *data, int fontstart) { uint32_t cmap, t; int32_t i,numTables; info->data = data; info->fontstart = fontstart; info->cff = stbtt__new_buf(NULL, 0); cmap = stbtt__find_table(data, fontstart, "cmap"); // required info->loca = stbtt__find_table(data, fontstart, "loca"); // required info->head = stbtt__find_table(data, fontstart, "head"); // required info->glyf = stbtt__find_table(data, fontstart, "glyf"); // required info->hhea = stbtt__find_table(data, fontstart, "hhea"); // required info->hmtx = stbtt__find_table(data, fontstart, "hmtx"); // required info->kern = stbtt__find_table(data, fontstart, "kern"); // not required info->gpos = stbtt__find_table(data, fontstart, "GPOS"); // not required if (!cmap || !info->head || !info->hhea || !info->hmtx) return 0; if (info->glyf) { // required for truetype if (!info->loca) return 0; } else { // initialization for CFF / Type2 fonts (OTF) stbtt__buf b, topdict, 
topdictidx; uint32_t cstype = 2, charstrings = 0, fdarrayoff = 0, fdselectoff = 0; uint32_t cff; cff = stbtt__find_table(data, fontstart, "CFF "); if (!cff) return 0; info->fontdicts = stbtt__new_buf(NULL, 0); info->fdselect = stbtt__new_buf(NULL, 0); // @TODO this should use size from table (not 512MB) info->cff = stbtt__new_buf(data+cff, 512*1024*1024); b = info->cff; // read the header stbtt__buf_skip(&b, 2); stbtt__buf_seek(&b, stbtt__buf_get8(&b)); // hdrsize // @TODO the name INDEX could list multiple fonts, // but we just use the first one. stbtt__cff_get_index(&b); // name INDEX topdictidx = stbtt__cff_get_index(&b); topdict = stbtt__cff_index_get(topdictidx, 0); stbtt__cff_get_index(&b); // string INDEX info->gsubrs = stbtt__cff_get_index(&b); stbtt__dict_get_ints(&topdict, 17, 1, &charstrings); stbtt__dict_get_ints(&topdict, 0x100 | 6, 1, &cstype); stbtt__dict_get_ints(&topdict, 0x100 | 36, 1, &fdarrayoff); stbtt__dict_get_ints(&topdict, 0x100 | 37, 1, &fdselectoff); info->subrs = stbtt__get_subrs(b, topdict); // we only support Type 2 charstrings if (cstype != 2) return 0; if (charstrings == 0) return 0; if (fdarrayoff) { // looks like a CID font if (!fdselectoff) return 0; stbtt__buf_seek(&b, fdarrayoff); info->fontdicts = stbtt__cff_get_index(&b); info->fdselect = stbtt__buf_range(&b, fdselectoff, b.size-fdselectoff); } stbtt__buf_seek(&b, charstrings); info->charstrings = stbtt__cff_get_index(&b); } t = stbtt__find_table(data, fontstart, "maxp"); if (t) info->numGlyphs = ttUSHORT(data+t+4); else info->numGlyphs = 0xffff; info->svg = -1; // find a cmap encoding table we understand *now* to avoid searching // later. (todo: could make this installable) // the same regardless of glyph. 
numTables = ttUSHORT(data + cmap + 2); info->index_map = 0; for (i=0; i < numTables; ++i) { uint32_t encoding_record = cmap + 4 + 8 * i; // find an encoding we understand: switch(ttUSHORT(data+encoding_record)) { case STBTT_PLATFORM_ID_MICROSOFT: switch (ttUSHORT(data+encoding_record+2)) { case STBTT_MS_EID_UNICODE_BMP: case STBTT_MS_EID_UNICODE_FULL: // MS/Unicode info->index_map = cmap + ttULONG(data+encoding_record+4); break; } break; case STBTT_PLATFORM_ID_UNICODE: // Mac/iOS has these // all the encodingIDs are unicode, so we don't bother to check it info->index_map = cmap + ttULONG(data+encoding_record+4); break; } } if (info->index_map == 0) return 0; info->indexToLocFormat = ttUSHORT(data+info->head + 50); return 1; } // If you're going to perform multiple operations on the same character // and you want a speed-up, call this function with the character you're // going to process, then use glyph-based functions instead of the // codepoint-based functions. // Returns 0 if the character codepoint is not defined in the font. 
int stbtt_FindGlyphIndex(const stbtt_fontinfo *info, int unicode_codepoint) { uint8_t *data = info->data; uint32_t index_map = info->index_map; uint16_t format = ttUSHORT(data + index_map + 0); if (format == 0) { // apple byte encoding int32_t bytes = ttUSHORT(data + index_map + 2); if (unicode_codepoint < bytes-6) return ttBYTE(data + index_map + 6 + unicode_codepoint); return 0; } else if (format == 6) { uint32_t first = ttUSHORT(data + index_map + 6); uint32_t count = ttUSHORT(data + index_map + 8); if ((uint32_t) unicode_codepoint >= first && (uint32_t) unicode_codepoint < first+count) return ttUSHORT(data + index_map + 10 + (unicode_codepoint - first)*2); return 0; } else if (format == 2) { ASSERT(0); // @TODO: high-byte mapping for japanese/chinese/korean return 0; } else if (format == 4) { // standard mapping for windows fonts: binary search collection of ranges uint16_t segcount = ttUSHORT(data+index_map+6) >> 1; uint16_t searchRange = ttUSHORT(data+index_map+8) >> 1; uint16_t entrySelector = ttUSHORT(data+index_map+10); uint16_t rangeShift = ttUSHORT(data+index_map+12) >> 1; // do a binary search of the segments uint32_t endCount = index_map + 14; uint32_t search = endCount; if (unicode_codepoint > 0xffff) return 0; // they lie from endCount .. endCount + segCount // but searchRange is the nearest power of two, so... 
if (unicode_codepoint >= ttUSHORT(data + search + rangeShift*2)) search += rangeShift*2; // now decrement to bias correctly to find smallest search -= 2; while (entrySelector) { uint16_t end; searchRange >>= 1; end = ttUSHORT(data + search + searchRange*2); if (unicode_codepoint > end) search += searchRange*2; --entrySelector; } search += 2; { uint16_t offset, start, last; uint16_t item = (uint16_t) ((search - endCount) >> 1); start = ttUSHORT(data + index_map + 14 + segcount*2 + 2 + 2*item); last = ttUSHORT(data + endCount + 2*item); if (unicode_codepoint < start || unicode_codepoint > last) return 0; offset = ttUSHORT(data + index_map + 14 + segcount*6 + 2 + 2*item); if (offset == 0) return (uint16_t) (unicode_codepoint + ttSHORT(data + index_map + 14 + segcount*4 + 2 + 2*item)); return ttUSHORT(data + offset + (unicode_codepoint-start)*2 + index_map + 14 + segcount*6 + 2 + 2*item); } } else if (format == 12 || format == 13) { uint32_t ngroups = ttULONG(data+index_map+12); int32_t low,high; low = 0; high = (int32_t)ngroups; // Binary search the right group. 
while (low < high) { int32_t mid = low + ((high-low) >> 1); // rounds down, so low <= mid < high uint32_t start_char = ttULONG(data+index_map+16+mid*12); uint32_t end_char = ttULONG(data+index_map+16+mid*12+4); if ((uint32_t) unicode_codepoint < start_char) high = mid; else if ((uint32_t) unicode_codepoint > end_char) low = mid+1; else { uint32_t start_glyph = ttULONG(data+index_map+16+mid*12+8); if (format == 12) return start_glyph + unicode_codepoint-start_char; else // format == 13 return start_glyph; } } return 0; // not found } // @TODO ASSERT(0); return 0; } int stbtt_GetCodepointShape(const stbtt_fontinfo *info, int unicode_codepoint, stbtt_vertex **vertices) { return stbtt_GetGlyphShape(info, stbtt_FindGlyphIndex(info, unicode_codepoint), vertices); } static void stbtt_setvertex(stbtt_vertex *v, uint8_t type, int32_t x, int32_t y, int32_t cx, int32_t cy) { v->type = type; v->x = (int16_t) x; v->y = (int16_t) y; v->cx = (int16_t) cx; v->cy = (int16_t) cy; } static int stbtt__GetGlyfOffset(const stbtt_fontinfo *info, int glyph_index) { int g1,g2; ASSERT(!info->cff.size); if (glyph_index >= info->numGlyphs) return -1; // glyph index out of range if (info->indexToLocFormat >= 2) return -1; // unknown index->glyph map format if (info->indexToLocFormat == 0) { g1 = info->glyf + ttUSHORT(info->data + info->loca + glyph_index * 2) * 2; g2 = info->glyf + ttUSHORT(info->data + info->loca + glyph_index * 2 + 2) * 2; } else { g1 = info->glyf + ttULONG (info->data + info->loca + glyph_index * 4); g2 = info->glyf + ttULONG (info->data + info->loca + glyph_index * 4 + 4); } return g1==g2 ? 
-1 : g1; // if length is 0, return -1 } static int stbtt__GetGlyphInfoT2(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1); int stbtt_GetGlyphBox(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1) { if (info->cff.size) { stbtt__GetGlyphInfoT2(info, glyph_index, x0, y0, x1, y1); } else { int g = stbtt__GetGlyfOffset(info, glyph_index); if (g < 0) return 0; if (x0) *x0 = ttSHORT(info->data + g + 2); if (y0) *y0 = ttSHORT(info->data + g + 4); if (x1) *x1 = ttSHORT(info->data + g + 6); if (y1) *y1 = ttSHORT(info->data + g + 8); } return 1; } // Gets the bounding box of the visible part of the glyph, in unscaled coordinates int stbtt_GetCodepointBox(const stbtt_fontinfo *info, int codepoint, int *x0, int *y0, int *x1, int *y1) { return stbtt_GetGlyphBox(info, stbtt_FindGlyphIndex(info,codepoint), x0,y0,x1,y1); } // returns non-zero if nothing is drawn for this glyph int stbtt_IsGlyphEmpty(const stbtt_fontinfo *info, int glyph_index) { int16_t numberOfContours; int g; if (info->cff.size) return stbtt__GetGlyphInfoT2(info, glyph_index, NULL, NULL, NULL, NULL) == 0; g = stbtt__GetGlyfOffset(info, glyph_index); if (g < 0) return 1; numberOfContours = ttSHORT(info->data + g); return numberOfContours == 0; } static int stbtt__close_shape(stbtt_vertex *vertices, int num_vertices, int was_off, int start_off, int32_t sx, int32_t sy, int32_t scx, int32_t scy, int32_t cx, int32_t cy) { if (start_off) { if (was_off) stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, (cx+scx)>>1, (cy+scy)>>1, cx,cy); stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, sx,sy,scx,scy); } else { if (was_off) stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve,sx,sy,cx,cy); else stbtt_setvertex(&vertices[num_vertices++], STBTT_vline,sx,sy,0,0); } return num_vertices; } static int stbtt__GetGlyphShapeTT(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **pvertices) { int16_t numberOfContours; uint8_t *endPtsOfContours; 
uint8_t *data = info->data; stbtt_vertex *vertices=0; int num_vertices=0; int g = stbtt__GetGlyfOffset(info, glyph_index); *pvertices = NULL; if (g < 0) return 0; numberOfContours = ttSHORT(data + g); if (numberOfContours > 0) { uint8_t flags=0,flagcount; int32_t ins, i,j=0,m,n, next_move, was_off=0, off, start_off=0; int32_t x,y,cx,cy,sx,sy, scx,scy; uint8_t *points; endPtsOfContours = (data + g + 10); ins = ttUSHORT(data + g + 10 + numberOfContours * 2); points = data + g + 10 + numberOfContours * 2 + 2 + ins; n = 1+ttUSHORT(endPtsOfContours + numberOfContours*2-2); m = n + 2*numberOfContours; // a loose bound on how many vertices we might need vertices = STBTT_malloc(m * sizeof(vertices[0]), info->userdata); if (vertices == 0) return 0; next_move = 0; flagcount=0; // in first pass, we load uninterpreted data into the allocated array // above, shifted to the end of the array so we won't overwrite it when // we create our final data starting from the front off = m - n; // starting offset for uninterpreted data, regardless of how m ends up being calculated // first load flags for (i=0; i < n; ++i) { if (flagcount == 0) { flags = *points++; if (flags & 8) flagcount = *points++; } else --flagcount; vertices[off+i].type = flags; } // now load x coordinates x=0; for (i=0; i < n; ++i) { flags = vertices[off+i].type; if (flags & 2) { int16_t dx = *points++; x += (flags & 16) ? dx : -dx; // ??? } else { if (!(flags & 16)) { x = x + (int16_t) (points[0]*256 + points[1]); points += 2; } } vertices[off+i].x = (int16_t) x; } // now load y coordinates y=0; for (i=0; i < n; ++i) { flags = vertices[off+i].type; if (flags & 4) { int16_t dy = *points++; y += (flags & 32) ? dy : -dy; // ??? 
} else { if (!(flags & 32)) { y = y + (int16_t) (points[0]*256 + points[1]); points += 2; } } vertices[off+i].y = (int16_t) y; } // now convert them to our format num_vertices=0; sx = sy = cx = cy = scx = scy = 0; for (i=0; i < n; ++i) { flags = vertices[off+i].type; x = (int16_t) vertices[off+i].x; y = (int16_t) vertices[off+i].y; if (next_move == i) { if (i != 0) num_vertices = stbtt__close_shape(vertices, num_vertices, was_off, start_off, sx,sy,scx,scy,cx,cy); // now start the new one start_off = !(flags & 1); if (start_off) { // if we start off with an off-curve point, then when we need to find a point on the curve // where we can start, and we need to save some state for when we wraparound. scx = x; scy = y; if (!(vertices[off+i+1].type & 1)) { // next point is also a curve point, so interpolate an on-point curve sx = (x + (int32_t) vertices[off+i+1].x) >> 1; sy = (y + (int32_t) vertices[off+i+1].y) >> 1; } else { // otherwise just use the next point as our start point sx = (int32_t) vertices[off+i+1].x; sy = (int32_t) vertices[off+i+1].y; ++i; // we're using point i+1 as the starting point, so skip it } } else { sx = x; sy = y; } stbtt_setvertex(&vertices[num_vertices++], STBTT_vmove,sx,sy,0,0); was_off = 0; next_move = 1 + ttUSHORT(endPtsOfContours+j*2); ++j; } else { if (!(flags & 1)) { // if it's a curve if (was_off) // two off-curve control points in a row means interpolate an on-curve midpoint stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, (cx+x)>>1, (cy+y)>>1, cx, cy); cx = x; cy = y; was_off = 1; } else { if (was_off) stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, x,y, cx, cy); else stbtt_setvertex(&vertices[num_vertices++], STBTT_vline, x,y,0,0); was_off = 0; } } } num_vertices = stbtt__close_shape(vertices, num_vertices, was_off, start_off, sx,sy,scx,scy,cx,cy); } else if (numberOfContours < 0) { // Compound shapes. 
int more = 1; uint8_t *comp = data + g + 10; num_vertices = 0; vertices = 0; while (more) { uint16_t flags, gidx; int comp_num_verts = 0, i; stbtt_vertex *comp_verts = 0, *tmp = 0; float mtx[6] = {1,0,0,1,0,0}, m, n; flags = ttSHORT(comp); comp+=2; gidx = ttSHORT(comp); comp+=2; if (flags & 2) { // XY values if (flags & 1) { // shorts mtx[4] = ttSHORT(comp); comp+=2; mtx[5] = ttSHORT(comp); comp+=2; } else { mtx[4] = ttCHAR(comp); comp+=1; mtx[5] = ttCHAR(comp); comp+=1; } } else { // @TODO handle matching point ASSERT(0); } if (flags & (1<<3)) { // WE_HAVE_A_SCALE mtx[0] = mtx[3] = ttSHORT(comp)/16384.0f; comp+=2; mtx[1] = mtx[2] = 0; } else if (flags & (1<<6)) { // WE_HAVE_AN_X_AND_YSCALE mtx[0] = ttSHORT(comp)/16384.0f; comp+=2; mtx[1] = mtx[2] = 0; mtx[3] = ttSHORT(comp)/16384.0f; comp+=2; } else if (flags & (1<<7)) { // WE_HAVE_A_TWO_BY_TWO mtx[0] = ttSHORT(comp)/16384.0f; comp+=2; mtx[1] = ttSHORT(comp)/16384.0f; comp+=2; mtx[2] = ttSHORT(comp)/16384.0f; comp+=2; mtx[3] = ttSHORT(comp)/16384.0f; comp+=2; } // Find transformation scales. m = (float) sqrt(mtx[0]*mtx[0] + mtx[1]*mtx[1]); n = (float) sqrt(mtx[2]*mtx[2] + mtx[3]*mtx[3]); // Get indexed glyph. comp_num_verts = stbtt_GetGlyphShape(info, gidx, &comp_verts); if (comp_num_verts > 0) { // Transform vertices. for (i = 0; i < comp_num_verts; ++i) { stbtt_vertex* v = &comp_verts[i]; stbtt_vertex_type x,y; x=v->x; y=v->y; v->x = (stbtt_vertex_type)(m * (mtx[0]*x + mtx[2]*y + mtx[4])); v->y = (stbtt_vertex_type)(n * (mtx[1]*x + mtx[3]*y + mtx[5])); x=v->cx; y=v->cy; v->cx = (stbtt_vertex_type)(m * (mtx[0]*x + mtx[2]*y + mtx[4])); v->cy = (stbtt_vertex_type)(n * (mtx[1]*x + mtx[3]*y + mtx[5])); } // Append vertices. 
tmp = STBTT_malloc((num_vertices+comp_num_verts)*sizeof(stbtt_vertex), info->userdata); if (!tmp) { if (vertices) STBTT_free(vertices, info->userdata); if (comp_verts) STBTT_free(comp_verts, info->userdata); return 0; } if (num_vertices > 0 && vertices) memcpy(tmp, vertices, num_vertices*sizeof(stbtt_vertex)); memcpy(tmp+num_vertices, comp_verts, comp_num_verts*sizeof(stbtt_vertex)); if (vertices) STBTT_free(vertices, info->userdata); vertices = tmp; STBTT_free(comp_verts, info->userdata); num_vertices += comp_num_verts; } // More components ? more = flags & (1<<5); } } else { // numberOfCounters == 0, do nothing } *pvertices = vertices; return num_vertices; } static void stbtt__track_vertex(stbtt__csctx *c, int32_t x, int32_t y) { if (x > c->max_x || !c->started) c->max_x = x; if (y > c->max_y || !c->started) c->max_y = y; if (x < c->min_x || !c->started) c->min_x = x; if (y < c->min_y || !c->started) c->min_y = y; c->started = 1; } static void stbtt__csctx_v(stbtt__csctx *c, uint8_t type, int32_t x, int32_t y, int32_t cx, int32_t cy, int32_t cx1, int32_t cy1) { if (c->bounds) { stbtt__track_vertex(c, x, y); if (type == STBTT_vcubic) { stbtt__track_vertex(c, cx, cy); stbtt__track_vertex(c, cx1, cy1); } } else { stbtt_setvertex(&c->pvertices[c->num_vertices], type, x, y, cx, cy); c->pvertices[c->num_vertices].cx1 = (int16_t) cx1; c->pvertices[c->num_vertices].cy1 = (int16_t) cy1; } c->num_vertices++; } static void stbtt__csctx_close_shape(stbtt__csctx *ctx) { if (ctx->first_x != ctx->x || ctx->first_y != ctx->y) stbtt__csctx_v(ctx, STBTT_vline, (int)ctx->first_x, (int)ctx->first_y, 0, 0, 0, 0); } static void stbtt__csctx_rmove_to(stbtt__csctx *ctx, float dx, float dy) { stbtt__csctx_close_shape(ctx); ctx->first_x = ctx->x = ctx->x + dx; ctx->first_y = ctx->y = ctx->y + dy; stbtt__csctx_v(ctx, STBTT_vmove, (int)ctx->x, (int)ctx->y, 0, 0, 0, 0); } static void stbtt__csctx_rline_to(stbtt__csctx *ctx, float dx, float dy) { ctx->x += dx; ctx->y += dy; 
stbtt__csctx_v(ctx, STBTT_vline, (int)ctx->x, (int)ctx->y, 0, 0, 0, 0); } static void stbtt__csctx_rccurve_to(stbtt__csctx *ctx, float dx1, float dy1, float dx2, float dy2, float dx3, float dy3) { float cx1 = ctx->x + dx1; float cy1 = ctx->y + dy1; float cx2 = cx1 + dx2; float cy2 = cy1 + dy2; ctx->x = cx2 + dx3; ctx->y = cy2 + dy3; stbtt__csctx_v(ctx, STBTT_vcubic, (int)ctx->x, (int)ctx->y, (int)cx1, (int)cy1, (int)cx2, (int)cy2); } static stbtt__buf stbtt__get_subr(stbtt__buf idx, int n) { int count = stbtt__cff_index_count(&idx); int bias = 107; if (count >= 33900) bias = 32768; else if (count >= 1240) bias = 1131; n += bias; if (n < 0 || n >= count) return stbtt__new_buf(NULL, 0); return stbtt__cff_index_get(idx, n); } static stbtt__buf stbtt__cid_get_glyph_subrs(const stbtt_fontinfo *info, int glyph_index) { stbtt__buf fdselect = info->fdselect; int nranges, start, end, v, fmt, fdselector = -1, i; stbtt__buf_seek(&fdselect, 0); fmt = stbtt__buf_get8(&fdselect); if (fmt == 0) { // untested stbtt__buf_skip(&fdselect, glyph_index); fdselector = stbtt__buf_get8(&fdselect); } else if (fmt == 3) { nranges = stbtt__buf_get16(&fdselect); start = stbtt__buf_get16(&fdselect); for (i = 0; i < nranges; i++) { v = stbtt__buf_get8(&fdselect); end = stbtt__buf_get16(&fdselect); if (glyph_index >= start && glyph_index < end) { fdselector = v; break; } start = end; } } if (fdselector == -1) stbtt__new_buf(NULL, 0); return stbtt__get_subrs(info->cff, stbtt__cff_index_get(info->fontdicts, fdselector)); } static int stbtt__run_charstring(const stbtt_fontinfo *info, int glyph_index, stbtt__csctx *c) { int in_header = 1, maskbits = 0, subr_stack_height = 0, sp = 0, v, i, b0; int has_subrs = 0, clear_stack; float s[48]; stbtt__buf subr_stack[10], subrs = info->subrs, b; float f; #define STBTT__CSERR(s) (0) // this currently ignores the initial width value, which isn't needed if we have hmtx b = stbtt__cff_index_get(info->charstrings, glyph_index); while (b.cursor < b.size) { i = 0; 
clear_stack = 1; b0 = stbtt__buf_get8(&b); switch (b0) { // @TODO implement hinting case 0x13: // hintmask case 0x14: // cntrmask if (in_header) maskbits += (sp / 2); // implicit "vstem" in_header = 0; stbtt__buf_skip(&b, (maskbits + 7) / 8); break; case 0x01: // hstem case 0x03: // vstem case 0x12: // hstemhm case 0x17: // vstemhm maskbits += (sp / 2); break; case 0x15: // rmoveto in_header = 0; if (sp < 2) return STBTT__CSERR("rmoveto stack"); stbtt__csctx_rmove_to(c, s[sp-2], s[sp-1]); break; case 0x04: // vmoveto in_header = 0; if (sp < 1) return STBTT__CSERR("vmoveto stack"); stbtt__csctx_rmove_to(c, 0, s[sp-1]); break; case 0x16: // hmoveto in_header = 0; if (sp < 1) return STBTT__CSERR("hmoveto stack"); stbtt__csctx_rmove_to(c, s[sp-1], 0); break; case 0x05: // rlineto if (sp < 2) return STBTT__CSERR("rlineto stack"); for (; i + 1 < sp; i += 2) stbtt__csctx_rline_to(c, s[i], s[i+1]); break; // hlineto/vlineto and vhcurveto/hvcurveto alternate horizontal and vertical // starting from a different place. case 0x07: // vlineto if (sp < 1) return STBTT__CSERR("vlineto stack"); goto vlineto; case 0x06: // hlineto if (sp < 1) return STBTT__CSERR("hlineto stack"); for (;;) { if (i >= sp) break; stbtt__csctx_rline_to(c, s[i], 0); i++; vlineto: if (i >= sp) break; stbtt__csctx_rline_to(c, 0, s[i]); i++; } break; case 0x1F: // hvcurveto if (sp < 4) return STBTT__CSERR("hvcurveto stack"); goto hvcurveto; case 0x1E: // vhcurveto if (sp < 4) return STBTT__CSERR("vhcurveto stack"); for (;;) { if (i + 3 >= sp) break; stbtt__csctx_rccurve_to(c, 0, s[i], s[i+1], s[i+2], s[i+3], (sp - i == 5) ? s[i + 4] : 0.0f); i += 4; hvcurveto: if (i + 3 >= sp) break; stbtt__csctx_rccurve_to(c, s[i], 0, s[i+1], s[i+2], (sp - i == 5) ? 
s[i+4] : 0.0f, s[i+3]); i += 4; } break; case 0x08: // rrcurveto if (sp < 6) return STBTT__CSERR("rcurveline stack"); for (; i + 5 < sp; i += 6) stbtt__csctx_rccurve_to(c, s[i], s[i+1], s[i+2], s[i+3], s[i+4], s[i+5]); break; case 0x18: // rcurveline if (sp < 8) return STBTT__CSERR("rcurveline stack"); for (; i + 5 < sp - 2; i += 6) stbtt__csctx_rccurve_to(c, s[i], s[i+1], s[i+2], s[i+3], s[i+4], s[i+5]); if (i + 1 >= sp) return STBTT__CSERR("rcurveline stack"); stbtt__csctx_rline_to(c, s[i], s[i+1]); break; case 0x19: // rlinecurve if (sp < 8) return STBTT__CSERR("rlinecurve stack"); for (; i + 1 < sp - 6; i += 2) stbtt__csctx_rline_to(c, s[i], s[i+1]); if (i + 5 >= sp) return STBTT__CSERR("rlinecurve stack"); stbtt__csctx_rccurve_to(c, s[i], s[i+1], s[i+2], s[i+3], s[i+4], s[i+5]); break; case 0x1A: // vvcurveto case 0x1B: // hhcurveto if (sp < 4) return STBTT__CSERR("(vv|hh)curveto stack"); f = 0.0; if (sp & 1) { f = s[i]; i++; } for (; i + 3 < sp; i += 4) { if (b0 == 0x1B) stbtt__csctx_rccurve_to(c, s[i], f, s[i+1], s[i+2], s[i+3], 0.0); else stbtt__csctx_rccurve_to(c, f, s[i], s[i+1], s[i+2], 0.0, s[i+3]); f = 0.0; } break; case 0x0A: // callsubr if (!has_subrs) { if (info->fdselect.size) subrs = stbtt__cid_get_glyph_subrs(info, glyph_index); has_subrs = 1; } // FALLTHROUGH case 0x1D: // callgsubr if (sp < 1) return STBTT__CSERR("call(g|)subr stack"); v = (int) s[--sp]; if (subr_stack_height >= 10) return STBTT__CSERR("recursion limit"); subr_stack[subr_stack_height++] = b; b = stbtt__get_subr(b0 == 0x0A ? 
subrs : info->gsubrs, v); if (b.size == 0) return STBTT__CSERR("subr not found"); b.cursor = 0; clear_stack = 0; break; case 0x0B: // return if (subr_stack_height <= 0) return STBTT__CSERR("return outside subr"); b = subr_stack[--subr_stack_height]; clear_stack = 0; break; case 0x0E: // endchar stbtt__csctx_close_shape(c); return 1; case 0x0C: { // two-byte escape float dx1, dx2, dx3, dx4, dx5, dx6, dy1, dy2, dy3, dy4, dy5, dy6; float dx, dy; int b1 = stbtt__buf_get8(&b); switch (b1) { // @TODO These "flex" implementations ignore the flex-depth and resolution, // and always draw beziers. case 0x22: // hflex if (sp < 7) return STBTT__CSERR("hflex stack"); dx1 = s[0]; dx2 = s[1]; dy2 = s[2]; dx3 = s[3]; dx4 = s[4]; dx5 = s[5]; dx6 = s[6]; stbtt__csctx_rccurve_to(c, dx1, 0, dx2, dy2, dx3, 0); stbtt__csctx_rccurve_to(c, dx4, 0, dx5, -dy2, dx6, 0); break; case 0x23: // flex if (sp < 13) return STBTT__CSERR("flex stack"); dx1 = s[0]; dy1 = s[1]; dx2 = s[2]; dy2 = s[3]; dx3 = s[4]; dy3 = s[5]; dx4 = s[6]; dy4 = s[7]; dx5 = s[8]; dy5 = s[9]; dx6 = s[10]; dy6 = s[11]; //fd is s[12] stbtt__csctx_rccurve_to(c, dx1, dy1, dx2, dy2, dx3, dy3); stbtt__csctx_rccurve_to(c, dx4, dy4, dx5, dy5, dx6, dy6); break; case 0x24: // hflex1 if (sp < 9) return STBTT__CSERR("hflex1 stack"); dx1 = s[0]; dy1 = s[1]; dx2 = s[2]; dy2 = s[3]; dx3 = s[4]; dx4 = s[5]; dx5 = s[6]; dy5 = s[7]; dx6 = s[8]; stbtt__csctx_rccurve_to(c, dx1, dy1, dx2, dy2, dx3, 0); stbtt__csctx_rccurve_to(c, dx4, 0, dx5, dy5, dx6, -(dy1+dy2+dy5)); break; case 0x25: // flex1 if (sp < 11) return STBTT__CSERR("flex1 stack"); dx1 = s[0]; dy1 = s[1]; dx2 = s[2]; dy2 = s[3]; dx3 = s[4]; dy3 = s[5]; dx4 = s[6]; dy4 = s[7]; dx5 = s[8]; dy5 = s[9]; dx6 = dy6 = s[10]; dx = dx1+dx2+dx3+dx4+dx5; dy = dy1+dy2+dy3+dy4+dy5; if (fabs(dx) > fabs(dy)) dy6 = -dy; else dx6 = -dx; stbtt__csctx_rccurve_to(c, dx1, dy1, dx2, dy2, dx3, dy3); stbtt__csctx_rccurve_to(c, dx4, dy4, dx5, dy5, dx6, dy6); break; default: return 
STBTT__CSERR("unimplemented"); } } break; default: if (b0 != 255 && b0 != 28 && b0 < 32) return STBTT__CSERR("reserved operator"); // push immediate if (b0 == 255) { f = (float)(int32_t)stbtt__buf_get32(&b) / 0x10000; } else { stbtt__buf_skip(&b, -1); f = (float)(int16_t)stbtt__cff_int(&b); } if (sp >= 48) return STBTT__CSERR("push stack overflow"); s[sp++] = f; clear_stack = 0; break; } if (clear_stack) sp = 0; } return STBTT__CSERR("no endchar"); #undef STBTT__CSERR } static int stbtt__GetGlyphShapeT2(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **pvertices) { // runs the charstring twice, once to count and once to output (to avoid realloc) stbtt__csctx count_ctx = STBTT__CSCTX_INIT(1); stbtt__csctx output_ctx = STBTT__CSCTX_INIT(0); if (stbtt__run_charstring(info, glyph_index, &count_ctx)) { *pvertices = STBTT_malloc(count_ctx.num_vertices*sizeof(stbtt_vertex), info->userdata); output_ctx.pvertices = *pvertices; if (stbtt__run_charstring(info, glyph_index, &output_ctx)) { ASSERT(output_ctx.num_vertices == count_ctx.num_vertices); return output_ctx.num_vertices; } } *pvertices = NULL; return 0; } static int stbtt__GetGlyphInfoT2(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1) { stbtt__csctx c = STBTT__CSCTX_INIT(1); int r = stbtt__run_charstring(info, glyph_index, &c); if (x0) *x0 = r ? c.min_x : 0; if (y0) *y0 = r ? c.min_y : 0; if (x1) *x1 = r ? c.max_x : 0; if (y1) *y1 = r ? c.max_y : 0; return r ? c.num_vertices : 0; } // returns # of vertices and fills *vertices with the pointer to them // these are expressed in "unscaled" coordinates // // The shape is a series of contours. Each one starts with // a STBTT_moveto, then consists of a series of mixed // STBTT_lineto and STBTT_curveto segments. A lineto // draws a line from previous endpoint to its x,y; a curveto // draws a quadratic bezier from previous endpoint to // its x,y, using cx,cy as the bezier control point. 
int stbtt_GetGlyphShape(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **pvertices) { if (!info->cff.size) return stbtt__GetGlyphShapeTT(info, glyph_index, pvertices); else return stbtt__GetGlyphShapeT2(info, glyph_index, pvertices); } void stbtt_GetGlyphHMetrics(const stbtt_fontinfo *info, int glyph_index, int *advanceWidth, int *leftSideBearing) { uint16_t numOfLongHorMetrics = ttUSHORT(info->data+info->hhea + 34); if (glyph_index < numOfLongHorMetrics) { if (advanceWidth) *advanceWidth = ttSHORT(info->data + info->hmtx + 4*glyph_index); if (leftSideBearing) *leftSideBearing = ttSHORT(info->data + info->hmtx + 4*glyph_index + 2); } else { if (advanceWidth) *advanceWidth = ttSHORT(info->data + info->hmtx + 4*(numOfLongHorMetrics-1)); if (leftSideBearing) *leftSideBearing = ttSHORT(info->data + info->hmtx + 4*numOfLongHorMetrics + 2*(glyph_index - numOfLongHorMetrics)); } } // Retrieves a complete list of all of the kerning pairs provided by the font // stbtt_GetKerningTable never writes more than table_length entries and returns how many entries it did write. // The table will be sorted by (a.glyph1 == b.glyph1)?(a.glyph2 < b.glyph2):(a.glyph1 < b.glyph1) int stbtt_GetKerningTableLength(const stbtt_fontinfo *info) { uint8_t *data = info->data + info->kern; // we only look at the first table. it must be 'horizontal' and format 0. if (!info->kern) return 0; if (ttUSHORT(data+2) < 1) // number of tables, need at least 1 return 0; if (ttUSHORT(data+8) != 1) // horizontal flag must be set in format return 0; return ttUSHORT(data+10); } int stbtt_GetKerningTable(const stbtt_fontinfo *info, stbtt_kerningentry* table, int table_length) { uint8_t *data = info->data + info->kern; int k, length; // we only look at the first table. it must be 'horizontal' and format 0. 
if (!info->kern) return 0; if (ttUSHORT(data+2) < 1) // number of tables, need at least 1 return 0; if (ttUSHORT(data+8) != 1) // horizontal flag must be set in format return 0; length = ttUSHORT(data+10); if (table_length < length) length = table_length; for (k = 0; k < length; k++) { table[k].glyph1 = ttUSHORT(data+18+(k*6)); table[k].glyph2 = ttUSHORT(data+20+(k*6)); table[k].advance = ttSHORT(data+22+(k*6)); } return length; } static int stbtt__GetGlyphKernInfoAdvance(const stbtt_fontinfo *info, int glyph1, int glyph2) { uint8_t *data = info->data + info->kern; uint32_t needle, straw; int l, r, m; // we only look at the first table. it must be 'horizontal' and format 0. if (!info->kern) return 0; if (ttUSHORT(data+2) < 1) // number of tables, need at least 1 return 0; if (ttUSHORT(data+8) != 1) // horizontal flag must be set in format return 0; l = 0; r = ttUSHORT(data+10) - 1; needle = glyph1 << 16 | glyph2; while (l <= r) { m = (l + r) >> 1; straw = ttULONG(data+18+(m*6)); // note: unaligned read if (needle < straw) r = m - 1; else if (needle > straw) l = m + 1; else return ttSHORT(data+22+(m*6)); } return 0; } static int32_t stbtt__GetCoverageIndex(uint8_t *coverageTable, int glyph) { uint16_t coverageFormat = ttUSHORT(coverageTable); switch (coverageFormat) { case 1: { uint16_t glyphCount = ttUSHORT(coverageTable + 2); // Binary search. int32_t l=0, r=glyphCount-1, m; int straw, needle=glyph; while (l <= r) { uint8_t *glyphArray = coverageTable + 4; uint16_t glyphID; m = (l + r) >> 1; glyphID = ttUSHORT(glyphArray + 2 * m); straw = glyphID; if (needle < straw) r = m - 1; else if (needle > straw) l = m + 1; else { return m; } } break; } case 2: { uint16_t rangeCount = ttUSHORT(coverageTable + 2); uint8_t *rangeArray = coverageTable + 4; // Binary search. 
int32_t l=0, r=rangeCount-1, m; int strawStart, strawEnd, needle=glyph; while (l <= r) { uint8_t *rangeRecord; m = (l + r) >> 1; rangeRecord = rangeArray + 6 * m; strawStart = ttUSHORT(rangeRecord); strawEnd = ttUSHORT(rangeRecord + 2); if (needle < strawStart) r = m - 1; else if (needle > strawEnd) l = m + 1; else { uint16_t startCoverageIndex = ttUSHORT(rangeRecord + 4); return startCoverageIndex + glyph - strawStart; } } break; } default: return -1; // unsupported } return -1; } static int32_t stbtt__GetGlyphClass(uint8_t *classDefTable, int glyph) { uint16_t classDefFormat = ttUSHORT(classDefTable); switch (classDefFormat) { case 1: { uint16_t startGlyphID = ttUSHORT(classDefTable + 2); uint16_t glyphCount = ttUSHORT(classDefTable + 4); uint8_t *classDef1ValueArray = classDefTable + 6; if (glyph >= startGlyphID && glyph < startGlyphID + glyphCount) return (int32_t)ttUSHORT(classDef1ValueArray + 2 * (glyph - startGlyphID)); break; } case 2: { uint16_t classRangeCount = ttUSHORT(classDefTable + 2); uint8_t *classRangeRecords = classDefTable + 4; // Binary search. int32_t l=0, r=classRangeCount-1, m; int strawStart, strawEnd, needle=glyph; while (l <= r) { uint8_t *classRangeRecord; m = (l + r) >> 1; classRangeRecord = classRangeRecords + 6 * m; strawStart = ttUSHORT(classRangeRecord); strawEnd = ttUSHORT(classRangeRecord + 2); if (needle < strawStart) r = m - 1; else if (needle > strawEnd) l = m + 1; else return (int32_t)ttUSHORT(classRangeRecord + 4); } break; } default: return -1; // Unsupported definition type, return an error. } // "All glyphs not assigned to a class fall into class 0". (OpenType spec) return 0; } // Define to ASSERT(x) if you want to break on unimplemented formats. 
// Look up the pair-positioning x-advance adjustment for (glyph1, glyph2) in
// the GPOS table. Only lookup type 2 (Pair Adjustment) with value format
// X_ADVANCE-only records is handled; everything else yields 0.
static int32_t stbtt__GetGlyphGPOSInfoAdvance(const stbtt_fontinfo *info, int glyph1, int glyph2)
{
   uint16_t lookupListOffset;
   uint8_t *lookupList;
   uint16_t lookupCount;
   uint8_t *data;
   int32_t i, sti;

   if (!info->gpos) return 0;

   data = info->data + info->gpos;

   if (ttUSHORT(data+0) != 1) return 0; // Major version 1
   if (ttUSHORT(data+2) != 0) return 0; // Minor version 0

   lookupListOffset = ttUSHORT(data+8);
   lookupList = data + lookupListOffset;
   lookupCount = ttUSHORT(lookupList);

   for (i=0; i<lookupCount; ++i) {
      uint16_t lookupOffset = ttUSHORT(lookupList + 2 + 2 * i);
      uint8_t *lookupTable = lookupList + lookupOffset;

      uint16_t lookupType = ttUSHORT(lookupTable);
      uint16_t subTableCount = ttUSHORT(lookupTable + 4);
      uint8_t *subTableOffsets = lookupTable + 6;
      if (lookupType != 2) // Pair Adjustment Positioning Subtable
         continue;

      for (sti=0; sti<subTableCount; sti++) {
         uint16_t subtableOffset = ttUSHORT(subTableOffsets + 2 * sti);
         uint8_t *table = lookupTable + subtableOffset;
         uint16_t posFormat = ttUSHORT(table);
         uint16_t coverageOffset = ttUSHORT(table + 2);
         // coverage tells us whether glyph1 participates in this subtable at all
         int32_t coverageIndex = stbtt__GetCoverageIndex(table + coverageOffset, glyph1);
         if (coverageIndex == -1) continue;

         switch (posFormat) {
            case 1: {
               // format 1: per-first-glyph PairSet, binary-searched by second glyph
               int32_t l, r, m;
               int straw, needle;
               uint16_t valueFormat1 = ttUSHORT(table + 4);
               uint16_t valueFormat2 = ttUSHORT(table + 6);
               if (valueFormat1 == 4 && valueFormat2 == 0) { // Support more formats?
                  int32_t valueRecordPairSizeInBytes = 2;
                  uint16_t pairSetCount = ttUSHORT(table + 8);
                  uint16_t pairPosOffset = ttUSHORT(table + 10 + 2 * coverageIndex);
                  uint8_t *pairValueTable = table + pairPosOffset;
                  uint16_t pairValueCount = ttUSHORT(pairValueTable);
                  uint8_t *pairValueArray = pairValueTable + 2;

                  if (coverageIndex >= pairSetCount) return 0; // malformed font

                  needle=glyph2;
                  r=pairValueCount-1;
                  l=0;

                  // Binary search.
                  while (l <= r) {
                     uint16_t secondGlyph;
                     uint8_t *pairValue;
                     m = (l + r) >> 1;
                     pairValue = pairValueArray + (2 + valueRecordPairSizeInBytes) * m;
                     secondGlyph = ttUSHORT(pairValue);
                     straw = secondGlyph;
                     if (needle < straw)
                        r = m - 1;
                     else if (needle > straw)
                        l = m + 1;
                     else {
                        int16_t xAdvance = ttSHORT(pairValue + 2);
                        return xAdvance;
                     }
                  }
               } else
                  return 0;
               break;
            }

            case 2: {
               // format 2: class-pair matrix indexed by (class of glyph1, class of glyph2)
               uint16_t valueFormat1 = ttUSHORT(table + 4);
               uint16_t valueFormat2 = ttUSHORT(table + 6);
               if (valueFormat1 == 4 && valueFormat2 == 0) { // Support more formats?
                  uint16_t classDef1Offset = ttUSHORT(table + 8);
                  uint16_t classDef2Offset = ttUSHORT(table + 10);
                  int glyph1class = stbtt__GetGlyphClass(table + classDef1Offset, glyph1);
                  int glyph2class = stbtt__GetGlyphClass(table + classDef2Offset, glyph2);

                  uint16_t class1Count = ttUSHORT(table + 12);
                  uint16_t class2Count = ttUSHORT(table + 14);
                  uint8_t *class1Records, *class2Records;
                  int16_t xAdvance;

                  if (glyph1class < 0 || glyph1class >= class1Count) return 0; // malformed
                  if (glyph2class < 0 || glyph2class >= class2Count) return 0; // malformed

                  class1Records = table + 16;
                  class2Records = class1Records + 2 * (glyph1class * class2Count);
                  xAdvance = ttSHORT(class2Records + 2 * glyph2class);
                  return xAdvance;
               } else
                  return 0;
               break;
            }

            default:
               return 0; // Unsupported position format
         }
      }
   }

   return 0;
}

// Kerning between two glyph IDs: prefer GPOS when present, else fall back to
// the legacy 'kern' table. Returns an unscaled advance adjustment.
int stbtt_GetGlyphKernAdvance(const stbtt_fontinfo *info, int g1, int g2)
{
   int xAdvance = 0;

   if (info->gpos)
      xAdvance += stbtt__GetGlyphGPOSInfoAdvance(info, g1, g2);
   else if (info->kern)
      xAdvance += stbtt__GetGlyphKernInfoAdvance(info, g1, g2);

   return xAdvance;
}

// an additional amount to add to the 'advance' value between ch1 and ch2
int stbtt_GetCodepointKernAdvance(const stbtt_fontinfo *info, int ch1, int ch2)
{
   if (!info->kern && !info->gpos) // if no kerning table, don't waste time looking up both codepoint->glyphs
      return 0;
   return stbtt_GetGlyphKernAdvance(info, stbtt_FindGlyphIndex(info,ch1), stbtt_FindGlyphIndex(info,ch2));
}

// leftSideBearing is the offset from the current horizontal position to the left edge of the character
// advanceWidth is the offset from the current horizontal position to the next horizontal position
// these are expressed in unscaled coordinates
void stbtt_GetCodepointHMetrics(const stbtt_fontinfo *info, int codepoint, int *advanceWidth, int *leftSideBearing)
{
   stbtt_GetGlyphHMetrics(info, stbtt_FindGlyphIndex(info,codepoint), advanceWidth, leftSideBearing);
}

// ascent is the coordinate above the baseline the font extends; descent
// is the coordinate below the baseline the font extends (i.e. it is typically negative)
// lineGap is the spacing between one row's descent and the next row's ascent...
// so you should advance the vertical position by "*ascent - *descent + *lineGap"
// these are expressed in unscaled coordinates, so you must multiply by
// the scale factor for a given size
void stbtt_GetFontVMetrics(const stbtt_fontinfo *info, int *ascent, int *descent, int *lineGap)
{
   if (ascent ) *ascent  = ttSHORT(info->data+info->hhea + 4);
   if (descent) *descent = ttSHORT(info->data+info->hhea + 6);
   if (lineGap) *lineGap = ttSHORT(info->data+info->hhea + 8);
}

// analogous to GetFontVMetrics, but returns the "typographic" values from the OS/2
// table (specific to MS/Windows TTF files).
//
// Returns 1 on success (table present), 0 on failure.
int stbtt_GetFontVMetricsOS2(const stbtt_fontinfo *info, int *typoAscent, int *typoDescent, int *typoLineGap) { int tab = stbtt__find_table(info->data, info->fontstart, "OS/2"); if (!tab) return 0; if (typoAscent ) *typoAscent = ttSHORT(info->data+tab + 68); if (typoDescent) *typoDescent = ttSHORT(info->data+tab + 70); if (typoLineGap) *typoLineGap = ttSHORT(info->data+tab + 72); return 1; } // the bounding box around all possible characters void stbtt_GetFontBoundingBox(const stbtt_fontinfo *info, int *x0, int *y0, int *x1, int *y1) { *x0 = ttSHORT(info->data + info->head + 36); *y0 = ttSHORT(info->data + info->head + 38); *x1 = ttSHORT(info->data + info->head + 40); *y1 = ttSHORT(info->data + info->head + 42); } // computes a scale factor to produce a font whose "height" is 'pixels' tall. // Height is measured as the distance from the highest ascender to the lowest // descender; in other words, it's equivalent to calling stbtt_GetFontVMetrics // and computing: // scale = pixels / (ascent - descent) // so if you prefer to measure height by the ascent only, use a similar calculation. float stbtt_ScaleForPixelHeight(const stbtt_fontinfo *info, float height) { int fheight = ttSHORT(info->data + info->hhea + 4) - ttSHORT(info->data + info->hhea + 6); return (float) height / fheight; } // computes a scale factor to produce a font whose EM size is mapped to // 'pixels' tall. This is probably what traditional APIs compute, but // I'm not positive. 
float stbtt_ScaleForMappingEmToPixels(const stbtt_fontinfo *info, float pixels)
{
   // unitsPerEm lives at offset 18 of the 'head' table
   int unitsPerEm = ttUSHORT(info->data + info->head + 18);
   return pixels / unitsPerEm;
}

// frees the data allocated above
void stbtt_FreeShape(const stbtt_fontinfo *info, stbtt_vertex *v)
{
   STBTT_free(v, info->userdata);
}

// Scan the SVG document list for the index entry whose inclusive glyph range
// covers 'gl'; returns a pointer to that 12-byte entry, or 0 if none does.
uint8_t *stbtt_FindSVGDoc(const stbtt_fontinfo *info, int gl)
{
   int i;
   uint8_t *data = info->data;
   uint8_t *svg_doc_list = data + stbtt__get_svg((stbtt_fontinfo *) info);

   int numEntries = ttUSHORT(svg_doc_list);
   uint8_t *svg_docs = svg_doc_list + 2;

   for(i=0; i<numEntries; i++) {
      uint8_t *svg_doc = svg_docs + (12 * i);
      // entry layout: startGlyphID, endGlyphID, docOffset, docLength
      if ((gl >= ttUSHORT(svg_doc)) && (gl <= ttUSHORT(svg_doc + 2)))
         return svg_doc;
   }
   return 0;
}

// fills svg with the character's SVG data.
// returns data size or 0 if SVG not found.
int stbtt_GetGlyphSVG(const stbtt_fontinfo *info, int gl, const char **svg)
{
   uint8_t *data = info->data;
   uint8_t *svg_doc;

   if (info->svg == 0)
      return 0;

   svg_doc = stbtt_FindSVGDoc(info, gl);
   if (svg_doc != NULL) {
      *svg = (char *) data + info->svg + ttULONG(svg_doc + 4);
      return ttULONG(svg_doc + 8);
   } else {
      return 0;
   }
}

int stbtt_GetCodepointSVG(const stbtt_fontinfo *info, int unicode_codepoint, const char **svg)
{
   return stbtt_GetGlyphSVG(info, stbtt_FindGlyphIndex(info, unicode_codepoint), svg);
}

//////////////////////////////////////////////////////////////////////////////
//
// antialiasing software rasterizer
//

void stbtt_GetGlyphBitmapBoxSubpixel(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y, float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1)
{
   int x0=0,y0=0,x1,y1; // =0 suppresses compiler warning
   if (!stbtt_GetGlyphBox(font, glyph, &x0,&y0,&x1,&y1)) {
      // e.g. space character
      if (ix0) *ix0 = 0;
      if (iy0) *iy0 = 0;
      if (ix1) *ix1 = 0;
      if (iy1) *iy1 = 0;
   } else {
      // move to integral bboxes (treating pixels as little squares, what pixels get touched)?
      // note: y is negated because bitmaps are y-down while glyph space is y-up
      if (ix0) *ix0 = STBTT_ifloor( x0 * scale_x + shift_x);
      if (iy0) *iy0 = STBTT_ifloor(-y1 * scale_y + shift_y);
      if (ix1) *ix1 = STBTT_iceil ( x1 * scale_x + shift_x);
      if (iy1) *iy1 = STBTT_iceil (-y0 * scale_y + shift_y);
   }
}

void stbtt_GetGlyphBitmapBox(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1)
{
   stbtt_GetGlyphBitmapBoxSubpixel(font, glyph, scale_x, scale_y,0.0f,0.0f, ix0, iy0, ix1, iy1);
}

// same as stbtt_GetCodepointBitmapBox, but you can specify a subpixel
// shift for the character
void stbtt_GetCodepointBitmapBoxSubpixel(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1)
{
   stbtt_GetGlyphBitmapBoxSubpixel(font, stbtt_FindGlyphIndex(font,codepoint), scale_x, scale_y,shift_x,shift_y, ix0,iy0,ix1,iy1);
}

// get the bbox of the bitmap centered around the glyph origin; so the
// bitmap width is ix1-ix0, height is iy1-iy0, and location to place
// the bitmap top left is (leftSideBearing*scale,iy0).
// (Note that the bitmap uses y-increases-down, but the shape uses
// y-increases-up, so CodepointBitmapBox and CodepointBox are inverted.)
void stbtt_GetCodepointBitmapBox(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1)
{
   stbtt_GetCodepointBitmapBoxSubpixel(font, codepoint, scale_x, scale_y,0.0f,0.0f, ix0,iy0,ix1,iy1);
}

//////////////////////////////////////////////////////////////////////////////
//
//  Rasterizer

// Chunked bump allocator for active-edge records: recycles freed nodes via a
// singly-linked free list, otherwise carves fixed-size slots out of chunks.
// Chunk capacity is sized inversely to the allocation size.
static void *stbtt__hheap_alloc(stbtt__hheap *hh, size_t size, void *userdata)
{
   if (hh->first_free) {
      // reuse a previously freed slot
      void *p = hh->first_free;
      hh->first_free = * (void **) p;
      return p;
   } else {
      if (hh->num_remaining_in_head_chunk == 0) {
         int count = (size < 32 ? 2000 : size < 128 ?
800 : 100);
         stbtt__hheap_chunk *c = STBTT_malloc(sizeof(stbtt__hheap_chunk) + size * count, userdata);
         if (c == NULL)
            return NULL;
         c->next = hh->head;
         hh->head = c;
         hh->num_remaining_in_head_chunk = count;
      }
      // hand out slots from the end of the head chunk, back to front
      --hh->num_remaining_in_head_chunk;
      return (char *) (hh->head) + sizeof(stbtt__hheap_chunk) + size * hh->num_remaining_in_head_chunk;
   }
}

// push a node onto the free list; memory stays owned by the chunk list
static void stbtt__hheap_STBTT_free(stbtt__hheap *hh, void *p)
{
   *(void **) p = hh->first_free;
   hh->first_free = p;
}

// release every chunk the heap allocated
static void stbtt__hheap_cleanup(stbtt__hheap *hh, void *userdata)
{
   stbtt__hheap_chunk *c = hh->head;
   while (c) {
      stbtt__hheap_chunk *n = c->next;
      STBTT_free(c, userdata);
      c = n;
   }
}

#if STBTT_RASTERIZER_VERSION == 1
// fixed-point format used by the v1 rasterizer: STBTT_FIXSHIFT fractional bits
#define STBTT_FIXSHIFT   10
#define STBTT_FIX        (1 << STBTT_FIXSHIFT)
#define STBTT_FIXMASK    (STBTT_FIX-1)

// Convert an edge into an active-edge record with fixed-point x and dx,
// positioned at the scanline 'start_point'.
static stbtt__active_edge *stbtt__new_active(stbtt__hheap *hh, stbtt__edge *e, int off_x, float start_point, void *userdata)
{
   stbtt__active_edge *z = (stbtt__active_edge *) stbtt__hheap_alloc(hh, sizeof(*z), userdata);
   float dxdy = (e->x1 - e->x0) / (e->y1 - e->y0);
   ASSERT(z != NULL);
   if (!z) return z;

   // round dx down to avoid overshooting
   if (dxdy < 0)
      z->dx = -STBTT_ifloor(STBTT_FIX * -dxdy);
   else
      z->dx = STBTT_ifloor(STBTT_FIX * dxdy);

   z->x = STBTT_ifloor(STBTT_FIX * e->x0 + z->dx * (start_point - e->y0)); // use z->dx so when we offset later it's by the same amount
   z->x -= off_x * STBTT_FIX;

   z->ey = e->y1;
   z->next = 0;
   z->direction = e->invert ? 1 : -1;
   return z;
}
#elif STBTT_RASTERIZER_VERSION == 2
// Convert an edge into an active-edge record with float x, dx/dy and dy/dx,
// positioned at the scanline 'start_point'.
static stbtt__active_edge *stbtt__new_active(stbtt__hheap *hh, stbtt__edge *e, int off_x, float start_point, void *userdata)
{
   stbtt__active_edge *z = (stbtt__active_edge *) stbtt__hheap_alloc(hh, sizeof(*z), userdata);
   float dxdy = (e->x1 - e->x0) / (e->y1 - e->y0);
   ASSERT(z != NULL);
   //ASSERT(e->y0 <= start_point);
   if (!z) return z;
   z->fdx = dxdy;
   z->fdy = dxdy != 0.0f ? (1.0f/dxdy) : 0.0f;
   z->fx = e->x0 + dxdy * (start_point - e->y0);
   z->fx -= off_x;
   z->direction = e->invert ? 1.0f : -1.0f;
   z->sy = e->y0;
   z->ey = e->y1;
   z->next = 0;
   return z;
}
#else
#error "Unrecognized value of STBTT_RASTERIZER_VERSION"
#endif

#if STBTT_RASTERIZER_VERSION == 1
// note: this routine clips fills that extend off the edges... ideally this
// wouldn't happen, but it could happen if the truetype glyph bounding boxes
// are wrong, or if the user supplies a too-small bitmap
static void stbtt__fill_active_edges(unsigned char *scanline, int len, stbtt__active_edge *e, int max_weight)
{
   // non-zero winding fill
   int x0=0, w=0;

   while (e) {
      if (w == 0) {
         // if we're currently at zero, we need to record the edge start point
         x0 = e->x; w += e->direction;
      } else {
         int x1 = e->x; w += e->direction;
         // if we went to zero, we need to draw
         if (w == 0) {
            int i = x0 >> STBTT_FIXSHIFT;
            int j = x1 >> STBTT_FIXSHIFT;

            if (i < len && j >= 0) {
               if (i == j) {
                  // x0,x1 are the same pixel, so compute combined coverage
                  scanline[i] = scanline[i] + (uint8_t) ((x1 - x0) * max_weight >> STBTT_FIXSHIFT);
               } else {
                  if (i >= 0) // add antialiasing for x0
                     scanline[i] = scanline[i] + (uint8_t) (((STBTT_FIX - (x0 & STBTT_FIXMASK)) * max_weight) >> STBTT_FIXSHIFT);
                  else
                     i = -1; // clip

                  if (j < len) // add antialiasing for x1
                     scanline[j] = scanline[j] + (uint8_t) (((x1 & STBTT_FIXMASK) * max_weight) >> STBTT_FIXSHIFT);
                  else
                     j = len; // clip

                  for (++i; i < j; ++i) // fill pixels between x0 and x1
                     scanline[i] = scanline[i] + (uint8_t) max_weight;
               }
            }
         }
      }

      e = e->next;
   }
}

// v1 rasterizer: supersamples vertically (vsubsample scanlines per pixel row)
// and accumulates coverage with the fixed-point active edge list.
static void stbtt__rasterize_sorted_edges(stbtt__bitmap *result, stbtt__edge *e, int n, int vsubsample, int off_x, int off_y, void *userdata)
{
   stbtt__hheap hh = { 0, 0, 0 };
   stbtt__active_edge *active = NULL;
   int y,j=0;
   int max_weight = (255 / vsubsample); // weight per vertical scanline
   int s; // vertical subsample index
   unsigned char scanline_data[512], *scanline;

   if (result->w > 512)
      scanline = STBTT_malloc(result->w, userdata);
   else
      scanline = scanline_data;

   y = off_y * vsubsample;
   // sentinel edge below every scanline so the insertion loop terminates
   e[n].y0 = (off_y + result->h) * (float) vsubsample + 1;

   while (j < result->h) {
      memset(scanline, 0, result->w);
      for (s=0; s < vsubsample; ++s) {
         // find center of pixel for this scanline
         float scan_y = y + 0.5f;
         stbtt__active_edge **step = &active;

         // update all active edges;
         // remove all active edges that terminate before the center of this scanline
         while (*step) {
            stbtt__active_edge * z = *step;
            if (z->ey <= scan_y) {
               *step = z->next; // delete from list
               ASSERT(z->direction);
               z->direction = 0;
               stbtt__hheap_STBTT_free(&hh, z);
            } else {
               z->x += z->dx; // advance to position for current scanline
               step = &((*step)->next); // advance through list
            }
         }

         // resort the list if needed (bubble passes until no swaps)
         for(;;) {
            int changed=0;
            step = &active;
            while (*step && (*step)->next) {
               if ((*step)->x > (*step)->next->x) {
                  stbtt__active_edge *t = *step;
                  stbtt__active_edge *q = t->next;

                  t->next = q->next;
                  q->next = t;
                  *step = q;
                  changed = 1;
               }
               step = &(*step)->next;
            }
            if (!changed) break;
         }

         // insert all edges that start before the center of this scanline -- omit ones that also end on this scanline
         while (e->y0 <= scan_y) {
            if (e->y1 > scan_y) {
               stbtt__active_edge *z = stbtt__new_active(&hh, e, off_x, scan_y, userdata);
               if (z != NULL) {
                  // find insertion point (list kept sorted by x)
                  if (active == NULL)
                     active = z;
                  else if (z->x < active->x) {
                     // insert at front
                     z->next = active;
                     active = z;
                  } else {
                     // find thing to insert AFTER
                     stbtt__active_edge *p = active;
                     while (p->next && p->next->x < z->x)
                        p = p->next;
                     // at this point, p->next->x is NOT < z->x
                     z->next = p->next;
                     p->next = z;
                  }
               }
            }
            ++e;
         }

         // now process all active edges in XOR fashion
         if (active)
            stbtt__fill_active_edges(scanline, result->w, active, max_weight);

         ++y;
      }
      memcpy(result->pixels + j * result->stride, scanline, result->w);
      ++j;
   }

   stbtt__hheap_cleanup(&hh, userdata);

   if (scanline != scanline_data)
      STBTT_free(scanline, userdata);
}

#elif STBTT_RASTERIZER_VERSION == 2

// the edge passed in here does not cross the vertical line at x or the vertical line at x+1
// (i.e. it has already been clipped to those)
static void stbtt__handle_clipped_edge(float *scanline, int x, stbtt__active_edge *e, float x0, float y0, float x1, float y1)
{
   if (y0 == y1) return;
   ASSERT(y0 < y1);
   ASSERT(e->sy <= e->ey);
   if (y0 > e->ey) return;
   if (y1 < e->sy) return;
   // clip the segment vertically to the edge's own [sy,ey] span
   if (y0 < e->sy) {
      x0 += (x1-x0) * (e->sy - y0) / (y1-y0);
      y0 = e->sy;
   }
   if (y1 > e->ey) {
      x1 += (x1-x0) * (e->ey - y1) / (y1-y0);
      y1 = e->ey;
   }

   // sanity-check the "does not cross x or x+1" precondition
   if (x0 == x) {
      ASSERT(x1 <= x+1);
   } else if (x0 == x+1) {
      ASSERT(x1 >= x);
   } else if (x0 <= x) {
      ASSERT(x1 <= x);
   } else if (x0 >= x+1) {
      ASSERT(x1 >= x+1);
   } else {
      ASSERT(x1 >= x && x1 <= x+1);
   }

   if (x0 <= x && x1 <= x) {
      // segment entirely left of the pixel: full-width coverage
      scanline[x] += e->direction * (y1-y0);
   } else if (!(x0 >= x+1 && x1 >= x+1)) {
      ASSERT(x0 >= x && x0 <= x+1 && x1 >= x && x1 <= x+1);
      scanline[x] += e->direction * (y1-y0) * (1-((x0-x)+(x1-x))/2); // coverage = 1 - average x position
   }
}

static float stbtt__sized_trapezoid_area(float height, float top_width, float bottom_width)
{
   ASSERT(top_width >= 0);
   ASSERT(bottom_width >= 0);
   return (top_width + bottom_width) / 2.0f * height;
}

// trapezoid area from the x positions of its top and bottom corners
static float stbtt__position_trapezoid_area(float height, float tx0, float tx1, float bx0, float bx1)
{
   return stbtt__sized_trapezoid_area(height, tx1 - tx0, bx1 - bx0);
}

static float stbtt__sized_triangle_area(float height, float width)
{
   return height * width / 2;
}

// Accumulate signed coverage for one pixel row [y_top, y_top+1) from every
// active edge: 'scanline' gets per-pixel partial coverage, 'scanline_fill'
// gets full-coverage deltas for everything right of an edge.
static void stbtt__fill_active_edges_new(float *scanline, float *scanline_fill, int len, stbtt__active_edge *e, float y_top)
{
   float y_bottom = y_top+1;

   while (e) {
      // brute force every pixel

      // compute intersection points with top & bottom
      ASSERT(e->ey >= y_top);

      if (e->fdx == 0) {
         // vertical edge: affects exactly one pixel column
         float x0 = e->fx;
         if (x0 < len) {
            if (x0 >= 0) {
               stbtt__handle_clipped_edge(scanline,(int) x0,e, x0,y_top, x0,y_bottom);
               stbtt__handle_clipped_edge(scanline_fill-1,(int) x0+1,e, x0,y_top, x0,y_bottom);
            } else {
               stbtt__handle_clipped_edge(scanline_fill-1,0,e, x0,y_top, x0,y_bottom);
            }
         }
      } else {
         float x0 = e->fx;
         float dx = e->fdx;
         float xb = x0 + dx;
         float x_top,
x_bottom;
         float sy0,sy1;
         float dy = e->fdy;
         ASSERT(e->sy <= y_bottom && e->ey >= y_top);

         // compute endpoints of line segment clipped to this scanline (if the
         // line segment starts on this scanline. x0 is the intersection of the
         // line with y_top, but that may be off the line segment.
         if (e->sy > y_top) {
            x_top = x0 + dx * (e->sy - y_top);
            sy0 = e->sy;
         } else {
            x_top = x0;
            sy0 = y_top;
         }
         if (e->ey < y_bottom) {
            x_bottom = x0 + dx * (e->ey - y_top);
            sy1 = e->ey;
         } else {
            x_bottom = xb;
            sy1 = y_bottom;
         }

         if (x_top >= 0 && x_bottom >= 0 && x_top < len && x_bottom < len) {
            // from here on, we don't have to range check x values

            if ((int) x_top == (int) x_bottom) {
               float height;
               // simple case, only spans one pixel
               int x = (int) x_top;
               height = (sy1 - sy0) * e->direction;
               ASSERT(x >= 0 && x < len);
               scanline[x] += stbtt__position_trapezoid_area(height, x_top, x+1.0f, x_bottom, x+1.0f);
               scanline_fill[x] += height; // everything right of this pixel is filled
            } else {
               int x,x1,x2;
               float y_crossing, y_final, step, sign, area;
               // covers 2+ pixels
               if (x_top > x_bottom) {
                  // flip scanline vertically; signed area is the same
                  float t;
                  sy0 = y_bottom - (sy0 - y_top);
                  sy1 = y_bottom - (sy1 - y_top);
                  t = sy0, sy0 = sy1, sy1 = t;
                  t = x_bottom, x_bottom = x_top, x_top = t;
                  dx = -dx;
                  dy = -dy;
                  t = x0, x0 = xb, xb = t;
               }
               ASSERT(dy >= 0);
               ASSERT(dx >= 0);

               x1 = (int) x_top;
               x2 = (int) x_bottom;
               // compute intersection with y axis at x1+1
               y_crossing = y_top + dy * (x1+1 - x0);
               // compute intersection with y axis at x2
               y_final = y_top + dy * (x2 - x0);

               //           x1    x_top                            x2    x_bottom
               //     y_top  +------|-----+------------+------------+--------|---+------------+
               //            |            |            |            |            |            |
               //            |            |            |            |            |            |
               //       sy0  |      Txxxxx|............|............|............|............|
               // y_crossing |            *xxxxx.......|............|............|............|
               //            |            |     xxxxx..|............|............|............|
               //            |            | /-  xx*xxxx........|............|............|
               //            |            | dy <  |    xxxxxx..|............|............|
               //    y_final |            | \-    |      |      xx*xxx.........|............|
               //       sy1  |            |       |      |      |     xxxxxB...|............|
               //            |            |       |      |      |     |        |            |
               //            |            |       |      |      |     |        |            |
               //   y_bottom +------------+------------+------------+------------+------------+
               //
               // goal is to measure the area covered by '.' in each pixel

               // if x2 is right at the right edge of x1, y_crossing can blow up, github #1057
               // @TODO: maybe test against sy1 rather than y_bottom?
               if (y_crossing > y_bottom)
                  y_crossing = y_bottom;

               sign = e->direction;

               // area of the rectangle covered from sy0..y_crossing
               area = sign * (y_crossing-sy0);

               // area of the triangle (x_top,sy0), (x1+1,sy0), (x1+1,y_crossing)
               scanline[x1] += stbtt__sized_triangle_area(area, x1+1 - x_top);

               // check if final y_crossing is blown up; no test case for this
               if (y_final > y_bottom) {
                  y_final = y_bottom;
                  dy = (y_final - y_crossing ) / (x2 - (x1+1)); // if denom=0, y_final = y_crossing, so y_final <= y_bottom
               }

               // in second pixel, area covered by line segment found in first pixel
               // is always a rectangle 1 wide * the height of that line segment; this
               // is exactly what the variable 'area' stores. it also gets a contribution
               // from the line segment within it. the THIRD pixel will get the first
               // pixel's rectangle contribution, the second pixel's rectangle contribution,
               // and its own contribution. the 'own contribution' is the same in every pixel except
               // the leftmost and rightmost, a trapezoid that slides down in each pixel.
               // the second pixel's contribution to the third pixel will be the
               // rectangle 1 wide times the height change in the second pixel, which is dy.
               step = sign * dy * 1; // dy is dy/dx, change in y for every 1 change in x,
               // which multiplied by 1-pixel-width is how much pixel area changes for each step in x
               // so the area advances by 'step' every time

               for (x = x1+1; x < x2; ++x) {
                  scanline[x] += area + step/2; // area of trapezoid is 1*step/2
                  area += step;
               }
               ASSERT(fabs(area) <= 1.01f); // accumulated error from area += step unless we round step down
               ASSERT(sy1 > y_final-0.01f);

               // area covered in the last pixel is the rectangle from all the pixels to the left,
               // plus the trapezoid filled by the line segment in this pixel all the way to the right edge
               scanline[x2] += area + sign * stbtt__position_trapezoid_area(sy1-y_final, (float) x2, x2+1.0f, x_bottom, x2+1.0f);

               // the rest of the line is filled based on the total height of the line segment in this pixel
               scanline_fill[x2] += sign * (sy1-sy0);
            }
         } else {
            // if edge goes outside of box we're drawing, we require
            // clipping logic. since this does not match the intended use
            // of this library, we use a different, very slow brute
            // force implementation
            // note though that this does happen some of the time because
            // x_top and x_bottom can be extrapolated at the top & bottom of
            // the shape and actually lie outside the bounding box
            int x;
            for (x=0; x < len; ++x) {
               // cases:
               //
               // there can be up to two intersections with the pixel. any intersection
               // with left or right edges can be handled by splitting into two (or three)
               // regions. intersections with top & bottom do not necessitate case-wise logic.
               //
               // the old way of doing this found the intersections with the left & right edges,
               // then used some simple logic to produce up to three segments in sorted order
               // from top-to-bottom. however, this had a problem: if an x edge was epsilon
               // across the x border, then the corresponding y position might not be distinct
               // from the other y segment, and it might ignored as an empty segment. to avoid
               // that, we need to explicitly produce segments based on x positions.

               // rename variables to clearly-defined pairs
               float y0 = y_top;
               float x1 = (float) (x);
               float x2 = (float) (x+1);
               float x3 = xb;
               float y3 = y_bottom;

               // x = e->x + e->dx * (y-y_top)
               // (y-y_top) = (x - e->x) / e->dx
               // y = (x - e->x) / e->dx + y_top
               float y1 = (x - x0) / dx + y_top;
               float y2 = (x+1 - x0) / dx + y_top;

               if (x0 < x1 && x3 > x2) {         // three segments descending down-right
                  stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x1,y1);
                  stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x2,y2);
                  stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3);
               } else if (x3 < x1 && x0 > x2) {  // three segments descending down-left
                  stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x2,y2);
                  stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x1,y1);
                  stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x3,y3);
               } else if (x0 < x1 && x3 > x1) {  // two segments across x, down-right
                  stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x1,y1);
                  stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x3,y3);
               } else if (x3 < x1 && x0 > x1) {  // two segments across x, down-left
                  stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x1,y1);
                  stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x3,y3);
               } else if (x0 < x2 && x3 > x2) {  // two segments across x+1, down-right
                  stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x2,y2);
                  stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3);
               } else if (x3 < x2 && x0 > x2) {  // two segments across x+1, down-left
                  stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x2,y2);
                  stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3);
               } else {                          // one segment
                  stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x3,y3);
               }
            }
         }
      }
      e = e->next;
   }
}

// directly AA rasterize edges w/o supersampling
static void stbtt__rasterize_sorted_edges(stbtt__bitmap *result, stbtt__edge *e, int n, int vsubsample, int off_x, int off_y, void *userdata)
{
   stbtt__hheap hh = { 0, 0, 0 };
   stbtt__active_edge *active = NULL;
   int y,j=0, i;
   float
scanline_data[129], *scanline, *scanline2;

   // scanline  = per-pixel partial coverage; scanline2 = "fill" deltas
   // (prefix-summed below); both carved from one allocation
   if (result->w > 64)
      scanline = STBTT_malloc((result->w*2+1) * sizeof(float), userdata);
   else
      scanline = scanline_data;

   scanline2 = scanline + result->w;

   y = off_y;
   // sentinel edge below the bitmap so the insertion loop terminates
   e[n].y0 = (float) (off_y + result->h) + 1;

   while (j < result->h) {
      // find center of pixel for this scanline
      float scan_y_top    = y + 0.0f;
      float scan_y_bottom = y + 1.0f;
      stbtt__active_edge **step = &active;

      memset(scanline , 0, result->w*sizeof(scanline[0]));
      memset(scanline2, 0, (result->w+1)*sizeof(scanline[0]));

      // update all active edges;
      // remove all active edges that terminate before the top of this scanline
      while (*step) {
         stbtt__active_edge * z = *step;
         if (z->ey <= scan_y_top) {
            *step = z->next; // delete from list
            ASSERT(z->direction);
            z->direction = 0;
            stbtt__hheap_STBTT_free(&hh, z);
         } else {
            step = &((*step)->next); // advance through list
         }
      }

      // insert all edges that start before the bottom of this scanline
      while (e->y0 <= scan_y_bottom) {
         if (e->y0 != e->y1) { // skip horizontal edges
            stbtt__active_edge *z = stbtt__new_active(&hh, e, off_x, scan_y_top, userdata);
            if (z != NULL) {
               if (j == 0 && off_y != 0) {
                  if (z->ey < scan_y_top) {
                     // this can happen due to subpixel positioning and some kind of fp rounding error i think
                     z->ey = scan_y_top;
                  }
               }
               ASSERT(z->ey >= scan_y_top); // if we get really unlucky a tiny bit of an edge can be out of bounds
               // insert at front
               z->next = active;
               active = z;
            }
         }
         ++e;
      }

      // now process all active edges
      if (active)
         stbtt__fill_active_edges_new(scanline, scanline2+1, result->w, active, scan_y_top);

      {
         // combine partial coverage with the running-sum fill and quantize to 0..255
         float sum = 0;
         for (i=0; i < result->w; ++i) {
            float k;
            int m;
            sum += scanline2[i];
            k = scanline[i] + sum;
            k = (float) fabs(k)*255 + 0.5f;
            m = (int) k;
            if (m > 255) m = 255;
            result->pixels[j*result->stride + i] = (unsigned char) m;
         }
      }
      // advance all the edges
      step = &active;
      while (*step) {
         stbtt__active_edge *z = *step;
         z->fx += z->fdx; // advance to position for current scanline
         step = &((*step)->next); // advance through list
      }

      ++y;
      ++j;
   }

   stbtt__hheap_cleanup(&hh, userdata);

   if (scanline != scanline_data)
      STBTT_free(scanline, userdata);
}
#else
#error "Unrecognized value of STBTT_RASTERIZER_VERSION"
#endif

// edge ordering used by both sort passes: ascending top y
#define STBTT__COMPARE(a,b)  ((a)->y0 < (b)->y0)

// insertion sort: finishes the small, nearly-sorted runs left by quicksort
static void stbtt__sort_edges_ins_sort(stbtt__edge *p, int n)
{
   int i,j;
   for (i=1; i < n; ++i) {
      stbtt__edge t = p[i], *a = &t;
      j = i;
      while (j > 0) {
         stbtt__edge *b = &p[j-1];
         int c = STBTT__COMPARE(a,b);
         if (!c) break;
         p[j] = p[j-1];
         --j;
      }
      if (i != j)
         p[j] = t;
   }
}

// median-of-three quicksort; leaves runs of <= 12 for the insertion pass
static void stbtt__sort_edges_quicksort(stbtt__edge *p, int n)
{
   /* threshold for transitioning to insertion sort */
   while (n > 12) {
      stbtt__edge t;
      int c01,c12,c,m,i,j;

      /* compute median of three */
      m = n >> 1;
      c01 = STBTT__COMPARE(&p[0],&p[m]);
      c12 = STBTT__COMPARE(&p[m],&p[n-1]);
      /* if 0 >= mid >= end, or 0 < mid < end, then use mid */
      if (c01 != c12) {
         /* otherwise, we'll need to swap something else to middle */
         int z;
         c = STBTT__COMPARE(&p[0],&p[n-1]);
         /* 0>mid && mid<n:  0>n => n; 0<n => 0 */
         /* 0<mid && mid>n:  0>n => 0; 0<n => n */
         z = (c == c12) ?
0 : n-1; t = p[z]; p[z] = p[m]; p[m] = t; } /* now p[m] is the median-of-three */ /* swap it to the beginning so it won't move around */ t = p[0]; p[0] = p[m]; p[m] = t; /* partition loop */ i=1; j=n-1; for(;;) { /* handling of equality is crucial here */ /* for sentinels & efficiency with duplicates */ for (;;++i) { if (!STBTT__COMPARE(&p[i], &p[0])) break; } for (;;--j) { if (!STBTT__COMPARE(&p[0], &p[j])) break; } /* make sure we haven't crossed */ if (i >= j) break; t = p[i]; p[i] = p[j]; p[j] = t; ++i; --j; } /* recurse on smaller side, iterate on larger */ if (j < (n-i)) { stbtt__sort_edges_quicksort(p,j); p = p+i; n = n-i; } else { stbtt__sort_edges_quicksort(p+i, n-i); n = j; } } } static void stbtt__sort_edges(stbtt__edge *p, int n) { stbtt__sort_edges_quicksort(p, n); stbtt__sort_edges_ins_sort(p, n); } static void stbtt__rasterize(stbtt__bitmap *result, stbtt__point *pts, int *wcount, int windings, float scale_x, float scale_y, float shift_x, float shift_y, int off_x, int off_y, int invert, void *userdata) { float y_scale_inv = invert ? -scale_y : scale_y; stbtt__edge *e; int n,i,j,k,m; #if STBTT_RASTERIZER_VERSION == 1 int vsubsample = result->h < 8 ? 15 : 5; #elif STBTT_RASTERIZER_VERSION == 2 int vsubsample = 1; #else #error "Unrecognized value of STBTT_RASTERIZER_VERSION" #endif // vsubsample should divide 255 evenly; otherwise we won't reach full opacity // now we have to blow out the windings into explicit edge lists n = 0; for (i=0; i < windings; ++i) n += wcount[i]; e = STBTT_malloc(sizeof(*e) * (n+1), userdata); // add an extra one as a sentinel if (e == 0) return; n = 0; m=0; for (i=0; i < windings; ++i) { stbtt__point *p = pts + m; m += wcount[i]; j = wcount[i]-1; for (k=0; k < wcount[i]; j=k++) { int a=k,b=j; // skip the edge if horizontal if (p[j].y == p[k].y) continue; // add edge from j to k to the list e[n].invert = 0; if (invert ? 
p[j].y > p[k].y : p[j].y < p[k].y) { e[n].invert = 1; a=j,b=k; } e[n].x0 = p[a].x * scale_x + shift_x; e[n].y0 = (p[a].y * y_scale_inv + shift_y) * vsubsample; e[n].x1 = p[b].x * scale_x + shift_x; e[n].y1 = (p[b].y * y_scale_inv + shift_y) * vsubsample; ++n; } } // now sort the edges by their highest point (should snap to integer, and then by x) //STBTT_sort(e, n, sizeof(e[0]), stbtt__edge_compare); stbtt__sort_edges(e, n); // now, traverse the scanlines and find the intersections on each scanline, use xor winding rule stbtt__rasterize_sorted_edges(result, e, n, vsubsample, off_x, off_y, userdata); STBTT_free(e, userdata); } static void stbtt__add_point(stbtt__point *points, int n, float x, float y) { if (!points) return; // during first pass, it's unallocated points[n].x = x; points[n].y = y; } // tessellate until threshold p is happy... @TODO warped to compensate for non-linear stretching static int stbtt__tesselate_curve(stbtt__point *points, int *num_points, float x0, float y0, float x1, float y1, float x2, float y2, float objspace_flatness_squared, int n) { // midpoint float mx = (x0 + 2*x1 + x2)/4; float my = (y0 + 2*y1 + y2)/4; // versus directly drawn line float dx = (x0+x2)/2 - mx; float dy = (y0+y2)/2 - my; if (n > 16) // 65536 segments on one curve better be enough! return 1; if (dx*dx+dy*dy > objspace_flatness_squared) { // half-pixel error allowed... 
need to be smaller if AA stbtt__tesselate_curve(points, num_points, x0,y0, (x0+x1)/2.0f,(y0+y1)/2.0f, mx,my, objspace_flatness_squared,n+1); stbtt__tesselate_curve(points, num_points, mx,my, (x1+x2)/2.0f,(y1+y2)/2.0f, x2,y2, objspace_flatness_squared,n+1); } else { stbtt__add_point(points, *num_points,x2,y2); *num_points = *num_points+1; } return 1; } static void stbtt__tesselate_cubic(stbtt__point *points, int *num_points, float x0, float y0, float x1, float y1, float x2, float y2, float x3, float y3, float objspace_flatness_squared, int n) { // @TODO this "flatness" calculation is just made-up nonsense that seems to work well enough float dx0 = x1-x0; float dy0 = y1-y0; float dx1 = x2-x1; float dy1 = y2-y1; float dx2 = x3-x2; float dy2 = y3-y2; float dx = x3-x0; float dy = y3-y0; float longlen = (float) (sqrt(dx0*dx0+dy0*dy0)+sqrt(dx1*dx1+dy1*dy1)+sqrt(dx2*dx2+dy2*dy2)); float shortlen = (float) sqrt(dx*dx+dy*dy); float flatness_squared = longlen*longlen-shortlen*shortlen; if (n > 16) // 65536 segments on one curve better be enough! 
return; if (flatness_squared > objspace_flatness_squared) { float x01 = (x0+x1)/2; float y01 = (y0+y1)/2; float x12 = (x1+x2)/2; float y12 = (y1+y2)/2; float x23 = (x2+x3)/2; float y23 = (y2+y3)/2; float xa = (x01+x12)/2; float ya = (y01+y12)/2; float xb = (x12+x23)/2; float yb = (y12+y23)/2; float mx = (xa+xb)/2; float my = (ya+yb)/2; stbtt__tesselate_cubic(points, num_points, x0,y0, x01,y01, xa,ya, mx,my, objspace_flatness_squared,n+1); stbtt__tesselate_cubic(points, num_points, mx,my, xb,yb, x23,y23, x3,y3, objspace_flatness_squared,n+1); } else { stbtt__add_point(points, *num_points,x3,y3); *num_points = *num_points+1; } } // returns number of contours static stbtt__point *stbtt_FlattenCurves(stbtt_vertex *vertices, int num_verts, float objspace_flatness, int **contour_lengths, int *num_contours, void *userdata) { stbtt__point *points=0; int num_points=0; float objspace_flatness_squared = objspace_flatness * objspace_flatness; int i,n=0,start=0, pass; // count how many "moves" there are to get the contour count for (i=0; i < num_verts; ++i) if (vertices[i].type == STBTT_vmove) ++n; *num_contours = n; if (n == 0) return 0; *contour_lengths = STBTT_malloc(sizeof(**contour_lengths) * n, userdata); if (*contour_lengths == 0) { *num_contours = 0; return 0; } // make two passes through the points so we don't need to realloc for (pass=0; pass < 2; ++pass) { float x=0,y=0; if (pass == 1) { points = STBTT_malloc(num_points * sizeof(points[0]), userdata); if (points == NULL) goto error; } num_points = 0; n= -1; for (i=0; i < num_verts; ++i) { switch (vertices[i].type) { case STBTT_vmove: // start the next contour if (n >= 0) (*contour_lengths)[n] = num_points - start; ++n; start = num_points; x = vertices[i].x, y = vertices[i].y; stbtt__add_point(points, num_points++, x,y); break; case STBTT_vline: x = vertices[i].x, y = vertices[i].y; stbtt__add_point(points, num_points++, x, y); break; case STBTT_vcurve: stbtt__tesselate_curve(points, &num_points, x,y, vertices[i].cx, 
vertices[i].cy, vertices[i].x, vertices[i].y, objspace_flatness_squared, 0); x = vertices[i].x, y = vertices[i].y; break; case STBTT_vcubic: stbtt__tesselate_cubic(points, &num_points, x,y, vertices[i].cx, vertices[i].cy, vertices[i].cx1, vertices[i].cy1, vertices[i].x, vertices[i].y, objspace_flatness_squared, 0); x = vertices[i].x, y = vertices[i].y; break; } } (*contour_lengths)[n] = num_points - start; } return points; error: STBTT_free(points, userdata); STBTT_free(*contour_lengths, userdata); *contour_lengths = 0; *num_contours = 0; return NULL; } // Rasterizes shape with quadratic beziers into a bitmap. // // @param result 1-channel bitmap to draw into // @param flatness_in_pixels allowable error of curve in pixels // @param vertices array of vertices defining shape // @param num_verts number of vertices in above array // @param scale_x scale_y scale applied to input vertices // @param shift_x shift_y translation applied to input vertices // @param x_off y_off another translation applied to input // @param invert if non-zero, vertically flip shape // @param userdata context for to STBTT_MALLOC void stbtt_Rasterize(stbtt__bitmap *result, float flatness_in_pixels, stbtt_vertex *vertices, int num_verts, float scale_x, float scale_y, float shift_x, float shift_y, int x_off, int y_off, int invert, void *userdata) { float scale = scale_x > scale_y ? 
scale_y : scale_x; int winding_count = 0; int *winding_lengths = NULL; stbtt__point *windings = stbtt_FlattenCurves(vertices, num_verts, flatness_in_pixels / scale, &winding_lengths, &winding_count, userdata); if (windings) { stbtt__rasterize(result, windings, winding_lengths, winding_count, scale_x, scale_y, shift_x, shift_y, x_off, y_off, invert, userdata); STBTT_free(winding_lengths, userdata); STBTT_free(windings, userdata); } } // frees the bitmap allocated below void stbtt_FreeBitmap(unsigned char *bitmap, void *userdata) { STBTT_free(bitmap, userdata); } unsigned char *stbtt_GetGlyphBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int glyph, int *width, int *height, int *xoff, int *yoff) { int ix0, iy0, ix1, iy1; stbtt__bitmap gbm; stbtt_vertex *vertices; int num_verts = stbtt_GetGlyphShape(info, glyph, &vertices); if (scale_x == 0) scale_x = scale_y; if (scale_y == 0) { if (scale_x == 0) { STBTT_free(vertices, info->userdata); return NULL; } scale_y = scale_x; } stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale_x, scale_y, shift_x, shift_y, &ix0, &iy0, &ix1, &iy1); // now we get the size gbm.w = (ix1 - ix0); gbm.h = (iy1 - iy0); gbm.pixels = NULL; // in case we error if (width) *width = gbm.w; if (height) *height = gbm.h; if (xoff) *xoff = ix0; if (yoff) *yoff = iy0; if (gbm.w && gbm.h) { gbm.pixels = STBTT_malloc(gbm.w * gbm.h, info->userdata); if (gbm.pixels) { gbm.stride = gbm.w; stbtt_Rasterize(&gbm, 0.35f, vertices, num_verts, scale_x, scale_y, shift_x, shift_y, ix0, iy0, 1, info->userdata); } } STBTT_free(vertices, info->userdata); return gbm.pixels; } unsigned char *stbtt_GetGlyphBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int glyph, int *width, int *height, int *xoff, int *yoff) { return stbtt_GetGlyphBitmapSubpixel(info, scale_x, scale_y, 0.0f, 0.0f, glyph, width, height, xoff, yoff); } void stbtt_MakeGlyphBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, 
int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int glyph) { int ix0, iy0; stbtt_vertex *vertices; int num_verts = stbtt_GetGlyphShape(info, glyph, &vertices); stbtt__bitmap gbm; stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale_x, scale_y, shift_x, shift_y, &ix0, &iy0, 0, 0); gbm.pixels = output; gbm.w = out_w; gbm.h = out_h; gbm.stride = out_stride; if (gbm.w && gbm.h) stbtt_Rasterize(&gbm, 0.35f, vertices, num_verts, scale_x, scale_y, shift_x, shift_y, ix0, iy0, 1, info->userdata); STBTT_free(vertices, info->userdata); } void stbtt_MakeGlyphBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int glyph) { stbtt_MakeGlyphBitmapSubpixel(info, output, out_w, out_h, out_stride, scale_x, scale_y, 0.0f, 0.0f, glyph); } // the same as stbtt_GetCodepoitnBitmap, but you can specify a subpixel // shift for the character void *stbtt_GetCodepointBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint, int *width, int *height, int *xoff, int *yoff) { return stbtt_GetGlyphBitmapSubpixel(info, scale_x, scale_y, shift_x, shift_y, stbtt_FindGlyphIndex(info, codepoint), width, height, xoff, yoff); } // same as stbtt_MakeCodepointBitmapSubpixel, but prefiltering // is performed (see stbtt_PackSetOversampling) void stbtt_MakeCodepointBitmapSubpixelPrefilter( const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int oversample_x, int oversample_y, float *sub_x, float *sub_y, int codepoint) { stbtt_MakeGlyphBitmapSubpixelPrefilter( info, output, out_w, out_h, out_stride, scale_x, scale_y, shift_x, shift_y, oversample_x, oversample_y, sub_x, sub_y, stbtt_FindGlyphIndex(info, codepoint)); } // same as stbtt_MakeCodepointBitmap, but you can specify a subpixel // shift for the character void 
stbtt_MakeCodepointBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint) { stbtt_MakeGlyphBitmapSubpixel(info, output, out_w, out_h, out_stride, scale_x, scale_y, shift_x, shift_y, stbtt_FindGlyphIndex(info, codepoint)); } // allocates a large-enough single-channel 8bpp bitmap and renders the // specified character/glyph at the specified scale into it, with // antialiasing. 0 is no coverage (transparent), 255 is fully covered (opaque). // *width & *height are filled out with the width & height of the bitmap, // which is stored left-to-right, top-to-bottom. // // xoff/yoff are the offset it pixel space from the glyph origin to the top-left // of the bitmap void *stbtt_GetCodepointBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int codepoint, int *width, int *height, int *xoff, int *yoff) { return stbtt_GetCodepointBitmapSubpixel(info, scale_x, scale_y, 0.0f, 0.0f, codepoint, width, height, xoff, yoff); } // the same as stbtt_GetCodepointBitmap, but you pass in storage for the bitmap // in the form of 'output', with row spacing of 'out_stride' bytes. the bitmap // is clipped to out_w/out_h bytes. Call stbtt_GetCodepointBitmapBox to get the // width and height and positioning info for it first. 
// Renders a single codepoint into caller-provided storage with no subpixel
// shift; thin wrapper around stbtt_MakeCodepointBitmapSubpixel.
void stbtt_MakeCodepointBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int codepoint)
{
   stbtt_MakeCodepointBitmapSubpixel(info, output, out_w, out_h, out_stride, scale_x, scale_y, 0.0f, 0.0f, codepoint);
}

//////////////////////////////////////////////////////////////////////////////
//
// bitmap baking
//
// This is SUPER-CRAPPY packing to keep source code small

// Bakes glyphs [first_char, first_char+num_chars) into 'pixels' using a
// simple left-to-right, row-by-row shelf packer with a 1px gutter.
//
// if return is positive, the first unused row of the bitmap
// if return is negative, returns the negative of the number of characters that fit
// if return is 0, no characters fit and no rows were used
// This uses a very crappy packing.
static int stbtt_BakeFontBitmap_internal(unsigned char *data, int offset,        // font location (use offset=0 for plain .ttf)
                                         float pixel_height,                     // height of font in pixels
                                         unsigned char *pixels, int pw, int ph,  // bitmap to be filled in
                                         int first_char, int num_chars,          // characters to bake
                                         stbtt_bakedchar *chardata)
{
   float scale;
   int x,y,bottom_y, i;   // (x,y): cursor for next glyph; bottom_y: lowest row used so far
   stbtt_fontinfo f;
   f.userdata = NULL;
   if (!stbtt_InitFont(&f, data, offset))
      return -1;  // font data unparseable
   memset(pixels, 0, pw*ph); // background of 0 around pixels
   // start at (1,1) so every glyph has a 1px transparent border
   x=y=1;
   bottom_y = 1;

   scale = stbtt_ScaleForPixelHeight(&f, pixel_height);

   for (i=0; i < num_chars; ++i) {
      int advance, lsb, x0,y0,x1,y1,gw,gh;
      int g = stbtt_FindGlyphIndex(&f, first_char + i);
      stbtt_GetGlyphHMetrics(&f, g, &advance, &lsb);
      stbtt_GetGlyphBitmapBox(&f, g, scale,scale, &x0,&y0,&x1,&y1);
      gw = x1-x0;
      gh = y1-y0;
      if (x + gw + 1 >= pw)
         y = bottom_y, x = 1; // advance to next row
      if (y + gh + 1 >= ph) // check if it fits vertically AFTER potentially moving to next row
         return -i;  // ran out of room; -i = negative count of glyphs baked
      ASSERT(x+gw < pw);
      ASSERT(y+gh < ph);
      stbtt_MakeGlyphBitmap(&f, pixels+x+y*pw, gw,gh,pw, scale,scale, g);
      // record where this glyph landed plus its draw metrics
      chardata[i].x0 = (int16_t) x;
      chardata[i].y0 = (int16_t) y;
      chardata[i].x1 = (int16_t) (x + gw);
      chardata[i].y1 = (int16_t) (y + gh);
      chardata[i].xadvance = scale * advance;
      chardata[i].xoff     = (float) x0;
      chardata[i].yoff     = (float) y0;
      x = x + gw + 1;
      if (y+gh+1 > bottom_y)
         bottom_y = y+gh+1;
   }
   return bottom_y;
}

// Call GetBakedQuad with char_index = 'character - first_char', and it
// creates the quad you need to draw and advances the current position.
//
// The coordinate system used assumes y increases downwards.
//
// Characters will extend both above and below the current position;
// see discussion of "BASELINE" above.
//
// It's inefficient; you might want to c&p it and optimize it.
void stbtt_GetBakedQuad(const stbtt_bakedchar *chardata, int pw, int ph, int char_index, float *xpos, float *ypos, stbtt_aligned_quad *q, int opengl_fillrule)
{
   // D3D9 maps texels to pixels with a -0.5 offset; OpenGL fill rule needs none
   float d3d_bias = opengl_fillrule ? 0 : -0.5f;
   float ipw = 1.0f / pw, iph = 1.0f / ph;  // reciprocal bitmap size for UVs
   const stbtt_bakedchar *b = chardata + char_index;
   // snap the quad to integer pixel positions (baked fonts are not subpixel)
   int round_x = STBTT_ifloor((*xpos + b->xoff) + 0.5f);
   int round_y = STBTT_ifloor((*ypos + b->yoff) + 0.5f);

   // screen-space corners
   q->x0 = round_x + d3d_bias;
   q->y0 = round_y + d3d_bias;
   q->x1 = round_x + b->x1 - b->x0 + d3d_bias;
   q->y1 = round_y + b->y1 - b->y0 + d3d_bias;

   // normalized texture coordinates
   q->s0 = b->x0 * ipw;
   q->t0 = b->y0 * iph;
   q->s1 = b->x1 * ipw;
   q->t1 = b->y1 * iph;

   *xpos += b->xadvance;
}

//////////////////////////////////////////////////////////////////////////////
//
// bitmap baking
//
// This is SUPER-AWESOME (tm Ryan Gordon) packing using stb_rect_pack.h. If
// stb_rect_pack.h isn't available, it uses the BakeFontBitmap strategy.

// Initializes packing context stored in passed-in stbtt_pack_context.
// Future calls using this context will pack characters into the bitmap
// passed in here: a 1-channel bitmap that is width * height.
// stride_in_bytes is the distance from one row to the next (or 0 to
// mean they are packed tightly together). "padding" is the amount of
// padding to leave between each character (normally you want '1' for
// bitmaps you'll use as textures with bilinear filtering).
//
// Returns 0 on failure, 1 on success.
int stbtt_PackBegin(stbtt_pack_context *spc, unsigned char *pixels, int pw, int ph, int stride_in_bytes, int padding, void *alloc_context)
{
   // One rect-packer node per usable column of the target bitmap.
   int node_count = pw - padding;
   stbrp_context *pack_state   = STBTT_malloc(sizeof(*pack_state), alloc_context);
   stbrp_node    *node_storage = STBTT_malloc(sizeof(*node_storage) * node_count, alloc_context);

   if (pack_state == NULL || node_storage == NULL) {
      // partial allocation: release whichever half succeeded before failing
      if (pack_state != NULL)
         STBTT_free(pack_state, alloc_context);
      if (node_storage != NULL)
         STBTT_free(node_storage, alloc_context);
      return 0;
   }

   spc->user_allocator_context = alloc_context;
   spc->width           = pw;
   spc->height          = ph;
   spc->pixels          = pixels;
   spc->pack_info       = pack_state;
   spc->nodes           = node_storage;
   spc->padding         = padding;
   spc->stride_in_bytes = (stride_in_bytes != 0) ? stride_in_bytes : pw; // 0 means tightly packed rows
   spc->h_oversample    = 1;  // no oversampling until stbtt_PackSetOversampling is called
   spc->v_oversample    = 1;
   spc->skip_missing    = 0;

   stbrp_init_target(pack_state, pw - padding, ph - padding, node_storage, node_count);

   if (pixels != NULL)
      memset(pixels, 0, pw * ph); // background of 0 around pixels
   return 1;
}

// Cleans up the packing context and frees all memory.
void stbtt_PackEnd(stbtt_pack_context *spc)
{
   STBTT_free(spc->nodes,     spc->user_allocator_context);
   STBTT_free(spc->pack_info, spc->user_allocator_context);
}

// Oversampling a font increases the quality by allowing higher-quality subpixel
// positioning, and is especially valuable at smaller text sizes.
//
// This function sets the amount of oversampling for all following calls to
// stbtt_PackFontRange(s) or stbtt_PackFontRangesGatherRects for a given
// pack context. The default (no oversampling) is achieved by h_oversample=1
// and v_oversample=1. The total number of pixels required is
// h_oversample*v_oversample larger than the default; for example, 2x2
// oversampling requires 4x the storage of 1x1. For best results, render
// oversampled textures with bilinear filtering. Look at the readme in
// stb/tests/oversample for information about oversampled fonts.
//
// To use with PackFontRangesGather etc., you must set it before the
// call to PackFontRangesGatherRects.
// Sets the oversampling factors for subsequent packing calls on 'spc'.
// Values above STBTT_MAX_OVERSAMPLE are asserted on and then ignored,
// leaving the previous setting in place.
void stbtt_PackSetOversampling(stbtt_pack_context *spc, unsigned int h_oversample, unsigned int v_oversample)
{
   ASSERT(h_oversample <= STBTT_MAX_OVERSAMPLE);
   ASSERT(v_oversample <= STBTT_MAX_OVERSAMPLE);
   if (h_oversample <= STBTT_MAX_OVERSAMPLE)
      spc->h_oversample = h_oversample;
   if (v_oversample <= STBTT_MAX_OVERSAMPLE)
      spc->v_oversample = v_oversample;
}

// If skip != 0, this tells stb_truetype to skip any codepoints for which
// there is no corresponding glyph. If skip=0, which is the default, then
// codepoints without a glyph receive the font's "missing character" glyph,
// typically an empty box by convention.
void stbtt_PackSetSkipMissingCodepoints(stbtt_pack_context *spc, int skip)
{
   spc->skip_missing = skip;
}

// Horizontal box filter of width 'kernel_width' applied in place to each of
// the h rows of 'pixels', using a small ring buffer to hold the trailing
// window. Downsamples the oversampled glyph horizontally.
static void stbtt__h_prefilter(unsigned char *pixels, int w, int h, int stride_in_bytes, unsigned int kernel_width)
{
   unsigned char buffer[STBTT_MAX_OVERSAMPLE];  // ring buffer of the last kernel_width input pixels
   int safe_w = w - kernel_width;               // last index where a full window fits
   int j;
   memset(buffer, 0, STBTT_MAX_OVERSAMPLE); // suppress bogus warning from VS2013 -analyze
   for (j=0; j < h; ++j) {
      int i;
      unsigned int total = 0;         // running sum of the current window
      memset(buffer, 0, kernel_width);

      // make kernel_width a constant in common cases so compiler can optimize out the divide
      switch (kernel_width) {
         case 2:
            for (i=0; i <= safe_w; ++i) {
               total += pixels[i] - buffer[i & STBTT__OVER_MASK];
               buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i];
               pixels[i] = total / 2;
            }
            break;
         case 3:
            for (i=0; i <= safe_w; ++i) {
               total += pixels[i] - buffer[i & STBTT__OVER_MASK];
               buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i];
               pixels[i] = total / 3;
            }
            break;
         case 4:
            for (i=0; i <= safe_w; ++i) {
               total += pixels[i] - buffer[i & STBTT__OVER_MASK];
               buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i];
               pixels[i] = total / 4;
            }
            break;
         case 5:
            for (i=0; i <= safe_w; ++i) {
               total += pixels[i] - buffer[i & STBTT__OVER_MASK];
               buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i];
               pixels[i] = total / 5;
            }
            break;
         default:
            for (i=0; i <= safe_w; ++i) {
               total += pixels[i] - buffer[i & STBTT__OVER_MASK];
               buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i];
               pixels[i] = total / kernel_width;
            }
            break;
      }

      // tail: the window hangs past the right edge; remaining input is all 0
      for (; i < w; ++i) {
         ASSERT(pixels[i] == 0);
         total -= buffer[i & STBTT__OVER_MASK];
         pixels[i] = total / kernel_width;
      }

      pixels += stride_in_bytes;  // next row
   }
}

// Vertical counterpart of stbtt__h_prefilter: box filter of width
// 'kernel_width' applied in place down each of the w columns.
static void stbtt__v_prefilter(unsigned char *pixels, int w, int h, int stride_in_bytes, unsigned int kernel_width)
{
   unsigned char buffer[STBTT_MAX_OVERSAMPLE];  // ring buffer of the last kernel_width input pixels
   int safe_h = h - kernel_width;               // last index where a full window fits
   int j;
   memset(buffer, 0, STBTT_MAX_OVERSAMPLE); // suppress bogus warning from VS2013 -analyze
   for (j=0; j < w; ++j) {
      int i;
      unsigned int total;
      memset(buffer, 0, kernel_width);

      total = 0;

      // make kernel_width a constant in common cases so compiler can optimize out the divide
      switch (kernel_width) {
         case 2:
            for (i=0; i <= safe_h; ++i) {
               total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK];
               buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes];
               pixels[i*stride_in_bytes] = total / 2;
            }
            break;
         case 3:
            for (i=0; i <= safe_h; ++i) {
               total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK];
               buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes];
               pixels[i*stride_in_bytes] = total / 3;
            }
            break;
         case 4:
            for (i=0; i <= safe_h; ++i) {
               total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK];
               buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes];
               pixels[i*stride_in_bytes] = total / 4;
            }
            break;
         case 5:
            for (i=0; i <= safe_h; ++i) {
               total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK];
               buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes];
               pixels[i*stride_in_bytes] = total / 5;
            }
            break;
         default:
            for (i=0; i <= safe_h; ++i) {
               total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK];
               buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes];
               pixels[i*stride_in_bytes] = total / kernel_width;
            }
            break;
      }

      // tail: the window hangs past the bottom edge; remaining input is all 0
      for (; i < h; ++i) {
         ASSERT(pixels[i*stride_in_bytes] == 0);
         total -= buffer[i & STBTT__OVER_MASK];
         pixels[i*stride_in_bytes] = total / kernel_width;
      }

      pixels += 1;  // next column
   }
}

// Returns the subpixel offset that compensates for the phase shift
// introduced by the prefilter box filter of the given width.
static float stbtt__oversample_shift(int oversample)
{
   if (!oversample)
      return 0.0f;

   // The prefilter is a box filter of width "oversample",
   // which shifts phase by (oversample - 1)/2 pixels in
   // oversampled space. We want to shift in the opposite
   // direction to counter this.
   return (float)-(oversample - 1) / (2.0f * (float)oversample);
}

// Calling these functions
//
// - stbtt_PackFontRangesGatherRects
// - stbtt_PackFontRangesPackRects
// - stbtt_PackFontRangesRenderIntoRects
//
// in sequence is roughly equivalent to calling stbtt_PackFontRanges().
// If you want more control over the packing of multiple fonts, or if you
// want to pack custom data into a font texture, take a look at the
// source of stbtt_PackFontRanges() and create a custom version using
// these functions, e.g. call GatherRects multiple times, building up a
// single array of rects, then call PackRects once, then call
// RenderIntoRects repeatedly. This may result in a better packing than
// calling PackFontRanges multiple times (or it may not).
// // rects array must be big enough to accommodate all characters in the given ranges int stbtt_PackFontRangesGatherRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects) { float fh,scale; int i,j,k,x0,y0,x1,y1,glyph,codepoint,missing_glyph_added; for (missing_glyph_added=k=i=0; i < num_ranges; ++i) { if ((fh = ranges[i].font_size) > 0) { scale = stbtt_ScaleForPixelHeight(info, fh); } else { scale = stbtt_ScaleForMappingEmToPixels(info, -fh); } ranges[i].h_oversample = spc->h_oversample; ranges[i].v_oversample = spc->v_oversample; for (j=0; j < ranges[i].num_chars; ++j) { if (!ranges[i].array_of_unicode_codepoints) { codepoint = ranges[i].first_unicode_codepoint_in_range + j; } else { codepoint = ranges[i].array_of_unicode_codepoints[j]; } glyph = stbtt_FindGlyphIndex(info, codepoint); if (glyph == 0 && (spc->skip_missing || missing_glyph_added)) { rects[k].w = rects[k].h = 0; } else { stbtt_GetGlyphBitmapBoxSubpixel(info,glyph, scale * spc->h_oversample, scale * spc->v_oversample, 0,0, &x0,&y0,&x1,&y1); rects[k].w = x1-x0 + spc->padding + spc->h_oversample-1; rects[k].h = y1-y0 + spc->padding + spc->v_oversample-1; if (glyph == 0) missing_glyph_added = 1; } ++k; } } return k; } void stbtt_MakeGlyphBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int prefilter_x, int prefilter_y, float *sub_x, float *sub_y, int glyph) { stbtt_MakeGlyphBitmapSubpixel(info, output, out_w - (prefilter_x - 1), out_h - (prefilter_y - 1), out_stride, scale_x, scale_y, shift_x, shift_y, glyph); if (prefilter_x > 1) stbtt__h_prefilter(output, out_w, out_h, out_stride, prefilter_x); if (prefilter_y > 1) stbtt__v_prefilter(output, out_w, out_h, out_stride, prefilter_y); *sub_x = stbtt__oversample_shift(prefilter_x); *sub_y = stbtt__oversample_shift(prefilter_y); } // Calling these functions // // - 
stbtt_PackFontRangesGatherRects
// - stbtt_PackFontRangesPackRects
// - stbtt_PackFontRangesRenderIntoRects
//
// in sequence is roughly equivalent to calling stbtt_PackFontRanges().
// If you more control over the packing of multiple fonts, or if you
// want to pack custom data into a font texture, take a look at the
// source to of stbtt_PackFontRanges() and create a custom version using
// these functions, e.g. call GatherRects multiple times, building up a
// single array of rects, then call PackRects once, then call
// RenderIntoRects repeatedly. This may result in a better packing than
// calling PackFontRanges multiple times (or it may not).
//
// rects array must be big enough to accommodate all characters in the given ranges
int stbtt_PackFontRangesRenderIntoRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects)
{
   // missing_glyph: index within the current range whose packed bitmap is the
   // font's "missing character", reused for later codepoints with no glyph
   int i,j,k, missing_glyph = -1, return_value = 1;

   // save current values
   int old_h_over = spc->h_oversample;
   int old_v_over = spc->v_oversample;

   k = 0;  // flat index into 'rects' across all ranges
   for (i=0; i < num_ranges; ++i) {
      float fh = ranges[i].font_size;
      // positive font_size is pixel height; negative means "em to pixels" mapping
      float scale = fh > 0 ? stbtt_ScaleForPixelHeight(info, fh) : stbtt_ScaleForMappingEmToPixels(info, -fh);
      float recip_h,recip_v,sub_x,sub_y;
      // restore the oversampling that was in effect when this range was gathered
      spc->h_oversample = ranges[i].h_oversample;
      spc->v_oversample = ranges[i].v_oversample;
      recip_h = 1.0f / spc->h_oversample;
      recip_v = 1.0f / spc->v_oversample;
      sub_x = stbtt__oversample_shift(spc->h_oversample);
      sub_y = stbtt__oversample_shift(spc->v_oversample);
      for (j=0; j < ranges[i].num_chars; ++j) {
         stbrp_rect *r = &rects[k];
         if (r->was_packed && r->w != 0 && r->h != 0) {
            stbtt_packedchar *bc = &ranges[i].chardata_for_range[j];
            int advance, lsb, x0,y0,x1,y1;
            int codepoint = ranges[i].array_of_unicode_codepoints == NULL ? ranges[i].first_unicode_codepoint_in_range + j : ranges[i].array_of_unicode_codepoints[j];
            int glyph = stbtt_FindGlyphIndex(info, codepoint);
            stbrp_coord pad = (stbrp_coord) spc->padding;

            // pad on left and top
            r->x += pad;
            r->y += pad;
            r->w -= pad;
            r->h -= pad;
            stbtt_GetGlyphHMetrics(info, glyph, &advance, &lsb);
            stbtt_GetGlyphBitmapBox(info, glyph, scale * spc->h_oversample, scale * spc->v_oversample, &x0,&y0,&x1,&y1);
            // render at reduced size; the prefilter fills the oversample margin
            stbtt_MakeGlyphBitmapSubpixel(info,
                                          spc->pixels + r->x + r->y*spc->stride_in_bytes,
                                          r->w - spc->h_oversample+1,
                                          r->h - spc->v_oversample+1,
                                          spc->stride_in_bytes,
                                          scale * spc->h_oversample,
                                          scale * spc->v_oversample,
                                          0,0,
                                          glyph);

            if (spc->h_oversample > 1)
               stbtt__h_prefilter(spc->pixels + r->x + r->y*spc->stride_in_bytes,
                                  r->w, r->h, spc->stride_in_bytes,
                                  spc->h_oversample);

            if (spc->v_oversample > 1)
               stbtt__v_prefilter(spc->pixels + r->x + r->y*spc->stride_in_bytes,
                                  r->w, r->h, spc->stride_in_bytes,
                                  spc->v_oversample);

            // record atlas location and metrics (offsets scaled back down
            // from oversampled space, with the prefilter phase correction)
            bc->x0       = (int16_t)  r->x;
            bc->y0       = (int16_t)  r->y;
            bc->x1       = (int16_t) (r->x + r->w);
            bc->y1       = (int16_t) (r->y + r->h);
            bc->xadvance =                scale * advance;
            bc->xoff     =       (float)  x0 * recip_h + sub_x;
            bc->yoff     =       (float)  y0 * recip_v + sub_y;
            bc->xoff2    =                (x0 + r->w) * recip_h + sub_x;
            bc->yoff2    =                (y0 + r->h) * recip_v + sub_y;

            if (glyph == 0)
               missing_glyph = j;
         } else if (spc->skip_missing) {
            return_value = 0;
         } else if (r->was_packed && r->w == 0 && r->h == 0 && missing_glyph >= 0) {
            // zero-sized rect reserved by GatherRects: alias the already-packed
            // missing-character glyph
            ranges[i].chardata_for_range[j] = ranges[i].chardata_for_range[missing_glyph];
         } else {
            return_value = 0; // if any fail, report failure
         }

         ++k;
      }
   }

   // restore original values
   spc->h_oversample = old_h_over;
   spc->v_oversample = old_v_over;

   return return_value;
}

// Hands the gathered rects to the stb_rect_pack packer, which assigns
// (x,y) positions and sets was_packed on each.
void stbtt_PackFontRangesPackRects(stbtt_pack_context *spc, stbrp_rect *rects, int num_rects)
{
   stbrp_pack_rects((stbrp_context *) spc->pack_info, rects, num_rects);
}

// Creates character bitmaps from multiple ranges of characters stored in
// ranges. 
This will usually create a better-packed bitmap than multiple
// calls to stbtt_PackFontRange. Note that you can call this multiple
// times within a single PackBegin/PackEnd.
//
// Returns 0 on failure (allocation failure or unparseable font data),
// otherwise the result of stbtt_PackFontRangesRenderIntoRects.
int stbtt_PackFontRanges(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, stbtt_pack_range *ranges, int num_ranges)
{
   stbtt_fontinfo info;
   int i,j,n, return_value = 1;
   //stbrp_context *context = (stbrp_context *) spc->pack_info;
   stbrp_rect    *rects;

   // flag all characters as NOT packed
   for (i=0; i < num_ranges; ++i)
      for (j=0; j < ranges[i].num_chars; ++j)
         ranges[i].chardata_for_range[j].x0 =
         ranges[i].chardata_for_range[j].y0 =
         ranges[i].chardata_for_range[j].x1 =
         ranges[i].chardata_for_range[j].y1 = 0;

   // one rect per character across all ranges
   n = 0;
   for (i=0; i < num_ranges; ++i)
      n += ranges[i].num_chars;

   rects = STBTT_malloc(sizeof(*rects) * n, spc->user_allocator_context);
   if (rects == NULL)
      return 0;

   info.userdata = spc->user_allocator_context;
   // BUGFIX: the InitFont result was previously ignored; on corrupt font data
   // the uninitialized fontinfo would be used by the gather/render calls below.
   if (!stbtt_InitFont(&info, fontdata, stbtt_GetFontOffsetForIndex(fontdata,font_index))) {
      STBTT_free(rects, spc->user_allocator_context);
      return 0;
   }

   n = stbtt_PackFontRangesGatherRects(spc, &info, ranges, num_ranges, rects);

   stbtt_PackFontRangesPackRects(spc, rects, n);

   return_value = stbtt_PackFontRangesRenderIntoRects(spc, &info, ranges, num_ranges, rects);

   STBTT_free(rects, spc->user_allocator_context);
   return return_value;
}

// Creates character bitmaps from the font_index'th font found in fontdata (use
// font_index=0 if you don't know what that is). It creates num_chars_in_range
// bitmaps for characters with unicode values starting at first_unicode_char_in_range
// and increasing. Data for how to render them is stored in chardata_for_range;
// pass these to stbtt_GetPackedQuad to get back renderable quads.
//
// font_size is the full height of the character from ascender to descender,
// as computed by stbtt_ScaleForPixelHeight. To use a point size as computed
// by stbtt_ScaleForMappingEmToPixels, wrap the point size in STBTT_POINT_SIZE()
// and pass that result as 'font_size':
//       ..., 20 , ... 
// font max minus min y is 20 pixels tall
//        ..., STBTT_POINT_SIZE(20), ...   // 'M' is 20 pixels tall
int stbtt_PackFontRange(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, float font_size, int first_unicode_codepoint_in_range, int num_chars_in_range, stbtt_packedchar *chardata_for_range)
{
   // single contiguous-range convenience wrapper over stbtt_PackFontRanges
   stbtt_pack_range range;
   range.first_unicode_codepoint_in_range = first_unicode_codepoint_in_range;
   range.array_of_unicode_codepoints = NULL;
   range.num_chars = num_chars_in_range;
   range.chardata_for_range = chardata_for_range;
   range.font_size = font_size;
   return stbtt_PackFontRanges(spc, fontdata, font_index, &range, 1);
}

// Query the font vertical metrics without having to create a font first.
void stbtt_GetScaledFontVMetrics(const unsigned char *fontdata, int index, float size, float *ascent, float *descent, float *lineGap)
{
   float scale;
   stbtt_fontinfo info;
   int i_ascent, i_descent, i_lineGap;
   stbtt_InitFont(&info, fontdata, stbtt_GetFontOffsetForIndex(fontdata, index));
   // positive size = pixel height; negative size = map em square to -size pixels
   scale = size > 0 ? stbtt_ScaleForPixelHeight(&info, size) : stbtt_ScaleForMappingEmToPixels(&info, -size);
   stbtt_GetFontVMetrics(&info, &i_ascent, &i_descent, &i_lineGap);
   *ascent  = i_ascent  * scale;
   *descent = i_descent * scale;
   *lineGap = i_lineGap * scale;
}

void stbtt_GetPackedQuad(const stbtt_packedchar *chardata, int pw, int ph, int char_index, float *xpos, float *ypos, stbtt_aligned_quad *q, int align_to_integer)
{
   // pw,ph are the atlas texture dimensions; s/t are normalized by them
   float ipw = 1.0f / pw, iph = 1.0f / ph;
   const stbtt_packedchar *b = chardata + char_index;

   if (align_to_integer) {
      // snap the quad origin to the pixel grid (rounds to nearest integer)
      float x = STBTT_ifloor((*xpos + b->xoff) + .5f);
      float y = STBTT_ifloor((*ypos + b->yoff) + .5f);
      q->x0 = x;
      q->y0 = y;
      q->x1 = x + b->xoff2 - b->xoff;
      q->y1 = y + b->yoff2 - b->yoff;
   } else {
      q->x0 = *xpos + b->xoff;
      q->y0 = *ypos + b->yoff;
      q->x1 = *xpos + b->xoff2;
      q->y1 = *ypos + b->yoff2;
   }

   q->s0 = b->x0 * ipw;
   q->t0 = b->y0 * iph;
   q->s1 = b->x1 * ipw;
   q->t1 = b->y1 * iph;

   *xpos += b->xadvance;
}

//////////////////////////////////////////////////////////////////////////////
//
// sdf computation
//

// Intersect a ray with a quadratic bezier q0..q2; returns the number of
// hits (0..2) and writes, per hit, the signed position along the ray and
// the curve derivative term used for winding direction.
static int stbtt__ray_intersect_bezier(float orig[2], float ray[2], float q0[2], float q1[2], float q2[2], float hits[2][2])
{
   // perpendicular-distance projections of the control points and ray origin
   float q0perp = q0[1]*ray[0] - q0[0]*ray[1];
   float q1perp = q1[1]*ray[0] - q1[0]*ray[1];
   float q2perp = q2[1]*ray[0] - q2[0]*ray[1];
   float roperp = orig[1]*ray[0] - orig[0]*ray[1];

   float a = q0perp - 2*q1perp + q2perp;
   float b = q1perp - q0perp;
   float c = q0perp - roperp;

   float s0 = 0., s1 = 0.;
   int num_s = 0;

   if (a != 0.0) {
      float discr = b*b - a*c;
      if (discr > 0.0) {
         float rcpna = -1 / a;
         float d = (float) sqrt(discr);
         s0 = (b+d) * rcpna;
         s1 = (b-d) * rcpna;
         if (s0 >= 0.0 && s0 <= 1.0)
            num_s = 1;
         if (d > 0.0 && s1 >= 0.0 && s1 <= 1.0) {
            if (num_s == 0) s0 = s1;
            ++num_s;
         }
      }
   } else {
      // 2*b*s + c = 0
      // s = -c / (2*b)
      s0 = c / (-2 * b);
      if (s0 >= 0.0 && s0 <= 1.0)
         num_s = 1;
   }

   if (num_s == 0)
      return 0;
   else {
      // project onto the (non-normalized) ray direction to get hit positions
      float rcp_len2 = 1 / (ray[0]*ray[0] + ray[1]*ray[1]);
      float rayn_x = ray[0] * rcp_len2, rayn_y = ray[1] * rcp_len2;

      float q0d = q0[0]*rayn_x + q0[1]*rayn_y;
      float q1d = q1[0]*rayn_x + q1[1]*rayn_y;
      float q2d = q2[0]*rayn_x + q2[1]*rayn_y;
      float rod = orig[0]*rayn_x + orig[1]*rayn_y;

      float q10d = q1d - q0d;
      float q20d = q2d - q0d;
      float q0rd = q0d - rod;

      hits[0][0] = q0rd + s0*(2.0f - 2.0f*s0)*q10d + s0*s0*q20d;
      hits[0][1] = a*s0+b;

      if (num_s > 1) {
         hits[1][0] = q0rd + s1*(2.0f - 2.0f*s1)*q10d + s1*s1*q20d;
         hits[1][1] = a*s1+b;
         return 2;
      } else {
         return 1;
      }
   }
}

// exact 2D point equality
static int equal(float *a, float *b)
{
   return (a[0] == b[0] && a[1] == b[1]);
}

// Count signed crossings of the ray from (-infinity,y) to (x,y) against the
// glyph outline; nonzero result means the point is inside the shape.
static int stbtt__compute_crossings_x(float x, float y, int nverts, stbtt_vertex *verts)
{
   int i;
   float orig[2], ray[2] = { 1, 0 };
   float y_frac;
   int winding = 0;

   // make sure y never passes through a vertex of the shape
   y_frac = (float) fmod(y, 1.0f);
   if (y_frac < 0.01f)
      y += 0.01f;
   else if (y_frac > 0.99f)
      y -= 0.01f;

   orig[0] = x;
   orig[1] = y;

   // test a ray from (-infinity,y) to (x,y)
   for (i=0; i < nverts; ++i) {
      if (verts[i].type == STBTT_vline) {
         int x0 = (int) verts[i-1].x, y0 = (int) verts[i-1].y;
         int x1 = (int) verts[i  ].x, y1 = (int) verts[i  ].y;
         if (y > MIN(y0,y1) && y < MAX(y0,y1) && x > MIN(x0,x1)) {
            float x_inter = (y - y0) / (y1 - y0) * (x1-x0) + x0;
            if (x_inter < x)
               winding += (y0 < y1) ? 1 : -1;
         }
      }
      if (verts[i].type == STBTT_vcurve) {
         int x0 = (int) verts[i-1].x , y0 = (int) verts[i-1].y ;
         int x1 = (int) verts[i  ].cx, y1 = (int) verts[i  ].cy;
         int x2 = (int) verts[i  ].x , y2 = (int) verts[i  ].y ;
         int ax = MIN(x0,MIN(x1,x2)), ay = MIN(y0,MIN(y1,y2));
         int by = MAX(y0,MAX(y1,y2));
         if (y > ay && y < by && x > ax) {
            float q0[2],q1[2],q2[2];
            float hits[2][2];
            q0[0] = (float)x0;
            q0[1] = (float)y0;
            q1[0] = (float)x1;
            q1[1] = (float)y1;
            q2[0] = (float)x2;
            q2[1] = (float)y2;
            // a degenerate curve (repeated control point) is really a line
            if (equal(q0,q1) || equal(q1,q2)) {
               x0 = (int)verts[i-1].x;
               y0 = (int)verts[i-1].y;
               x1 = (int)verts[i  ].x;
               y1 = (int)verts[i  ].y;
               if (y > MIN(y0,y1) && y < MAX(y0,y1) && x > MIN(x0,x1)) {
                  float x_inter = (y - y0) / (y1 - y0) * (x1-x0) + x0;
                  if (x_inter < x)
                     winding += (y0 < y1) ? 1 : -1;
               }
            } else {
               int num_hits = stbtt__ray_intersect_bezier(orig, ray, q0, q1, q2, hits);
               if (num_hits >= 1)
                  if (hits[0][0] < 0)
                     winding += (hits[0][1] < 0 ? -1 : 1);
               if (num_hits >= 2)
                  if (hits[1][0] < 0)
                     winding += (hits[1][1] < 0 ? -1 : 1);
            }
         }
      }
   }
   return winding;
}

// real cube root (handles negative inputs, unlike powf alone)
static float stbtt__cuberoot( float x )
{
   if (x<0)
      return -powf(-x,1.0f/3.0f);
   else
      return  powf( x,1.0f/3.0f);
}

// x^3 + a*x^2 + b*x + c = 0
static int stbtt__solve_cubic(float a, float b, float c, float* r)
{
   // depressed-cubic (Cardano) solution; returns number of real roots written
   float s = -a / 3;
   float p = b - a*a / 3;
   float q = a * (2*a*a - 9*b) / 27 + c;
   float p3 = p*p*p;
   float d = q*q + 4*p3 / 27;
   if (d >= 0) {
      float z = sqrtf(d);
      float u = (-q + z) / 2;
      float v = (-q - z) / 2;
      u = stbtt__cuberoot(u);
      v = stbtt__cuberoot(v);
      r[0] = s + u + v;
      return 1;
   } else {
      float u = sqrtf(-p/3);
      float v = acos(-sqrt(-27/p3) * q / 2) / 3; // p3 must be negative, since d is negative
      float m = cos(v);
      float n = cos(v-3.141592/2)*1.732050808f;
      r[0] = s + u * 2 * m;
      r[1] = s - u * (m + n);
      r[2] = s - u * (m - n);

      // these asserts may not be safe at all scales
      // though they're in bezier t parameter units so maybe?
      //ASSERT( fabs(((r[0]+a)*r[0]+b)*r[0]+c) < 0.05f);
      //ASSERT( fabs(((r[1]+a)*r[1]+b)*r[1]+c) < 0.05f);
      //ASSERT( fabs(((r[2]+a)*r[2]+b)*r[2]+c) < 0.05f);
      return 3;
   }
}

// This and stbtt_GetCodepointSDF() compute a discretized SDF field for
// a single character, suitable for storing in a single-channel texture,
// sampling with bilinear filtering, and testing against larger than
// some threshold to produce scalable fonts.
//
// info              --  the font
// scale             --  controls the size of the resulting SDF bitmap, same as it would be creating a regular bitmap
// glyph/codepoint   --  the character to generate the SDF for
// padding           --  extra "pixels" around the character which are filled with the distance to the character (not 0),
//                          which allows effects like bit outlines
// onedge_value      --  value 0-255 to test the SDF against to reconstruct the character (i.e. the isocontour of the character)
// pixel_dist_scale  --  what value the SDF should increase by when moving one SDF "pixel" away from the edge (on the 0..255 scale)
//                          if positive, > onedge_value is inside; if negative, < onedge_value is inside
// width,height      --  output height & width of the SDF bitmap (including padding)
// xoff,yoff         --  output origin of the character
// return value      --  a 2D array of bytes 0..255, width*height in size
//
// pixel_dist_scale & onedge_value are a scale & bias that allows you to make
// optimal use of the limited 0..255 for your application, trading off precision
// and special effects. SDF values outside the range 0..255 are clamped to 0..255.
//
// Example:
//      scale = stbtt_ScaleForPixelHeight(22)
//      padding = 5
//      onedge_value = 180
//      pixel_dist_scale = 180/5.0 = 36.0
//
//      This will create an SDF bitmap in which the character is about 22 pixels
//      high but the whole bitmap is about 22+5+5=32 pixels high. To produce a filled
//      shape, sample the SDF at each pixel and fill the pixel if the SDF value
//      is greater than or equal to 180/255. (You'll actually want to antialias,
//      which is beyond the scope of this example.) Additionally, you can compute
//      offset outlines (e.g. to stroke the character border inside & outside,
//      or only outside). For example, to fill outside the character up to 3 SDF
//      pixels, you would compare against (180-36.0*3)/255 = 72/255. The above
//      choice of variables maps a range from 5 pixels outside the shape to
//      2 pixels inside the shape to 0..255; this is intended primarily for apply
//      outside effects only (the interior range is needed to allow proper
//      antialiasing of the font at *smaller* sizes)
//
// The function computes the SDF analytically at each SDF pixel, not by e.g.
// building a higher-res bitmap and approximating it. In theory the quality
// should be as high as possible for an SDF of this size & representation, but
// unclear if this is true in practice (perhaps building a higher-res bitmap
// and computing from that can allow drop-out prevention).
//
// The algorithm has not been optimized at all, so expect it to be slow
// if computing lots of characters or very large sizes.
unsigned char * stbtt_GetGlyphSDF(const stbtt_fontinfo *info, float scale, int glyph, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff) { float scale_x = scale, scale_y = scale; int ix0,iy0,ix1,iy1; int w,h; unsigned char *data; if (scale == 0) return NULL; stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale, scale, 0.0f,0.0f, &ix0,&iy0,&ix1,&iy1); // if empty, return NULL if (ix0 == ix1 || iy0 == iy1) return NULL; ix0 -= padding; iy0 -= padding; ix1 += padding; iy1 += padding; w = (ix1 - ix0); h = (iy1 - iy0); if (width ) *width = w; if (height) *height = h; if (xoff ) *xoff = ix0; if (yoff ) *yoff = iy0; // invert for y-downwards bitmaps scale_y = -scale_y; { int x,y,i,j; float *precompute; stbtt_vertex *verts; int num_verts = stbtt_GetGlyphShape(info, glyph, &verts); data = STBTT_malloc(w * h, info->userdata); precompute = STBTT_malloc(num_verts * sizeof(float), info->userdata); for (i=0,j=num_verts-1; i < num_verts; j=i++) { if (verts[i].type == STBTT_vline) { float x0 = verts[i].x*scale_x, y0 = verts[i].y*scale_y; float x1 = verts[j].x*scale_x, y1 = verts[j].y*scale_y; float dist = (float) sqrt((x1-x0)*(x1-x0) + (y1-y0)*(y1-y0)); precompute[i] = (dist == 0) ? 
0.0f : 1.0f / dist; } else if (verts[i].type == STBTT_vcurve) { float x2 = verts[j].x *scale_x, y2 = verts[j].y *scale_y; float x1 = verts[i].cx*scale_x, y1 = verts[i].cy*scale_y; float x0 = verts[i].x *scale_x, y0 = verts[i].y *scale_y; float bx = x0 - 2*x1 + x2, by = y0 - 2*y1 + y2; float len2 = bx*bx + by*by; if (len2 != 0.0f) precompute[i] = 1.0f / (bx*bx + by*by); else precompute[i] = 0.0f; } else precompute[i] = 0.0f; } for (y=iy0; y < iy1; ++y) { for (x=ix0; x < ix1; ++x) { float val; float min_dist = 999999.0f; float sx = (float) x + 0.5f; float sy = (float) y + 0.5f; float x_gspace = (sx / scale_x); float y_gspace = (sy / scale_y); int winding = stbtt__compute_crossings_x(x_gspace, y_gspace, num_verts, verts); // @OPTIMIZE: this could just be a rasterization, but needs to be line vs. non-tesselated curves so a new path for (i=0; i < num_verts; ++i) { float x0 = verts[i].x*scale_x, y0 = verts[i].y*scale_y; if (verts[i].type == STBTT_vline && precompute[i] != 0.0f) { float x1 = verts[i-1].x*scale_x, y1 = verts[i-1].y*scale_y; float dist,dist2 = (x0-sx)*(x0-sx) + (y0-sy)*(y0-sy); if (dist2 < min_dist*min_dist) min_dist = (float) sqrt(dist2); // coarse culling against bbox //if (sx > MIN(x0,x1)-min_dist && sx < MAX(x0,x1)+min_dist && // sy > MIN(y0,y1)-min_dist && sy < MAX(y0,y1)+min_dist) dist = (float) fabs((x1-x0)*(y0-sy) - (y1-y0)*(x0-sx)) * precompute[i]; ASSERT(i != 0); if (dist < min_dist) { // check position along line // x' = x0 + t*(x1-x0), y' = y0 + t*(y1-y0) // minimize (x'-sx)*(x'-sx)+(y'-sy)*(y'-sy) float dx = x1-x0, dy = y1-y0; float px = x0-sx, py = y0-sy; // minimize (px+t*dx)^2 + (py+t*dy)^2 = px*px + 2*px*dx*t + t^2*dx*dx + py*py + 2*py*dy*t + t^2*dy*dy // derivative: 2*px*dx + 2*py*dy + (2*dx*dx+2*dy*dy)*t, set to 0 and solve float t = -(px*dx + py*dy) / (dx*dx + dy*dy); if (t >= 0.0f && t <= 1.0f) min_dist = dist; } } else if (verts[i].type == STBTT_vcurve) { float x2 = verts[i-1].x *scale_x, y2 = verts[i-1].y *scale_y; float x1 = verts[i 
].cx*scale_x, y1 = verts[i ].cy*scale_y; float box_x0 = MIN(MIN(x0,x1),x2); float box_y0 = MIN(MIN(y0,y1),y2); float box_x1 = MAX(MAX(x0,x1),x2); float box_y1 = MAX(MAX(y0,y1),y2); // coarse culling against bbox to avoid computing cubic unnecessarily if (sx > box_x0-min_dist && sx < box_x1+min_dist && sy > box_y0-min_dist && sy < box_y1+min_dist) { int num=0; float ax = x1-x0, ay = y1-y0; float bx = x0 - 2*x1 + x2, by = y0 - 2*y1 + y2; float mx = x0 - sx, my = y0 - sy; float res[3] = {0.f,0.f,0.f}; float px,py,t,it,dist2; float a_inv = precompute[i]; if (a_inv == 0.0) { // if a_inv is 0, it's 2nd degree so use quadratic formula float a = 3*(ax*bx + ay*by); float b = 2*(ax*ax + ay*ay) + (mx*bx+my*by); float c = mx*ax+my*ay; if (a == 0.0) { // if a is 0, it's linear if (b != 0.0) { res[num++] = -c/b; } } else { float discriminant = b*b - 4*a*c; if (discriminant < 0) num = 0; else { float root = (float) sqrt(discriminant); res[0] = (-b - root)/(2*a); res[1] = (-b + root)/(2*a); num = 2; // don't bother distinguishing 1-solution case, as code below will still work } } } else { float b = 3*(ax*bx + ay*by) * a_inv; // could precompute this as it doesn't depend on sample point float c = (2*(ax*ax + ay*ay) + (mx*bx+my*by)) * a_inv; float d = (mx*ax+my*ay) * a_inv; num = stbtt__solve_cubic(b, c, d, res); } dist2 = (x0-sx)*(x0-sx) + (y0-sy)*(y0-sy); if (dist2 < min_dist*min_dist) min_dist = (float) sqrt(dist2); if (num >= 1 && res[0] >= 0.0f && res[0] <= 1.0f) { t = res[0], it = 1.0f - t; px = it*it*x0 + 2*t*it*x1 + t*t*x2; py = it*it*y0 + 2*t*it*y1 + t*t*y2; dist2 = (px-sx)*(px-sx) + (py-sy)*(py-sy); if (dist2 < min_dist * min_dist) min_dist = (float) sqrt(dist2); } if (num >= 2 && res[1] >= 0.0f && res[1] <= 1.0f) { t = res[1], it = 1.0f - t; px = it*it*x0 + 2*t*it*x1 + t*t*x2; py = it*it*y0 + 2*t*it*y1 + t*t*y2; dist2 = (px-sx)*(px-sx) + (py-sy)*(py-sy); if (dist2 < min_dist * min_dist) min_dist = (float) sqrt(dist2); } if (num >= 3 && res[2] >= 0.0f && res[2] <= 1.0f) { 
t = res[2], it = 1.0f - t; px = it*it*x0 + 2*t*it*x1 + t*t*x2; py = it*it*y0 + 2*t*it*y1 + t*t*y2; dist2 = (px-sx)*(px-sx) + (py-sy)*(py-sy); if (dist2 < min_dist * min_dist) min_dist = (float) sqrt(dist2); } } } } if (winding == 0) min_dist = -min_dist; // if outside the shape, value is negative val = onedge_value + pixel_dist_scale * min_dist; if (val < 0) val = 0; else if (val > 255) val = 255; data[(y-iy0)*w+(x-ix0)] = (unsigned char) val; } } STBTT_free(precompute, info->userdata); STBTT_free(verts, info->userdata); } return data; } unsigned char *stbtt_GetCodepointSDF(const stbtt_fontinfo *info, float scale, int codepoint, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff) { return stbtt_GetGlyphSDF(info, scale, stbtt_FindGlyphIndex(info, codepoint), padding, onedge_value, pixel_dist_scale, width, height, xoff, yoff); } // Frees Signed Distance Function bitmap. void stbtt_FreeSDF(unsigned char *bitmap, void *userdata) { STBTT_free(bitmap, userdata); } ////////////////////////////////////////////////////////////////////////////// // // font name matching -- recommended not to use this // // check if a utf8 string contains a prefix which is the utf16 string; if so return length of matching utf8 string static int32_t stbtt__CompareUTF8toUTF16_bigendian_prefix(uint8_t *s1, int32_t len1, uint8_t *s2, int32_t len2) { int32_t i=0; // convert utf16 to utf8 and compare the results while converting while (len2) { uint16_t ch = s2[0]*256 + s2[1]; if (ch < 0x80) { if (i >= len1) return -1; if (s1[i++] != ch) return -1; } else if (ch < 0x800) { if (i+1 >= len1) return -1; if (s1[i++] != 0xc0 + (ch >> 6)) return -1; if (s1[i++] != 0x80 + (ch & 0x3f)) return -1; } else if (ch >= 0xd800 && ch < 0xdc00) { uint32_t c; uint16_t ch2 = s2[2]*256 + s2[3]; if (i+3 >= len1) return -1; c = ((ch - 0xd800) << 10) + (ch2 - 0xdc00) + 0x10000; if (s1[i++] != 0xf0 + (c >> 18)) return -1; if (s1[i++] != 0x80 + ((c >> 12) & 0x3f)) 
return -1; if (s1[i++] != 0x80 + ((c >> 6) & 0x3f)) return -1; if (s1[i++] != 0x80 + ((c ) & 0x3f)) return -1; s2 += 2; // plus another 2 below len2 -= 2; } else if (ch >= 0xdc00 && ch < 0xe000) { return -1; } else { if (i+2 >= len1) return -1; if (s1[i++] != 0xe0 + (ch >> 12)) return -1; if (s1[i++] != 0x80 + ((ch >> 6) & 0x3f)) return -1; if (s1[i++] != 0x80 + ((ch ) & 0x3f)) return -1; } s2 += 2; len2 -= 2; } return i; } static int stbtt_CompareUTF8toUTF16_bigendian_internal(char *s1, int len1, char *s2, int len2) { return len1 == stbtt__CompareUTF8toUTF16_bigendian_prefix((uint8_t*) s1, len1, (uint8_t*) s2, len2); } // Returns the string (which may be big-endian double byte, e.g. for unicode) // and puts the length in bytes in *length. // // some of the values for the IDs are below; for more see the truetype spec: // http://developer.apple.com/textfonts/TTRefMan/RM06/Chap6name.html // http://www.microsoft.com/typography/otspec/name.htm // // returns results in whatever encoding you request... but note that 2-byte encodings // will be BIG-ENDIAN... 
use stbtt_CompareUTF8toUTF16_bigendian() to compare const char *stbtt_GetFontNameString(const stbtt_fontinfo *font, int *length, int platformID, int encodingID, int languageID, int nameID) { int32_t i,count,stringOffset; uint8_t *fc = font->data; uint32_t offset = font->fontstart; uint32_t nm = stbtt__find_table(fc, offset, "name"); if (!nm) return NULL; count = ttUSHORT(fc+nm+2); stringOffset = nm + ttUSHORT(fc+nm+4); for (i=0; i < count; ++i) { uint32_t loc = nm + 6 + 12 * i; if (platformID == ttUSHORT(fc+loc+0) && encodingID == ttUSHORT(fc+loc+2) && languageID == ttUSHORT(fc+loc+4) && nameID == ttUSHORT(fc+loc+6)) { *length = ttUSHORT(fc+loc+8); return (const char *) (fc+stringOffset+ttUSHORT(fc+loc+10)); } } return NULL; } static int stbtt__matchpair(uint8_t *fc, uint32_t nm, uint8_t *name, int32_t nlen, int32_t target_id, int32_t next_id) { int32_t i; int32_t count = ttUSHORT(fc+nm+2); int32_t stringOffset = nm + ttUSHORT(fc+nm+4); for (i=0; i < count; ++i) { uint32_t loc = nm + 6 + 12 * i; int32_t id = ttUSHORT(fc+loc+6); if (id == target_id) { // find the encoding int32_t platform = ttUSHORT(fc+loc+0), encoding = ttUSHORT(fc+loc+2), language = ttUSHORT(fc+loc+4); // is this a Unicode encoding? 
if (platform == 0 || (platform == 3 && encoding == 1) || (platform == 3 && encoding == 10)) { int32_t slen = ttUSHORT(fc+loc+8); int32_t off = ttUSHORT(fc+loc+10); // check if there's a prefix match int32_t matchlen = stbtt__CompareUTF8toUTF16_bigendian_prefix(name, nlen, fc+stringOffset+off,slen); if (matchlen >= 0) { // check for target_id+1 immediately following, with same encoding & language if (i+1 < count && ttUSHORT(fc+loc+12+6) == next_id && ttUSHORT(fc+loc+12) == platform && ttUSHORT(fc+loc+12+2) == encoding && ttUSHORT(fc+loc+12+4) == language) { slen = ttUSHORT(fc+loc+12+8); off = ttUSHORT(fc+loc+12+10); if (slen == 0) { if (matchlen == nlen) return 1; } else if (matchlen < nlen && name[matchlen] == ' ') { ++matchlen; if (stbtt_CompareUTF8toUTF16_bigendian_internal((char*) (name+matchlen), nlen-matchlen, (char*)(fc+stringOffset+off),slen)) return 1; } } else { // if nothing immediately following if (matchlen == nlen) return 1; } } } // @TODO handle other encodings } } return 0; } static int stbtt__matches(uint8_t *fc, uint32_t offset, uint8_t *name, int32_t flags) { int32_t nlen = (int32_t) strlen((char *) name); uint32_t nm,hd; if (!stbtt__isfont(fc+offset)) return 0; // check italics/bold/underline flags in macStyle... 
if (flags) { hd = stbtt__find_table(fc, offset, "head"); if ((ttUSHORT(fc+hd+44) & 7) != (flags & 7)) return 0; } nm = stbtt__find_table(fc, offset, "name"); if (!nm) return 0; if (flags) { // if we checked the macStyle flags, then just check the family and ignore the subfamily if (stbtt__matchpair(fc, nm, name, nlen, 16, -1)) return 1; if (stbtt__matchpair(fc, nm, name, nlen, 1, -1)) return 1; if (stbtt__matchpair(fc, nm, name, nlen, 3, -1)) return 1; } else { if (stbtt__matchpair(fc, nm, name, nlen, 16, 17)) return 1; if (stbtt__matchpair(fc, nm, name, nlen, 1, 2)) return 1; if (stbtt__matchpair(fc, nm, name, nlen, 3, -1)) return 1; } return 0; } static int stbtt_FindMatchingFont_internal(unsigned char *font_collection, char *name_utf8, int32_t flags) { int32_t i; for (i=0;;++i) { int32_t off = stbtt_GetFontOffsetForIndex(font_collection, i); if (off < 0) return off; if (stbtt__matches((uint8_t *) font_collection, off, (uint8_t*) name_utf8, flags)) return off; } } #if defined(__GNUC__) || defined(__clang__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wcast-qual" #endif int stbtt_BakeFontBitmap(const unsigned char *data, int offset, float pixel_height, unsigned char *pixels, int pw, int ph, int first_char, int num_chars, stbtt_bakedchar *chardata) { return stbtt_BakeFontBitmap_internal((unsigned char *) data, offset, pixel_height, pixels, pw, ph, first_char, num_chars, chardata); } // Each .ttf/.ttc file may have more than one font. Each font has a sequential // index number starting from 0. Call this function to get the font offset for // a given index; it returns -1 if the index is out of range. A regular .ttf // file will only define one font and it always be at offset 0, so it will // return '0' for index 0, and -1 for all other indices. 
int stbtt_GetFontOffsetForIndex(const unsigned char *data, int index) { return stbtt_GetFontOffsetForIndex_internal((unsigned char *) data, index); } // This function will determine the number of fonts in a font file. TrueType // collection (.ttc) files may contain multiple fonts, while TrueType font // (.ttf) files only contain one font. The number of fonts can be used for // indexing with the previous function where the index is between zero and one // less than the total fonts. If an error occurs, -1 is returned. int stbtt_GetNumberOfFonts(const unsigned char *data) { return stbtt_GetNumberOfFonts_internal((unsigned char *) data); } // Given an offset into the file that defines a font, this function builds // the necessary cached info for the rest of the system. You must allocate // the stbtt_fontinfo yourself, and stbtt_InitFont will fill it out. You don't // need to do anything special to free it, because the contents are pure // value data with no additional data structures. Returns 0 on failure. int stbtt_InitFont(stbtt_fontinfo *info, const unsigned char *data, int offset) { return stbtt_InitFont_internal(info, (unsigned char *) data, offset); } // Finding the right font... // // You should really just solve this offline, keep your own tables // of what font is what, and don't try to get it out of the .ttf file. // That's because getting it out of the .ttf file is really hard, because // the names in the file can appear in many possible encodings, in many // possible languages, and e.g. if you need a case-insensitive comparison, // the details of that depend on the encoding & language in a complex way // (actually underspecified in truetype, but also gigantic). 
// // But you can use the provided functions in two possible ways: // stbtt_FindMatchingFont() will use *case-sensitive* comparisons on // unicode-encoded names to try to find the font you want; // you can run this before calling stbtt_InitFont() // // stbtt_GetFontNameString() lets you get any of the various strings // from the file yourself and do your own comparisons on them. // You have to have called stbtt_InitFont() first. // returns the offset (not index) of the font that matches, or -1 if none // if you use STBTT_MACSTYLE_DONTCARE, use a font name like "Arial Bold". // if you use any other flag, use a font name like "Arial"; this checks // the 'macStyle' header field; i don't know if fonts set this consistently int stbtt_FindMatchingFont(const unsigned char *fontdata, const char *name, int flags) { return stbtt_FindMatchingFont_internal((unsigned char *) fontdata, (char *) name, flags); } // returns 1/0 whether the first string interpreted as utf8 is identical to // the second string interpreted as big-endian utf16... 
// useful for strings from next func
int stbtt_CompareUTF8toUTF16_bigendian(const char *s1, int len1, const char *s2, int len2)
{
   // const-correct public wrapper over the internal comparison
   return stbtt_CompareUTF8toUTF16_bigendian_internal((char *) s1, len1, (char *) s2, len2);
}

// FULL VERSION HISTORY
//
//   1.25 (2021-07-11) many fixes
//   1.24 (2020-02-05) fix warning
//   1.23 (2020-02-02) query SVG data for glyphs; query whole kerning table (but only kern not GPOS)
//   1.22 (2019-08-11) minimize missing-glyph duplication; fix kerning if both 'GPOS' and 'kern' are defined
//   1.21 (2019-02-25) fix warning
//   1.20 (2019-02-07) PackFontRange skips missing codepoints; GetScaleFontVMetrics()
//   1.19 (2018-02-11) OpenType GPOS kerning (horizontal only), fmod
//   1.18 (2018-01-29) add missing function
//   1.17 (2017-07-23) make more arguments const; doc fix
//   1.16 (2017-07-12) SDF support
//   1.15 (2017-03-03) make more arguments const
//   1.14 (2017-01-16) num-fonts-in-TTC function
//   1.13 (2017-01-02) support OpenType fonts, certain Apple fonts
//   1.12 (2016-10-25) suppress warnings about casting away const with -Wcast-qual
//   1.11 (2016-04-02) fix unused-variable warning
//   1.10 (2016-04-02) allow user-defined fabs() replacement
//                     fix memory leak if fontsize=0.0
//                     fix warning from duplicate typedef
//   1.09 (2016-01-16) warning fix; avoid crash on outofmem; use alloc userdata for PackFontRanges
//   1.08 (2015-09-13) document stbtt_Rasterize(); fixes for vertical & horizontal edges
//   1.07 (2015-08-01) allow PackFontRanges to accept arrays of sparse codepoints;
//                     allow PackFontRanges to pack and render in separate phases;
//                     fix stbtt_GetFontOFfsetForIndex (never worked for non-0 input?);
//                     fixed an ASSERT() bug in the new rasterizer
//                     replace ASSERT() with ASSERT() in new rasterizer
//   1.06 (2015-07-14) performance improvements (~35% faster on x86 and x64 on test machine)
//                     also more precise AA rasterizer, except if shapes overlap
//                     remove need for STBTT_sort
//   1.05 (2015-04-15) fix misplaced definitions for STBTT_STATIC
//   1.04 (2015-04-15) typo in example
//   1.03 (2015-04-12) STBTT_STATIC, fix memory leak in new packing, various fixes
//   1.02 (2014-12-10) fix various warnings & compile issues w/ stb_rect_pack, C++
//   1.01 (2014-12-08) fix subpixel position when oversampling to exactly match
//                     non-oversampled; STBTT_POINT_SIZE for packed case only
//   1.00 (2014-12-06) add new PackBegin etc. API, w/ support for oversampling
//   0.99 (2014-09-18) fix multiple bugs with subpixel rendering (ryg)
//   0.9  (2014-08-07) support certain mac/iOS fonts without an MS platformID
//   0.8b (2014-07-07) fix a warning
//   0.8  (2014-05-25) fix a few more warnings
//   0.7  (2013-09-25) bugfix: subpixel glyph bug fixed in 0.5 had come back
//   0.6c (2012-07-24) improve documentation
//   0.6b (2012-07-20) fix a few more warnings
//   0.6  (2012-07-17) fix warnings; added stbtt_ScaleForMappingEmToPixels,
//                     stbtt_GetFontBoundingBox, stbtt_IsGlyphEmpty
//   0.5  (2011-12-09) bugfixes:
//                     subpixel glyph renderer computed wrong bounding box
//                     first vertex of shape can be off-curve (FreeSans)
//   0.4b (2011-12-03) fixed an error in the font baking example
//   0.4  (2011-12-01) kerning, subpixel rendering (tor)
//                     bugfixes for:
//                         codepoint-to-glyph conversion using table fmt=12
//                         codepoint-to-glyph conversion using table fmt=4
//                         stbtt_GetBakedQuad with non-square texture (Zer)
//                     updated Hello World! sample to use kerning and subpixel
//                     fixed some warnings
//   0.3  (2009-06-24) cmap fmt=12, compound shapes (MM)
//                     userdata, malloc-from-userdata, non-zero fill (stb)
//   0.2  (2009-03-11) Fix unsigned/signed char warnings
//   0.1  (2009-03-09) First public release
//
175,752
4,285
jart/cosmopolitan
false
cosmopolitan/third_party/stb/stb_rect_pack.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:3;tab-width:8;coding:utf-8 -*-│ │vi: set net ft=c ts=3 sts=3 sw=3 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ │ │ stb_truetype │ │ Copyright 2017 Sean Barrett │ │ │ │ Permission is hereby granted, free of charge, to any person obtaining │ │ a copy of this software and associated documentation files (the │ │ "Software"), to deal in the Software without restriction, including │ │ without limitation the rights to use, copy, modify, merge, publish, │ │ distribute, sublicense, and/or sell copies of the Software, and to │ │ permit persons to whom the Software is furnished to do so, subject to │ │ the following conditions: │ │ │ │ The above copyright notice and this permission notice shall be │ │ included in all copies or substantial portions of the Software. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, │ │ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF │ │ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. │ │ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY │ │ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, │ │ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE │ │ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. │ │ │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/mem/alg.h" #include "libc/assert.h" #include "libc/dce.h" #include "third_party/stb/stb_rect_pack.h" asm(".ident\t\"\\n\\n\ stb_rect_pack (MIT License)\\n\ Copyright 2017 Sean Barrett\""); asm(".include \"libc/disclaimer.inc\""); /* clang-format off */ // stb_rect_pack.h - v1.01 - public domain - rectangle packing // Sean Barrett 2014 // // Useful for e.g. packing rectangular textures into an atlas. // Does not do rotation. // // in the file that you want to have the implementation. 
// // Not necessarily the awesomest packing method, but better than // the totally naive one in stb_truetype (which is primarily what // this is meant to replace). // // Has only had a few tests run, may have issues. // // More docs to come. // // No memory allocations; uses qsort() and assert() from stdlib. // Can override those by defining STBRP_SORT and STBRP_ASSERT. // // This library currently uses the Skyline Bottom-Left algorithm. // // Please note: better rectangle packers are welcome! Please // implement them to the same API, but with a different init // function. // // Credits // // Library // Sean Barrett // Minor features // Martins Mozeiko // github:IntellectualKitty // // Bugfixes / warning fixes // Jeremy Jaussaud // Fabian Giesen // // Version history: // // 1.01 (2021-07-11) always use large rect mode, expose STBRP__MAXVAL in public section // 1.00 (2019-02-25) avoid small space waste; gracefully fail too-wide rectangles // 0.99 (2019-02-07) warning fixes // 0.11 (2017-03-03) return packing success/fail result // 0.10 (2016-10-25) remove cast-away-const to avoid warnings // 0.09 (2016-08-27) fix compiler warnings // 0.08 (2015-09-13) really fix bug with empty rects (w=0 or h=0) // 0.07 (2015-09-13) fix bug with empty rects (w=0 or h=0) // 0.06 (2015-04-15) added STBRP_SORT to allow replacing qsort // 0.05: added STBRP_ASSERT to allow replacing assert // 0.04: fixed minor bug in STBRP_LARGE_RECTS support // 0.01: initial release #define STBRP__INIT_skyline 1 typedef struct { int x,y; stbrp_node **prev_link; } stbrp__findresult; // Optionally select which packing heuristic the library should use. Different // heuristics will produce better/worse results for different data sets. // If you call init again, this will be reset to the default. 
// Select the packing heuristic (one of the STBRP_HEURISTIC_Skyline_*
// constants). Only valid after stbrp_init_target(); calling init again
// resets the heuristic to the default.
void stbrp_setup_heuristic(stbrp_context *context, int heuristic)
{
   switch (context->init_mode) {
      case STBRP__INIT_skyline:
         assert(heuristic == STBRP_HEURISTIC_Skyline_BL_sortHeight || heuristic == STBRP_HEURISTIC_Skyline_BF_sortHeight);
         context->heuristic = heuristic;
         break;
      default:
         unreachable;
   }
}

// Optionally call this function after init but before doing any packing to
// change the handling of the out-of-temp-memory scenario, described above.
// If you call init again, this will be reset to the default (false).
void stbrp_setup_allow_out_of_mem(stbrp_context *context, int allow_out_of_mem)
{
   if (allow_out_of_mem) {
      // if it's ok to run out of memory, then don't bother aligning them;
      // this gives better packing, but may fail due to OOM (even though
      // the rectangles easily fit). @TODO a smarter approach would be to only
      // quantize once we've hit OOM, then we could get rid of this parameter.
      context->align = 1;
   } else {
      // if it's not ok to run out of memory, then quantize the widths
      // so that num_nodes is always enough nodes.
      //
      // I.e. num_nodes * align >= width
      //                  align >= width / num_nodes
      //                  align = ceil(width/num_nodes)
      context->align = (context->width + context->num_nodes-1) / context->num_nodes;
   }
}

// Initialize a rectangle packer to:
//    pack a rectangle that is 'width' by 'height' in dimensions
//    using temporary storage provided by the array 'nodes', which is 'num_nodes' long
//
// You must call this function every time you start packing into a new target.
//
// There is no "shutdown" function. The 'nodes' memory must stay valid for
// the following stbrp_pack_rects() call (or calls), but can be freed after
// the call (or calls) finish.
//
// Note: to guarantee best results, either:
//       1. make sure 'num_nodes' >= 'width'
//   or  2. call stbrp_allow_out_of_mem() defined below with 'allow_out_of_mem = 1'
//
// If you don't do either of the above things, widths will be quantized to multiples
// of small integers to guarantee the algorithm doesn't run out of temporary storage.
//
// If you do #2, then the non-quantized algorithm will be used, but the algorithm
// may run out of temporary storage and be unable to pack some rectangles.
void stbrp_init_target(stbrp_context *context, int width, int height, stbrp_node *nodes, int num_nodes)
{
   int i;
   // chain every caller-provided node onto the free list
   for (i=0; i < num_nodes-1; ++i)
      nodes[i].next = &nodes[i+1];
   nodes[i].next = NULL;
   context->init_mode = STBRP__INIT_skyline;
   context->heuristic = STBRP_HEURISTIC_Skyline_default;
   context->free_head = &nodes[0];
   context->active_head = &context->extra[0];
   context->width = width;
   context->height = height;
   context->num_nodes = num_nodes;
   stbrp_setup_allow_out_of_mem(context, 0);
   // node 0 is the full width, node 1 is the sentinel (lets us not store width explicitly)
   context->extra[0].x = 0;
   context->extra[0].y = 0;
   context->extra[0].next = &context->extra[1];
   context->extra[1].x = (stbrp_coord) width;
   context->extra[1].y = (1<<30);   // sentinel height: nothing ever fits past it
   context->extra[1].next = NULL;
}

// find minimum y position if it starts at x1
// Scans skyline nodes covering [x0, x0+width) starting from 'first';
// returns the lowest y a 'width'-wide rect can rest at, and writes the
// wasted area under the rect (gaps between the rect and the skyline)
// into *pwaste.
static int stbrp__skyline_find_min_y(stbrp_context *c, stbrp_node *first, int x0, int width, int *pwaste)
{
   stbrp_node *node = first;
   int x1 = x0 + width;
   int min_y, visited_width, waste_area;

   assert(first->x <= x0);

#if 0
   // skip in case we're past the node
   while (node->next->x <= x0)
      ++node;
#else
   assert(node->next->x > x0); // we ended up handling this in the caller for efficiency
#endif

   assert(node->x <= x0);

   min_y = 0;
   waste_area = 0;
   visited_width = 0;
   while (node->x < x1) {
      if (node->y > min_y) {
         // raise min_y higher.
         // we've accounted for all waste up to min_y,
         // but we'll now add more waste for everything we've visted
         waste_area += visited_width * (node->y - min_y);
         min_y = node->y;
         // the first time through, visited_width might be reduced
         if (node->x < x0)
            visited_width += node->next->x - x0;
         else
            visited_width += node->next->x - node->x;
      } else {
         // add waste area
         int under_width = node->next->x - node->x;
         if (under_width + visited_width > width)
            under_width = width - visited_width;
         waste_area += under_width * (min_y - node->y);
         visited_width += under_width;
      }
      node = node->next;
   }

   *pwaste = waste_area;
   return min_y;
}

// Search the skyline for the best placement of a width x height rect under
// the current heuristic (bottom-left, or best-fit which also tries
// right-aligned candidates). Returns prev_link == NULL when nothing fits.
static stbrp__findresult stbrp__skyline_find_best_pos(stbrp_context *c, int width, int height)
{
   int best_waste = (1<<30), best_x, best_y = (1 << 30);
   stbrp__findresult fr;
   stbrp_node **prev, *node, *tail, **best = NULL;

   // align to multiple of c->align
   width = (width + c->align - 1);
   width -= width % c->align;
   assert(width % c->align == 0);

   // if it can't possibly fit, bail immediately
   if (width > c->width || height > c->height) {
      fr.prev_link = NULL;
      fr.x = fr.y = 0;
      return fr;
   }

   node = c->active_head;
   prev = &c->active_head;
   while (node->x + width <= c->width) {
      int y,waste;
      y = stbrp__skyline_find_min_y(c, node, node->x, width, &waste);
      if (c->heuristic == STBRP_HEURISTIC_Skyline_BL_sortHeight) { // actually just want to test BL
         // bottom left
         if (y < best_y) {
            best_y = y;
            best = prev;
         }
      } else {
         // best-fit
         if (y + height <= c->height) {
            // can only use it if it first vertically
            if (y < best_y || (y == best_y && waste < best_waste)) {
               best_y = y;
               best_waste = waste;
               best = prev;
            }
         }
      }
      prev = &node->next;
      node = node->next;
   }

   best_x = (best == NULL) ? 0 : (*best)->x;

   // if doing best-fit (BF), we also have to try aligning right edge to each node position
   //
   // e.g, if fitting
   //
   //     ____________________
   //    |____________________|
   //
   //            into
   //
   //   |                         |
   //   |             ____________|
   //   |____________|
   //
   // then right-aligned reduces waste, but bottom-left BL is always chooses left-aligned
   //
   // This makes BF take about 2x the time
   if (c->heuristic == STBRP_HEURISTIC_Skyline_BF_sortHeight) {
      tail = c->active_head;
      node = c->active_head;
      prev = &c->active_head;
      // find first node that's admissible
      while (tail->x < width)
         tail = tail->next;
      while (tail) {
         int xpos = tail->x - width;
         int y,waste;
         assert(xpos >= 0);
         // find the left position that matches this
         while (node->next->x <= xpos) {
            prev = &node->next;
            node = node->next;
         }
         assert(node->next->x > xpos && node->x <= xpos);
         y = stbrp__skyline_find_min_y(c, node, xpos, width, &waste);
         if (y + height <= c->height) {
            if (y <= best_y) {
               if (y < best_y || waste < best_waste || (waste==best_waste && xpos < best_x)) {
                  best_x = xpos;
                  assert(y <= best_y);
                  best_y = y;
                  best_waste = waste;
                  best = prev;
               }
            }
         }
         tail = tail->next;
      }
   }

   fr.prev_link = best;
   fr.x = best_x;
   fr.y = best_y;
   return fr;
}

// Place one rect and update the skyline: splice in a new node at the chosen
// position, recycle the nodes it covers back onto the free list, and clip
// the first surviving node on the right.
static stbrp__findresult stbrp__skyline_pack_rectangle(stbrp_context *context, int width, int height)
{
   // find best position according to heuristic
   stbrp__findresult res = stbrp__skyline_find_best_pos(context, width, height);
   stbrp_node *node, *cur;

   // bail if:
   //    1. it failed
   //    2. the best node doesn't fit (we don't always check this)
   //    3. we're out of memory
   if (res.prev_link == NULL || res.y + height > context->height || context->free_head == NULL) {
      res.prev_link = NULL;
      return res;
   }

   // on success, create new node
   node = context->free_head;
   node->x = (stbrp_coord) res.x;
   node->y = (stbrp_coord) (res.y + height);
   context->free_head = node->next;

   // insert the new node into the right starting point, and
   // let 'cur' point to the remaining nodes needing to be
   // stiched back in
   cur = *res.prev_link;
   if (cur->x < res.x) {
      // preserve the existing one, so start testing with the next one
      stbrp_node *next = cur->next;
      cur->next = node;
      cur = next;
   } else {
      *res.prev_link = node;
   }

   // from here, traverse cur and free the nodes, until we get to one
   // that shouldn't be freed
   while (cur->next && cur->next->x <= res.x + width) {
      stbrp_node *next = cur->next;
      // move the current node to the free list
      cur->next = context->free_head;
      context->free_head = cur;
      cur = next;
   }

   // stitch the list back in
   node->next = cur;

   if (cur->x < res.x + width)
      cur->x = (stbrp_coord) (res.x + width);

#ifndef NDEBUG
   // debug-only invariant checks: skyline x's strictly increase, and every
   // node is accounted for on either the active or free list
   cur = context->active_head;
   while (cur->x < context->width) {
      assert(cur->x < cur->next->x);
      cur = cur->next;
   }
   assert(cur->next == NULL);

   {
      int count=0;
      cur = context->active_head;
      while (cur) {
         cur = cur->next;
         ++count;
      }
      cur = context->free_head;
      while (cur) {
         cur = cur->next;
         ++count;
      }
      assert(count == context->num_nodes+2);
   }
#endif

   return res;
}

// qsort comparator: taller rects first, ties broken by wider first
static int rect_height_compare(const void *a, const void *b)
{
   const stbrp_rect *p = (const stbrp_rect *) a;
   const stbrp_rect *q = (const stbrp_rect *) b;
   if (p->h > q->h)
      return -1;
   if (p->h < q->h)
      return 1;
   return (p->w > q->w) ? -1 : (p->w < q->w);
}

// qsort comparator: restore caller order using the index stashed in was_packed
static int rect_original_order(const void *a, const void *b)
{
   const stbrp_rect *p = (const stbrp_rect *) a;
   const stbrp_rect *q = (const stbrp_rect *) b;
   return (p->was_packed < q->was_packed) ? -1 : (p->was_packed > q->was_packed);
}

// Assign packed locations to rectangles. The rectangles are of type
// 'stbrp_rect' defined below, stored in the array 'rects', and there
// are 'num_rects' many of them.
//
// Rectangles which are successfully packed have the 'was_packed' flag
// set to a non-zero value and 'x' and 'y' store the minimum location
// on each axis (i.e. bottom-left in cartesian coordinates, top-left
// if you imagine y increasing downwards). Rectangles which do not fit
// have the 'was_packed' flag set to 0.
//
// You should not try to access the 'rects' array from another thread
// while this function is running, as the function temporarily reorders
// the array while it executes.
//
// To pack into another rectangle, you need to call stbrp_init_target
// again. To continue packing into the same rectangle, you can call
// this function again. Calling this multiple times with multiple rect
// arrays will probably produce worse packing results than calling it
// a single time with the full rectangle array, but the option is
// available.
//
// The function returns 1 if all of the rectangles were successfully
// packed and 0 otherwise.
// Pack 'num_rects' rectangles into the target described by 'context'.
// On return each rect's x/y hold its placement and was_packed is non-zero
// iff it fit; the array is returned in the caller's original order.
// Returns 1 when every rectangle was placed, 0 otherwise.
int stbrp_pack_rects(stbrp_context *context, stbrp_rect *rects, int num_rects)
{
   int idx;
   int everything_fit = 1;

   // stash each rect's original index in 'was_packed' so the array can be
   // restored to caller order after the height sort below
   for (idx = 0; idx < num_rects; ++idx)
      rects[idx].was_packed = idx;

   // tallest (then widest) rectangles first
   qsort(rects, num_rects, sizeof(rects[0]), rect_height_compare);

   for (idx = 0; idx < num_rects; ++idx) {
      if (rects[idx].w == 0 || rects[idx].h == 0) {
         // degenerate rect occupies no space
         rects[idx].x = rects[idx].y = 0;
      } else {
         stbrp__findresult fr = stbrp__skyline_pack_rectangle(context, rects[idx].w, rects[idx].h);
         if (fr.prev_link) {
            rects[idx].x = (stbrp_coord) fr.x;
            rects[idx].y = (stbrp_coord) fr.y;
         } else {
            // sentinel coordinates mark an unplaced rect
            rects[idx].x = rects[idx].y = STBRP__MAXVAL;
         }
      }
   }

   // restore the caller's ordering
   qsort(rects, num_rects, sizeof(rects[0]), rect_original_order);

   // turn the sentinel coordinates into was_packed flags
   for (idx = 0; idx < num_rects; ++idx) {
      rects[idx].was_packed = !(rects[idx].x == STBRP__MAXVAL && rects[idx].y == STBRP__MAXVAL);
      if (!rects[idx].was_packed)
         everything_fit = 0;
   }

   return everything_fit;
}
17,586
461
jart/cosmopolitan
false
cosmopolitan/third_party/stb/stb_rect_pack.h
#ifndef COSMOPOLITAN_THIRD_PARTY_STB_STB_RECT_PACK_H_
#define COSMOPOLITAN_THIRD_PARTY_STB_STB_RECT_PACK_H_

// sentinel stored in stbrp_rect.x/.y by stbrp_pack_rects() when a
// rectangle could not be packed
#define STBRP__MAXVAL 0x7fffffff

#define STB_RECT_PACK_VERSION 1

// heuristics accepted by stbrp_setup_heuristic()
#define STBRP_HEURISTIC_Skyline_default 0
#define STBRP_HEURISTIC_Skyline_BL_sortHeight 0
#define STBRP_HEURISTIC_Skyline_BF_sortHeight 1

#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_

typedef struct stbrp_context stbrp_context;
typedef struct stbrp_node stbrp_node;
typedef struct stbrp_rect stbrp_rect;
typedef int stbrp_coord;

// one skyline segment: a horizontal run whose left edge is at (x, y)
struct stbrp_node {
  stbrp_coord x, y;
  stbrp_node *next;
};

// packer state; treat as opaque and initialize with stbrp_init_target()
struct stbrp_context {
  int width;
  int height;
  int align;                // widths are rounded up to a multiple of this
  int init_mode;            // STBRP__INIT_* tag set by init
  int heuristic;            // STBRP_HEURISTIC_Skyline_*
  int num_nodes;            // length of caller-supplied node array
  stbrp_node *active_head;  // current skyline, sorted by x
  stbrp_node *free_head;    // unused nodes available for splicing
  stbrp_node extra[2];  // so optimal user-node-count is 'width' not 'width+2'
};

struct stbrp_rect {
  int id;            // reserved for your use:
  stbrp_coord w, h;  // input
  stbrp_coord x, y;  // output
  int was_packed;    // non-zero if valid packing
};                   // 16 bytes, nominally

void stbrp_init_target(stbrp_context *, int, int, stbrp_node *, int);
void stbrp_setup_allow_out_of_mem(stbrp_context *, int);
void stbrp_setup_heuristic(stbrp_context *, int);
int stbrp_pack_rects(stbrp_context *, stbrp_rect *, int);

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_THIRD_PARTY_STB_STB_RECT_PACK_H_ */
1,433
51
jart/cosmopolitan
false
cosmopolitan/third_party/stb/stb_image_resize.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│ │vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2020 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. 
│ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/assert.h" #include "libc/macros.internal.h" #include "libc/math.h" #include "libc/mem/mem.h" #include "libc/str/str.h" #include "third_party/stb/stb_image_resize.h" asm(".ident\t\"\\n\\n\ stb_image_resize (Public Domain)\\n\ Credit: Jorge L Rodriguez (@VinoBS), Sean Barrett, et al.\\n\ http://nothings.org/stb\""); #define STBIR_ASSERT(x) assert(x) #define STBIR_MALLOC(size, c) ((void)(c), malloc(size)) #define STBIR_FREE(ptr, c) ((void)(c), free(ptr)) #define STBIR__UNUSED_PARAM(v) #define STBIR__NOTUSED(v) #define STBIR__ARRAY_SIZE(a) ARRAYLEN(a) #ifndef STBIR_DEFAULT_FILTER_UPSAMPLE #define STBIR_DEFAULT_FILTER_UPSAMPLE STBIR_FILTER_CATMULLROM #endif #ifndef STBIR_DEFAULT_FILTER_DOWNSAMPLE #define STBIR_DEFAULT_FILTER_DOWNSAMPLE STBIR_FILTER_MITCHELL #endif #ifndef STBIR_PROGRESS_REPORT #define STBIR_PROGRESS_REPORT(float_0_to_1) #endif #ifndef STBIR_MAX_CHANNELS #define STBIR_MAX_CHANNELS 64 #endif #if STBIR_MAX_CHANNELS > 65536 #error "Too many channels; STBIR_MAX_CHANNELS must be no more than 65536." // because we store the indices in 16-bit variables #endif // This value is added to alpha just before premultiplication to avoid // zeroing out color values. It is equivalent to 2^-80. If you don't want // that behavior (it may interfere if you have floating point images with // very small alpha values) then you can define STBIR_NO_ALPHA_EPSILON to // disable it. 
#ifndef STBIR_ALPHA_EPSILON
#define STBIR_ALPHA_EPSILON \
  ((float)1 / (1 << 20) / (1 << 20) / (1 << 20) / (1 << 20))
#endif

// bytes per sample for each pixel type; must match stbir_datatype
static unsigned char stbir__type_size[] = {
    1,  // STBIR_TYPE_UINT8
    2,  // STBIR_TYPE_UINT16
    4,  // STBIR_TYPE_UINT32
    4,  // STBIR_TYPE_FLOAT
};

// Kernel function centered at 0
typedef float(stbir__kernel_fn)(float x, float scale);
typedef float(stbir__support_fn)(float scale);

// a resampling filter: kernel weight function plus its support radius
typedef struct {
  stbir__kernel_fn* kernel;
  stbir__support_fn* support;
} stbir__filter_info;

// When upsampling, the contributors are which source pixels contribute.
// When downsampling, the contributors are which destination pixels are
// contributed to.
typedef struct {
  int n0;  // First contributing pixel
  int n1;  // Last contributing pixel
} stbir__contributors;

// all state for one resize operation
typedef struct {
  const void* input_data;
  int input_w;
  int input_h;
  int input_stride_bytes;

  void* output_data;
  int output_w;
  int output_h;
  int output_stride_bytes;

  float s0, t0, s1, t1;

  float horizontal_shift;  // Units: output pixels
  float vertical_shift;    // Units: output pixels
  float horizontal_scale;
  float vertical_scale;

  int channels;
  int alpha_channel;
  uint32_t flags;
  stbir_datatype type;
  stbir_filter horizontal_filter;
  stbir_filter vertical_filter;
  stbir_edge edge_horizontal;
  stbir_edge edge_vertical;
  stbir_colorspace colorspace;

  stbir__contributors* horizontal_contributors;
  float* horizontal_coefficients;
  stbir__contributors* vertical_contributors;
  float* vertical_coefficients;

  int decode_buffer_pixels;
  float* decode_buffer;

  float* horizontal_buffer;

  // cache these because ceil/floor are inexplicably showing up in profile
  int horizontal_coefficient_width;
  int vertical_coefficient_width;
  int horizontal_filter_pixel_width;
  int vertical_filter_pixel_width;
  int horizontal_filter_pixel_margin;
  int vertical_filter_pixel_margin;
  int horizontal_num_contributors;
  int vertical_num_contributors;

  int ring_buffer_length_bytes;  // The length of an individual entry in the
                                 // ring buffer. The total number of ring
                                 // buffers is
                                 // stbir__get_filter_pixel_width(filter)
  int ring_buffer_num_entries;   // Total number of entries in the ring buffer.
  int ring_buffer_first_scanline;
  int ring_buffer_last_scanline;
  int ring_buffer_begin_index;  // first_scanline is at this index in the ring
                                // buffer
  float* ring_buffer;

  float* encode_buffer;  // A temporary buffer to store floats so we don't lose
                         // precision while we do multiply-adds.

  int horizontal_contributors_size;
  int horizontal_coefficients_size;
  int vertical_contributors_size;
  int vertical_coefficients_size;
  int decode_buffer_size;
  int horizontal_buffer_size;
  int ring_buffer_size;
  int encode_buffer_size;
} stbir__info;

static const float stbir__max_uint8_as_float = 255.0f;
static const float stbir__max_uint16_as_float = 65535.0f;
static const double stbir__max_uint32_as_float = 4294967295.0;

forceinline int stbir__min(int a, int b) {
  return a < b ? a : b;
}

// clamp x into [0, 1]
forceinline float stbir__saturate(float x) {
  if (x < 0) return 0;
  if (x > 1) return 1;
  return x;
}

#ifdef STBIR_SATURATE_INT
// clamp x into [0, 255]
forceinline uint8_t stbir__saturate8(int x) {
  if ((unsigned int)x <= 255) return x;
  if (x < 0) return 0;
  return 255;
}

// clamp x into [0, 65535]
forceinline uint16_t stbir__saturate16(int x) {
  if ((unsigned int)x <= 65535) return x;
  if (x < 0) return 0;
  return 65535;
}
#endif

// precomputed sRGB-byte → linear-float decode table, indexed by the byte
static float stbir__srgb_uchar_to_linear_float[256] = {
    0.000000f, 0.000304f, 0.000607f, 0.000911f, 0.001214f, 0.001518f,
    0.001821f, 0.002125f, 0.002428f, 0.002732f, 0.003035f, 0.003347f,
    0.003677f, 0.004025f, 0.004391f, 0.004777f, 0.005182f, 0.005605f,
    0.006049f, 0.006512f, 0.006995f, 0.007499f, 0.008023f, 0.008568f,
    0.009134f, 0.009721f, 0.010330f, 0.010960f, 0.011612f, 0.012286f,
    0.012983f, 0.013702f, 0.014444f, 0.015209f, 0.015996f, 0.016807f,
    0.017642f, 0.018500f, 0.019382f, 0.020289f, 0.021219f, 0.022174f,
    0.023153f, 0.024158f, 0.025187f, 0.026241f, 0.027321f, 0.028426f,
    0.029557f, 0.030713f, 0.031896f, 0.033105f, 0.034340f, 0.035601f,
    0.036889f, 0.038204f, 0.039546f, 0.040915f, 0.042311f, 0.043735f,
    0.045186f, 0.046665f, 0.048172f, 0.049707f, 0.051269f, 0.052861f,
    0.054480f, 0.056128f, 0.057805f, 0.059511f, 0.061246f, 0.063010f,
    0.064803f, 0.066626f, 0.068478f, 0.070360f, 0.072272f, 0.074214f,
    0.076185f, 0.078187f, 0.080220f, 0.082283f, 0.084376f, 0.086500f,
    0.088656f, 0.090842f, 0.093059f, 0.095307f, 0.097587f, 0.099899f,
    0.102242f, 0.104616f, 0.107023f, 0.109462f, 0.111932f, 0.114435f,
    0.116971f, 0.119538f, 0.122139f, 0.124772f, 0.127438f, 0.130136f,
    0.132868f, 0.135633f, 0.138432f, 0.141263f, 0.144128f, 0.147027f,
    0.149960f, 0.152926f, 0.155926f, 0.158961f, 0.162029f, 0.165132f,
    0.168269f, 0.171441f, 0.174647f, 0.177888f, 0.181164f, 0.184475f,
    0.187821f, 0.191202f, 0.194618f, 0.198069f, 0.201556f, 0.205079f,
    0.208637f, 0.212231f, 0.215861f, 0.219526f, 0.223228f, 0.226966f,
    0.230740f, 0.234551f, 0.238398f, 0.242281f, 0.246201f, 0.250158f,
    0.254152f, 0.258183f, 0.262251f, 0.266356f, 0.270498f, 0.274677f,
    0.278894f, 0.283149f, 0.287441f, 0.291771f, 0.296138f, 0.300544f,
    0.304987f, 0.309469f, 0.313989f, 0.318547f, 0.323143f, 0.327778f,
    0.332452f, 0.337164f, 0.341914f, 0.346704f, 0.351533f, 0.356400f,
    0.361307f, 0.366253f, 0.371238f, 0.376262f, 0.381326f, 0.386430f,
    0.391573f, 0.396755f, 0.401978f, 0.407240f, 0.412543f, 0.417885f,
    0.423268f, 0.428691f, 0.434154f, 0.439657f, 0.445201f, 0.450786f,
    0.456411f, 0.462077f, 0.467784f, 0.473532f, 0.479320f, 0.485150f,
    0.491021f, 0.496933f, 0.502887f, 0.508881f, 0.514918f, 0.520996f,
    0.527115f, 0.533276f, 0.539480f, 0.545725f, 0.552011f, 0.558340f,
    0.564712f, 0.571125f, 0.577581f, 0.584078f, 0.590619f, 0.597202f,
    0.603827f, 0.610496f, 0.617207f, 0.623960f, 0.630757f, 0.637597f,
    0.644480f, 0.651406f, 0.658375f, 0.665387f, 0.672443f, 0.679543f,
    0.686685f, 0.693872f, 0.701102f, 0.708376f, 0.715694f, 0.723055f,
    0.730461f, 0.737911f, 0.745404f, 0.752942f, 0.760525f, 0.768151f,
    0.775822f, 0.783538f, 0.791298f, 0.799103f, 0.806952f, 0.814847f,
    0.822786f, 0.830770f, 0.838799f, 0.846873f, 0.854993f, 0.863157f,
    0.871367f, 0.879622f, 0.887923f, 0.896269f, 0.904661f, 0.913099f,
    0.921582f, 0.930111f, 0.938686f, 0.947307f, 0.955974f, 0.964686f,
    0.973445f, 0.982251f, 0.991102f, 1.0f};

// exact sRGB decode transfer function (linear segment + power segment)
static float stbir__srgb_to_linear(float f) {
  if (f <= 0.04045f)
    return f / 12.92f;
  else
    return (float)pow((f + 0.055f) / 1.055f, 2.4f);
}

// exact sRGB encode transfer function (inverse of the above)
static float stbir__linear_to_srgb(float f) {
  if (f <= 0.0031308f)
    return f * 12.92f;
  else
    return 1.055f * (float)pow(f, 1 / 2.4f) - 0.055f;
}

#ifndef STBIR_NON_IEEE_FLOAT
// From https://gist.github.com/rygorous/2203834
// (bit-level float trick: requires IEEE-754 binary32 floats)

typedef union {
  uint32_t u;
  float f;
} stbir__FP32;

// packed (bias, scale) pairs indexed by float exponent+top mantissa bits
static const uint32_t fp32_to_srgb8_tab4[104] = {
    0x0073000d, 0x007a000d, 0x0080000d, 0x0087000d, 0x008d000d, 0x0094000d,
    0x009a000d, 0x00a1000d, 0x00a7001a, 0x00b4001a, 0x00c1001a, 0x00ce001a,
    0x00da001a, 0x00e7001a, 0x00f4001a, 0x0101001a, 0x010e0033, 0x01280033,
    0x01410033, 0x015b0033, 0x01750033, 0x018f0033, 0x01a80033, 0x01c20033,
    0x01dc0067, 0x020f0067, 0x02430067, 0x02760067, 0x02aa0067, 0x02dd0067,
    0x03110067, 0x03440067, 0x037800ce, 0x03df00ce, 0x044600ce, 0x04ad00ce,
    0x051400ce, 0x057b00c5, 0x05dd00bc, 0x063b00b5, 0x06970158, 0x07420142,
    0x07e30130, 0x087b0120, 0x090b0112, 0x09940106, 0x0a1700fc, 0x0a9500f2,
    0x0b0f01cb, 0x0bf401ae, 0x0ccb0195, 0x0d950180, 0x0e56016e, 0x0f0d015e,
    0x0fbc0150, 0x10630143, 0x11070264, 0x1238023e, 0x1357021d, 0x14660201,
    0x156601e9, 0x165a01d3, 0x174401c0, 0x182401af, 0x18fe0331, 0x1a9602fe,
    0x1c1502d2, 0x1d7e02ad, 0x1ed4028d, 0x201a0270, 0x21520256, 0x227d0240,
    0x239f0443, 0x25c003fe, 0x27bf03c4, 0x29a10392, 0x2b6a0367, 0x2d1d0341,
    0x2ebe031f, 0x304d0300, 0x31d105b0, 0x34a80555, 0x37520507, 0x39d504c5,
    0x3c37048b, 0x3e7c0458, 0x40a8042a, 0x42bd0401, 0x44c20798, 0x488e071e,
    0x4c1c06b6, 0x4f76065d, 0x52a50610, 0x55ac05cc, 0x5892058f, 0x5b590559,
    0x5e0c0a23, 0x631c0980, 0x67db08f6, 0x6c55087f, 0x70940818, 0x74a007bd,
    0x787d076c, 0x7c330723,
};

// linear float → sRGB byte via table lookup + mantissa interpolation
static uint8_t stbir__linear_to_srgb_uchar(float in) {
  static const stbir__FP32 almostone = {0x3f7fffff};  // 1-eps
  static const stbir__FP32 minval = {(127 - 13) << 23};
  uint32_t tab, bias, scale, t;
  stbir__FP32 f;

  // Clamp to [2^(-13), 1-eps]; these two values map to 0 and 1, respectively.
  // The tests are carefully written so that NaNs map to 0, same as in the
  // reference implementation.
  if (!(in > minval.f))  // written this way to catch NaNs
    in = minval.f;
  if (in > almostone.f) in = almostone.f;

  // Do the table lookup and unpack bias, scale
  f.f = in;
  tab = fp32_to_srgb8_tab4[(f.u - minval.u) >> 20];
  bias = (tab >> 16) << 9;
  scale = tab & 0xffff;

  // Grab next-highest mantissa bits and perform linear interpolation
  t = (f.u >> 12) & 0xff;
  return (unsigned char)((bias + scale * t) >> 16);
}

#else

// sRGB transition values, scaled by 1<<28
static int stbir__srgb_offset_to_linear_scaled[256] = {
    0,         40738,     122216,    203693,    285170,    366648,
    448125,    529603,    611080,    692557,    774035,    855852,
    942009,    1033024,   1128971,   1229926,   1335959,   1447142,
    1563542,   1685229,   1812268,   1944725,   2082664,   2226148,
    2375238,   2529996,   2690481,   2856753,   3028870,   3206888,
    3390865,   3580856,   3776916,   3979100,   4187460,   4402049,
    4622919,   4850123,   5083710,   5323731,   5570236,   5823273,
    6082892,   6349140,   6622065,   6901714,   7188133,   7481369,
    7781466,   8088471,   8402427,   8723380,   9051372,   9386448,
    9728650,   10078021,  10434603,  10798439,  11169569,  11548036,
    11933879,  12327139,  12727857,  13136073,  13551826,  13975156,
    14406100,  14844697,  15290987,  15745007,  16206795,  16676389,
    17153826,  17639142,  18132374,  18633560,  19142734,  19659934,
    20185196,  20718552,  21260042,  21809696,  22367554,  22933648,
    23508010,  24090680,  24681686,  25281066,  25888850,  26505076,
    27129772,  27762974,  28404716,  29055026,  29713942,  30381490,
    31057708,  31742624,  32436272,  33138682,  33849884,  34569912,
    35298800,  36036568,  36783260,  37538896,  38303512,  39077136,
    39859796,  40651528,  41452360,  42262316,  43081432,  43909732,
    44747252,  45594016,  46450052,  47315392,  48190064,  49074096,
    49967516,  50870356,  51782636,  52704392,  53635648,  54576432,
    55526772,  56486700,  57456236,  58435408,  59424248,  60422780,
    61431036,  62449032,  63476804,  64514376,  65561776,  66619028,
    67686160,  68763192,  69850160,  70947088,  72053992,  73170912,
    74297864,  75434880,  76581976,  77739184,  78906536,  80084040,
    81271736,  82469648,  83677792,  84896192,  86124888,  87363888,
    88613232,  89872928,  91143016,  92423512,  93714432,  95015816,
    96327688,  97650056,  98982952,  100326408, 101680440, 103045072,
    104420320, 105806224, 107202800, 108610064, 110028048, 111456776,
    112896264, 114346544, 115807632, 117279552, 118762328, 120255976,
    121760536, 123276016, 124802440, 126339832, 127888216, 129447616,
    131018048, 132599544, 134192112, 135795792, 137410592, 139036528,
    140673648, 142321952, 143981456, 145652208, 147334208, 149027488,
    150732064, 152447968, 154175200, 155913792, 157663776, 159425168,
    161197984, 162982240, 164777968, 166585184, 168403904, 170234160,
    172075968, 173929344, 175794320, 177670896, 179559120, 181458992,
    183370528, 185293776, 187228736, 189175424, 191133888, 193104112,
    195086128, 197079968, 199085648, 201103184, 203132592, 205173888,
    207227120, 209292272, 211369392, 213458480, 215559568, 217672656,
    219797792, 221934976, 224084240, 226245600, 228419056, 230604656,
    232802400, 235012320, 237234432, 239468736, 241715280, 243974080,
    246245120, 248528464, 250824112, 253132064, 255452368, 257785040,
    260130080, 262487520, 264857376, 267239664,
};

// linear float → sRGB byte via 8-step binary search in the table above
static uint8_t stbir__linear_to_srgb_uchar(float f) {
  int x = (int)(f * (1 << 28));  // has headroom so you don't need to clamp
  int v = 0;
  int i;

  // Refine the guess with a short binary search.
  i = v + 128;
  if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
  i = v + 64;
  if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
  i = v + 32;
  if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
  i = v + 16;
  if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
  i = v + 8;
  if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
  i = v + 4;
  if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
  i = v + 2;
  if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
  i = v + 1;
  if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;

  return (uint8_t)v;
}
#endif

// box filter smeared by the scale factor: flat top with sloped edges
static float stbir__filter_trapezoid(float x, float scale) {
  float halfscale = scale / 2;
  float t = 0.5f + halfscale;
  STBIR_ASSERT(scale <= 1);

  x = (float)fabs(x);

  if (x >= t)
    return 0;
  else {
    float r = 0.5f - halfscale;
    if (x <= r)
      return 1;
    else
      return (t - x) / scale;
  }
}

static float stbir__support_trapezoid(float scale) {
  STBIR_ASSERT(scale <= 1);
  return 0.5f + scale / 2;
}

// linear (tent) filter
static float stbir__filter_triangle(float x, float s) {
  STBIR__UNUSED_PARAM(s);

  x = (float)fabs(x);

  if (x <= 1.0f)
    return 1 - x;
  else
    return 0;
}

// cubic B-spline filter
static float stbir__filter_cubic(float x, float s) {
  STBIR__UNUSED_PARAM(s);

  x = (float)fabs(x);

  if (x < 1.0f)
    return (4 + x * x * (3 * x - 6)) / 6;
  else if (x < 2.0f)
    return (8 + x * (-12 + x * (6 - x))) / 6;

  return (0.0f);
}

// Catmull-Rom interpolating cubic
static float stbir__filter_catmullrom(float x, float s) {
  STBIR__UNUSED_PARAM(s);

  x = (float)fabs(x);

  if (x < 1.0f)
    return 1 - x * x * (2.5f - 1.5f * x);
  else if (x < 2.0f)
    return 2 - x * (4 + x * (0.5f * x - 2.5f));

  return (0.0f);
}

// Mitchell-Netravali cubic (B = C = 1/3)
static float stbir__filter_mitchell(float x, float s) {
  STBIR__UNUSED_PARAM(s);

  x = (float)fabs(x);

  if (x < 1.0f)
    return (16 + x * x * (21 * x - 36)) / 18;
  else if (x < 2.0f)
    return (32 + x * (-60 + x * (36 - 7 * x))) / 18;

  return (0.0f);
}

static float stbir__support_zero(float s) {
  STBIR__UNUSED_PARAM(s);
  return 0;
}

static float stbir__support_one(float s) {
  STBIR__UNUSED_PARAM(s);
  return 1;
}

static float stbir__support_two(float s) {
  STBIR__UNUSED_PARAM(s);
  return 2;
}

// indexed by stbir_filter enum values
static stbir__filter_info stbir__filter_info_table[] = {
    {NULL, stbir__support_zero},
    {stbir__filter_trapezoid, stbir__support_trapezoid},
    {stbir__filter_triangle, stbir__support_one},
    {stbir__filter_cubic, stbir__support_two},
    {stbir__filter_catmullrom, stbir__support_two},
    {stbir__filter_mitchell, stbir__support_two},
};

forceinline int stbir__use_upsampling(float ratio) {
  return ratio > 1;
}

forceinline int stbir__use_width_upsampling(stbir__info* stbir_info) {
  return stbir__use_upsampling(stbir_info->horizontal_scale);
}

forceinline int stbir__use_height_upsampling(stbir__info* stbir_info) {
  return stbir__use_upsampling(stbir_info->vertical_scale);
}

// This is the maximum number of input samples that can affect an output sample
// with the given filter
static int stbir__get_filter_pixel_width(stbir_filter filter, float scale) {
  STBIR_ASSERT(filter != 0);
  STBIR_ASSERT(filter < STBIR__ARRAY_SIZE(stbir__filter_info_table));

  if (stbir__use_upsampling(scale))
    return (int)ceil(stbir__filter_info_table[filter].support(1 / scale) * 2);
  else
    return (int)ceil(stbir__filter_info_table[filter].support(scale) * 2 /
                     scale);
}

// This is how much to expand buffers to account for filters seeking outside
// the image boundaries.
// Half the filter width: how far a filter can reach past either image edge.
static int stbir__get_filter_pixel_margin(stbir_filter filter, float scale) {
  return stbir__get_filter_pixel_width(filter, scale) / 2;
}

// Number of coefficients stored per contributor for this filter/scale.
static int stbir__get_coefficient_width(stbir_filter filter, float scale) {
  if (stbir__use_upsampling(scale))
    return (int)ceil(stbir__filter_info_table[filter].support(1 / scale) * 2);
  else
    return (int)ceil(stbir__filter_info_table[filter].support(scale) * 2);
}

// Number of contributor entries needed: one per output pixel when
// upsampling, one per (margin-padded) input pixel when downsampling.
static int stbir__get_contributors(float scale, stbir_filter filter,
                                   int input_size, int output_size) {
  if (stbir__use_upsampling(scale))
    return output_size;
  else
    return (input_size + stbir__get_filter_pixel_margin(filter, scale) * 2);
}

static int stbir__get_total_horizontal_coefficients(stbir__info* info) {
  return info->horizontal_num_contributors *
         stbir__get_coefficient_width(info->horizontal_filter,
                                      info->horizontal_scale);
}

static int stbir__get_total_vertical_coefficients(stbir__info* info) {
  return info->vertical_num_contributors *
         stbir__get_coefficient_width(info->vertical_filter,
                                      info->vertical_scale);
}

static stbir__contributors* stbir__get_contributor(
    stbir__contributors* contributors, int n) {
  return &contributors[n];
}

// For perf reasons this code is duplicated in
// stbir__resample_horizontal_upsample/downsample, if you change it here change
// it there too.
static float* stbir__get_coefficient(float* coefficients, stbir_filter filter,
                                     float scale, int n, int c) {
  int width = stbir__get_coefficient_width(filter, scale);
  return &coefficients[width * n + c];
}

// Map an out-of-range coordinate n into [0, max) per the edge mode.
static int stbir__edge_wrap_slow(stbir_edge edge, int n, int max) {
  switch (edge) {
    case STBIR_EDGE_ZERO:
      return 0;  // we'll decode the wrong pixel here, and then overwrite with
                 // 0s later

    case STBIR_EDGE_CLAMP:
      if (n < 0) return 0;

      if (n >= max) return max - 1;

      return n;  // NOTREACHED

    case STBIR_EDGE_REFLECT: {
      if (n < 0) {
        // NOTE(review): for n < 0 this condition is always true when max > 0,
        // so -n may exceed max-1 for n <= -max; matches the code as-is —
        // confirm intent against upstream before changing
        if (n < max)
          return -n;
        else
          return max - 1;
      }

      if (n >= max) {
        int max2 = max * 2;
        if (n >= max2)
          return 0;
        else
          return max2 - n - 1;
      }

      return n;  // NOTREACHED
    }

    case STBIR_EDGE_WRAP:
      if (n >= 0) return (n % max);
      else {
        int m = (-n) % max;

        if (m != 0) m = max - m;

        return (m);
      }
      // NOTREACHED

    default:
      STBIR_ASSERT(!"Unimplemented edge type");
      return 0;
  }
}

forceinline int stbir__edge_wrap(stbir_edge edge, int n, int max) {
  // avoid per-pixel switch
  if (n >= 0 && n < max) return n;
  return stbir__edge_wrap_slow(edge, n, max);
}

// What input pixels contribute to this output pixel?
static void stbir__calculate_sample_range_upsample(
    int n, float out_filter_radius, float scale_ratio, float out_shift,
    int* in_first_pixel, int* in_last_pixel, float* in_center_of_out) {
  float out_pixel_center = (float)n + 0.5f;
  float out_pixel_influence_lowerbound = out_pixel_center - out_filter_radius;
  float out_pixel_influence_upperbound = out_pixel_center + out_filter_radius;

  float in_pixel_influence_lowerbound =
      (out_pixel_influence_lowerbound + out_shift) / scale_ratio;
  float in_pixel_influence_upperbound =
      (out_pixel_influence_upperbound + out_shift) / scale_ratio;

  *in_center_of_out = (out_pixel_center + out_shift) / scale_ratio;
  *in_first_pixel = (int)(floor(in_pixel_influence_lowerbound + 0.5));
  *in_last_pixel = (int)(floor(in_pixel_influence_upperbound - 0.5));
}

// What output pixels does this input pixel contribute to?
// For input pixel n, compute the range of output pixels it influences and
// its center mapped into output space (mirror of the upsample variant above).
static void stbir__calculate_sample_range_downsample(
    int n, float in_pixels_radius, float scale_ratio, float out_shift,
    int* out_first_pixel, int* out_last_pixel, float* out_center_of_in) {
  float in_pixel_center = (float)n + 0.5f;
  float in_pixel_influence_lowerbound = in_pixel_center - in_pixels_radius;
  float in_pixel_influence_upperbound = in_pixel_center + in_pixels_radius;
  float out_pixel_influence_lowerbound =
      in_pixel_influence_lowerbound * scale_ratio - out_shift;
  float out_pixel_influence_upperbound =
      in_pixel_influence_upperbound * scale_ratio - out_shift;
  *out_center_of_in = in_pixel_center * scale_ratio - out_shift;
  *out_first_pixel = (int)(floor(out_pixel_influence_lowerbound + 0.5));
  *out_last_pixel = (int)(floor(out_pixel_influence_upperbound - 0.5));
}

// Fill one contributor group with normalized kernel weights for an
// upsampled output pixel, trimming leading/trailing zero weights by
// adjusting n0/n1.
static void stbir__calculate_coefficients_upsample(
    stbir_filter filter, float scale, int in_first_pixel, int in_last_pixel,
    float in_center_of_out, stbir__contributors* contributor,
    float* coefficient_group) {
  int i;
  float total_filter = 0;
  float filter_scale;
  STBIR_ASSERT(
      in_last_pixel - in_first_pixel <=
      (int)ceil(stbir__filter_info_table[filter].support(1 / scale) *
                2));  // Taken directly from stbir__get_coefficient_width()
                      // which we can't call because we don't know if we're
                      // horizontal or vertical.
  contributor->n0 = in_first_pixel;
  contributor->n1 = in_last_pixel;
  STBIR_ASSERT(contributor->n1 >= contributor->n0);
  for (i = 0; i <= in_last_pixel - in_first_pixel; i++) {
    float in_pixel_center = (float)(i + in_first_pixel) + 0.5f;
    coefficient_group[i] = stbir__filter_info_table[filter].kernel(
        in_center_of_out - in_pixel_center, 1 / scale);
    // If the coefficient is zero, skip it. (Don't do the <0 check here, we want
    // the influence of those outside pixels.)
    if (i == 0 && !coefficient_group[i]) {
      // Leading zero weight: shift the window start and redo this slot.
      contributor->n0 = ++in_first_pixel;
      i--;
      continue;
    }
    total_filter += coefficient_group[i];
  }
  // Kernel must have fallen to zero just past the window's right edge.
  STBIR_ASSERT(stbir__filter_info_table[filter].kernel(
                   (float)(in_last_pixel + 1) + 0.5f - in_center_of_out,
                   1 / scale) == 0);
  STBIR_ASSERT(total_filter > 0.9);
  STBIR_ASSERT(total_filter < 1.1f);  // Make sure it's not way off.
  // Make sure the sum of all coefficients is 1.
  filter_scale = 1 / total_filter;
  for (i = 0; i <= in_last_pixel - in_first_pixel; i++)
    coefficient_group[i] *= filter_scale;
  // Trim trailing zero weights by pulling n1 in.
  for (i = in_last_pixel - in_first_pixel; i >= 0; i--) {
    if (coefficient_group[i]) break;
    // This line has no weight. We can skip it.
    contributor->n1 = contributor->n0 + i - 1;
  }
}

// Fill one contributor group for a downsampled input pixel: each weight is
// kernel(x) * scale_ratio (area-weighted). Normalization happens later in
// stbir__normalize_downsample_coefficients.
static void stbir__calculate_coefficients_downsample(
    stbir_filter filter, float scale_ratio, int out_first_pixel,
    int out_last_pixel, float out_center_of_in, stbir__contributors* contributor,
    float* coefficient_group) {
  int i;
  STBIR_ASSERT(
      out_last_pixel - out_first_pixel <=
      (int)ceil(stbir__filter_info_table[filter].support(scale_ratio) *
                2));  // Taken directly from stbir__get_coefficient_width()
                      // which we can't call because we don't know if we're
                      // horizontal or vertical.
  contributor->n0 = out_first_pixel;
  contributor->n1 = out_last_pixel;
  STBIR_ASSERT(contributor->n1 >= contributor->n0);
  for (i = 0; i <= out_last_pixel - out_first_pixel; i++) {
    float out_pixel_center = (float)(i + out_first_pixel) + 0.5f;
    float x = out_pixel_center - out_center_of_in;
    coefficient_group[i] =
        stbir__filter_info_table[filter].kernel(x, scale_ratio) * scale_ratio;
  }
  STBIR_ASSERT(stbir__filter_info_table[filter].kernel(
                   (float)(out_last_pixel + 1) + 0.5f - out_center_of_in,
                   scale_ratio) == 0);
  // Trim trailing zero weights by pulling n1 in.
  for (i = out_last_pixel - out_first_pixel; i >= 0; i--) {
    if (coefficient_group[i]) break;
    // This line has no weight. We can skip it.
    contributor->n1 = contributor->n0 + i - 1;
  }
}

// Post-process downsample coefficients: rescale so each output pixel's total
// weight is exactly 1, then compact away leading zero/out-of-bounds
// coefficients and clamp n1 to the image.
static void stbir__normalize_downsample_coefficients(
    stbir__contributors* contributors, float* coefficients, stbir_filter filter,
    float scale_ratio, int input_size, int output_size) {
  int num_contributors =
      stbir__get_contributors(scale_ratio, filter, input_size, output_size);
  int num_coefficients = stbir__get_coefficient_width(filter, scale_ratio);
  int i, j;
  int skip;
  for (i = 0; i < output_size; i++) {
    float scale;
    float total = 0;
    // Sum every contribution landing on output pixel i. Contributor n0s are
    // monotonically increasing, so we can stop once n0 passes i.
    for (j = 0; j < num_contributors; j++) {
      if (i >= contributors[j].n0 && i <= contributors[j].n1) {
        float coefficient = *stbir__get_coefficient(
            coefficients, filter, scale_ratio, j, i - contributors[j].n0);
        total += coefficient;
      } else if (i < contributors[j].n0)
        break;
    }
    STBIR_ASSERT(total > 0.9f);
    STBIR_ASSERT(total < 1.1f);
    scale = 1 / total;
    // Second pass: apply the normalization factor.
    for (j = 0; j < num_contributors; j++) {
      if (i >= contributors[j].n0 && i <= contributors[j].n1)
        *stbir__get_coefficient(coefficients, filter, scale_ratio, j,
                                i - contributors[j].n0) *= scale;
      else if (i < contributors[j].n0)
        break;
    }
  }
  // Optimize: Skip zero coefficients and contributions outside of image bounds.
  // Do this after normalizing because normalization depends on the n0/n1
  // values.
  for (j = 0; j < num_contributors; j++) {
    int range, max, width;
    skip = 0;
    // Count leading zero coefficients...
    while (*stbir__get_coefficient(coefficients, filter, scale_ratio, j,
                                   skip) == 0)
      skip++;
    contributors[j].n0 += skip;
    // ...and any contributions that would land left of the image.
    while (contributors[j].n0 < 0) {
      contributors[j].n0++;
      skip++;
    }
    range = contributors[j].n1 - contributors[j].n0 + 1;
    max = stbir__min(num_coefficients, range);
    width = stbir__get_coefficient_width(filter, scale_ratio);
    // Shift the surviving coefficients down to index 0.
    for (i = 0; i < max; i++) {
      if (i + skip >= width) break;
      *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i) =
          *stbir__get_coefficient(coefficients, filter, scale_ratio, j,
                                  i + skip);
    }
    continue;  // no-op; loop bottom follows immediately
  }
  // Using min to avoid writing into invalid pixels.
  for (i = 0; i < num_contributors; i++)
    contributors[i].n1 = stbir__min(contributors[i].n1, output_size - 1);
}

// Each scan line uses the same kernel values so we should calculate the kernel
// values once and then we can use them for every scan line.
static void stbir__calculate_filters(stbir__contributors* contributors,
                                     float* coefficients, stbir_filter filter,
                                     float scale_ratio, float shift,
                                     int input_size, int output_size) {
  int n;
  int total_contributors =
      stbir__get_contributors(scale_ratio, filter, input_size, output_size);
  if (stbir__use_upsampling(scale_ratio)) {
    float out_pixels_radius =
        stbir__filter_info_table[filter].support(1 / scale_ratio) * scale_ratio;
    // Looping through out pixels
    for (n = 0; n < total_contributors; n++) {
      float in_center_of_out;  // Center of the current out pixel in the in
                               // pixel space
      int in_first_pixel, in_last_pixel;
      stbir__calculate_sample_range_upsample(n, out_pixels_radius, scale_ratio,
                                             shift, &in_first_pixel,
                                             &in_last_pixel, &in_center_of_out);
      stbir__calculate_coefficients_upsample(
          filter, scale_ratio, in_first_pixel, in_last_pixel, in_center_of_out,
          stbir__get_contributor(contributors, n),
          stbir__get_coefficient(coefficients, filter, scale_ratio, n, 0));
    }
  } else {
    float in_pixels_radius =
        stbir__filter_info_table[filter].support(scale_ratio) / scale_ratio;
    // Looping through in pixels
    for (n = 0; n < total_contributors; n++) {
      float out_center_of_in;  // Center of the current out pixel in the in
                               // pixel space
      int out_first_pixel, out_last_pixel;
      // Contributors are indexed from -margin, so shift n back into pixel
      // coordinates.
      int n_adjusted = n - stbir__get_filter_pixel_margin(filter, scale_ratio);
      stbir__calculate_sample_range_downsample(
          n_adjusted, in_pixels_radius, scale_ratio, shift, &out_first_pixel,
          &out_last_pixel, &out_center_of_in);
      stbir__calculate_coefficients_downsample(
          filter, scale_ratio, out_first_pixel, out_last_pixel,
          out_center_of_in, stbir__get_contributor(contributors, n),
          stbir__get_coefficient(coefficients, filter, scale_ratio, n, 0));
    }
    stbir__normalize_downsample_coefficients(contributors, coefficients, filter,
                                             scale_ratio, input_size,
                                             output_size);
  }
}

static float* stbir__get_decode_buffer(stbir__info* stbir_info) {
  // The 0 index of the decode buffer starts after the margin. This makes
  // it okay to use negative indexes on the decode buffer.
  return &stbir_info->decode_buffer[stbir_info->horizontal_filter_pixel_margin *
                                    stbir_info->channels];
}

// Combine type and colorspace into a single switch key.
#define STBIR__DECODE(type, colorspace) \
  ((type) * (STBIR_MAX_COLORSPACES) + (colorspace))

// Decode source scanline n (with horizontal margin) into the float decode
// buffer: converts to float, applies sRGB->linear where requested,
// premultiplies alpha when the input isn't premultiplied, and zero-fills
// for STBIR_EDGE_ZERO.
static optimizespeed void stbir__decode_scanline(stbir__info* stbir_info,
                                                 int n) {
  int c;
  int channels = stbir_info->channels;
  int alpha_channel = stbir_info->alpha_channel;
  int type = stbir_info->type;
  int colorspace = stbir_info->colorspace;
  int input_w = stbir_info->input_w;
  size_t input_stride_bytes = stbir_info->input_stride_bytes;
  float* decode_buffer = stbir__get_decode_buffer(stbir_info);
  stbir_edge edge_horizontal = stbir_info->edge_horizontal;
  stbir_edge edge_vertical = stbir_info->edge_vertical;
  size_t in_buffer_row_offset =
      stbir__edge_wrap(edge_vertical, n, stbir_info->input_h) *
      input_stride_bytes;
  const void* input_data = (char*)stbir_info->input_data + in_buffer_row_offset;
  int max_x = input_w + stbir_info->horizontal_filter_pixel_margin;
  int decode = STBIR__DECODE(type, colorspace);
  int x = -stbir_info->horizontal_filter_pixel_margin;
  // special handling for STBIR_EDGE_ZERO because it needs to return an item
  // that doesn't appear in the input, and we want to avoid paying overhead on
  // every pixel if not STBIR_EDGE_ZERO
  if (edge_vertical == STBIR_EDGE_ZERO && (n < 0 || n >= stbir_info->input_h)) {
    for (; x < max_x; x++)
      for (c = 0; c < channels; c++) decode_buffer[x * channels + c] = 0;
    return;
  }
  switch (decode) {
    case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_LINEAR):
      for (; x < max_x; x++) {
        int decode_pixel_index = x * channels;
        int input_pixel_index =
            stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
        for (c = 0; c < channels; c++)
          decode_buffer[decode_pixel_index + c] =
              ((float)((
                  const unsigned char*)input_data)[input_pixel_index + c]) /
              stbir__max_uint8_as_float;
      }
      break;
    case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_SRGB):
      for (; x < max_x; x++) {
        int decode_pixel_index = x * channels;
        int input_pixel_index =
            stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
        for (c = 0; c < channels; c++)
          decode_buffer[decode_pixel_index + c] =
              stbir__srgb_uchar_to_linear_float[(
                  (const unsigned char*)input_data)[input_pixel_index + c]];
        // Alpha stays linear unless the caller opted in to colorspace alpha.
        if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE))
          decode_buffer[decode_pixel_index + alpha_channel] =
              ((float)((const unsigned char*)
                           input_data)[input_pixel_index + alpha_channel]) /
              stbir__max_uint8_as_float;
      }
      break;
    case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_LINEAR):
      for (; x < max_x; x++) {
        int decode_pixel_index = x * channels;
        int input_pixel_index =
            stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
        for (c = 0; c < channels; c++)
          decode_buffer[decode_pixel_index + c] =
              ((float)((
                  const unsigned short*)input_data)[input_pixel_index + c]) /
              stbir__max_uint16_as_float;
      }
      break;
    case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_SRGB):
      for (; x < max_x; x++) {
        int decode_pixel_index = x * channels;
        int input_pixel_index =
            stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
        for (c = 0; c < channels; c++)
          decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear(
              ((float)((
                  const unsigned short*)input_data)[input_pixel_index + c]) /
              stbir__max_uint16_as_float);
        if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE))
          decode_buffer[decode_pixel_index + alpha_channel] =
              ((float)((const unsigned short*)
                           input_data)[input_pixel_index + alpha_channel]) /
              stbir__max_uint16_as_float;
      }
      break;
    case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_LINEAR):
      // 32-bit values go through double to keep precision before the float
      // divide.
      for (; x < max_x; x++) {
        int decode_pixel_index = x * channels;
        int input_pixel_index =
            stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
        for (c = 0; c < channels; c++)
          decode_buffer[decode_pixel_index + c] =
              (float)(((double)((const unsigned int*)
                                    input_data)[input_pixel_index + c]) /
                      stbir__max_uint32_as_float);
      }
      break;
    case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_SRGB):
      for (; x < max_x; x++) {
        int decode_pixel_index = x * channels;
        int input_pixel_index =
            stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
        for (c = 0; c < channels; c++)
          decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear(
              (float)(((double)((const unsigned int*)
                                    input_data)[input_pixel_index + c]) /
                      stbir__max_uint32_as_float));
        if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE))
          decode_buffer[decode_pixel_index + alpha_channel] =
              (float)(((double)((
                           const unsigned int*)input_data)[input_pixel_index +
                                                           alpha_channel]) /
                      stbir__max_uint32_as_float);
      }
      break;
    case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_LINEAR):
      for (; x < max_x; x++) {
        int decode_pixel_index = x * channels;
        int input_pixel_index =
            stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
        for (c = 0; c < channels; c++)
          decode_buffer[decode_pixel_index + c] =
              ((const float*)input_data)[input_pixel_index + c];
      }
      break;
    case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_SRGB):
      for (; x < max_x; x++) {
        int decode_pixel_index = x * channels;
        int input_pixel_index =
            stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
        for (c = 0; c < channels; c++)
          decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear(
              ((const float*)input_data)[input_pixel_index + c]);
        if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE))
          decode_buffer[decode_pixel_index + alpha_channel] =
              ((const float*)input_data)[input_pixel_index + alpha_channel];
      }
      break;
    default:
      STBIR_ASSERT(!"Unknown type/colorspace/channels combination.");
      break;
  }
  // Premultiply alpha into the color channels so filtering is correct.
  if (!(stbir_info->flags & STBIR_FLAG_ALPHA_PREMULTIPLIED)) {
    for (x = -stbir_info->horizontal_filter_pixel_margin; x < max_x; x++) {
      int decode_pixel_index = x * channels;
      // If the alpha value is 0 it will clobber the color values. Make sure
      // it's not.
      float alpha = decode_buffer[decode_pixel_index + alpha_channel];
#ifndef STBIR_NO_ALPHA_EPSILON
      if (stbir_info->type != STBIR_TYPE_FLOAT) {
        alpha += STBIR_ALPHA_EPSILON;
        decode_buffer[decode_pixel_index + alpha_channel] = alpha;
      }
#endif
      for (c = 0; c < channels; c++) {
        if (c == alpha_channel) continue;
        decode_buffer[decode_pixel_index + c] *= alpha;
      }
    }
  }
  // STBIR_EDGE_ZERO decoded in-bounds pixels above; now overwrite both
  // margin regions with zeros as promised.
  if (edge_horizontal == STBIR_EDGE_ZERO) {
    for (x = -stbir_info->horizontal_filter_pixel_margin; x < 0; x++) {
      for (c = 0; c < channels; c++) decode_buffer[x * channels + c] = 0;
    }
    for (x = input_w; x < max_x; x++) {
      for (c = 0; c < channels; c++) decode_buffer[x * channels + c] = 0;
    }
  }
}

// Address of ring buffer slot `index` (lengths are in floats).
static float* stbir__get_ring_buffer_entry(float* ring_buffer, int index,
                                           int ring_buffer_length) {
  return &ring_buffer[index * ring_buffer_length];
}

// Claim and zero the next ring buffer slot for scanline n; n must be the
// scanline directly after the current last one (asserted via the modular
// index check).
static float* stbir__add_empty_ring_buffer_entry(stbir__info* stbir_info,
                                                 int n) {
  int ring_buffer_index;
  float* ring_buffer;
  stbir_info->ring_buffer_last_scanline = n;
  if (stbir_info->ring_buffer_begin_index < 0) {
    // Ring buffer was empty; this scanline becomes the first entry.
    ring_buffer_index = stbir_info->ring_buffer_begin_index = 0;
    stbir_info->ring_buffer_first_scanline = n;
  } else {
    ring_buffer_index =
        (stbir_info->ring_buffer_begin_index +
         (stbir_info->ring_buffer_last_scanline -
          stbir_info->ring_buffer_first_scanline)) %
        stbir_info->ring_buffer_num_entries;
    STBIR_ASSERT(ring_buffer_index != stbir_info->ring_buffer_begin_index);
  }
  ring_buffer = stbir__get_ring_buffer_entry(
      stbir_info->ring_buffer, ring_buffer_index,
      stbir_info->ring_buffer_length_bytes / sizeof(float));
  bzero(ring_buffer, stbir_info->ring_buffer_length_bytes);
  return ring_buffer;
}

// Horizontally upsample the decode buffer into output_buffer, gathering
// weighted input pixels per output pixel. The channel-count switch is an
// intentional unrolling for speed (see the duplication note above
// stbir__get_coefficient).
static void stbir__resample_horizontal_upsample(stbir__info* stbir_info,
                                                float* output_buffer) {
  int x, k;
  int output_w = stbir_info->output_w;
  int channels = stbir_info->channels;
  float* decode_buffer = stbir__get_decode_buffer(stbir_info);
  stbir__contributors* horizontal_contributors =
      stbir_info->horizontal_contributors;
  float* horizontal_coefficients = stbir_info->horizontal_coefficients;
  int coefficient_width = stbir_info->horizontal_coefficient_width;
  for (x = 0; x < output_w; x++) {
    int n0 = horizontal_contributors[x].n0;
    int n1 = horizontal_contributors[x].n1;
    int out_pixel_index = x * channels;
    int coefficient_group = coefficient_width * x;
    int coefficient_counter = 0;
    STBIR_ASSERT(n1 >= n0);
    STBIR_ASSERT(n0 >= -stbir_info->horizontal_filter_pixel_margin);
    STBIR_ASSERT(n1 >= -stbir_info->horizontal_filter_pixel_margin);
    STBIR_ASSERT(n0 <
                 stbir_info->input_w + stbir_info->horizontal_filter_pixel_margin);
    STBIR_ASSERT(n1 <
                 stbir_info->input_w + stbir_info->horizontal_filter_pixel_margin);
    switch (channels) {
      case 1:
        for (k = n0; k <= n1; k++) {
          int in_pixel_index = k * 1;
          float coefficient =
              horizontal_coefficients[coefficient_group + coefficient_counter++];
          STBIR_ASSERT(coefficient != 0);
          output_buffer[out_pixel_index + 0] +=
              decode_buffer[in_pixel_index + 0] * coefficient;
        }
        break;
      case 2:
        for (k = n0; k <= n1; k++) {
          int in_pixel_index = k * 2;
          float coefficient =
              horizontal_coefficients[coefficient_group + coefficient_counter++];
          STBIR_ASSERT(coefficient != 0);
          output_buffer[out_pixel_index + 0] +=
              decode_buffer[in_pixel_index + 0] * coefficient;
          output_buffer[out_pixel_index + 1] +=
              decode_buffer[in_pixel_index + 1] * coefficient;
        }
        break;
      case 3:
        for (k = n0; k <= n1; k++) {
          int in_pixel_index = k * 3;
          float coefficient =
              horizontal_coefficients[coefficient_group + coefficient_counter++];
          STBIR_ASSERT(coefficient != 0);
          output_buffer[out_pixel_index + 0] +=
              decode_buffer[in_pixel_index + 0] * coefficient;
          output_buffer[out_pixel_index + 1] +=
              decode_buffer[in_pixel_index + 1] * coefficient;
          output_buffer[out_pixel_index + 2] +=
              decode_buffer[in_pixel_index + 2] * coefficient;
        }
        break;
      case 4:
        for (k = n0; k <= n1; k++) {
          int in_pixel_index = k * 4;
          float coefficient =
              horizontal_coefficients[coefficient_group + coefficient_counter++];
          STBIR_ASSERT(coefficient != 0);
          output_buffer[out_pixel_index + 0] +=
              decode_buffer[in_pixel_index + 0] * coefficient;
          output_buffer[out_pixel_index + 1] +=
              decode_buffer[in_pixel_index + 1] * coefficient;
          output_buffer[out_pixel_index + 2] +=
              decode_buffer[in_pixel_index + 2] * coefficient;
          output_buffer[out_pixel_index + 3] +=
              decode_buffer[in_pixel_index + 3] * coefficient;
        }
        break;
      default:
        for (k = n0; k <= n1; k++) {
          int in_pixel_index = k * channels;
          float coefficient =
              horizontal_coefficients[coefficient_group + coefficient_counter++];
          int c;
          STBIR_ASSERT(coefficient != 0);
          for (c = 0; c < channels; c++)
            output_buffer[out_pixel_index + c] +=
                decode_buffer[in_pixel_index + c] * coefficient;
        }
        break;
    }
  }
}

// Horizontally downsample the decode buffer into output_buffer, scattering
// each (margin-extended) input pixel into the output pixels it influences.
// Same per-channel unrolling strategy as the upsample path.
static void stbir__resample_horizontal_downsample(stbir__info* stbir_info,
                                                  float* output_buffer) {
  int x, k;
  int input_w = stbir_info->input_w;
  int channels = stbir_info->channels;
  float* decode_buffer = stbir__get_decode_buffer(stbir_info);
  stbir__contributors* horizontal_contributors =
      stbir_info->horizontal_contributors;
  float* horizontal_coefficients = stbir_info->horizontal_coefficients;
  int coefficient_width = stbir_info->horizontal_coefficient_width;
  int filter_pixel_margin = stbir_info->horizontal_filter_pixel_margin;
  int max_x = input_w + filter_pixel_margin * 2;
  STBIR_ASSERT(!stbir__use_width_upsampling(stbir_info));
  switch (channels) {
    case 1:
      for (x = 0; x < max_x; x++) {
        int n0 = horizontal_contributors[x].n0;
        int n1 = horizontal_contributors[x].n1;
        int in_x = x - filter_pixel_margin;
        int in_pixel_index = in_x * 1;
        int max_n = n1;
        int coefficient_group = coefficient_width * x;
        for (k = n0; k <= max_n; k++) {
          int out_pixel_index = k * 1;
          float coefficient =
              horizontal_coefficients[coefficient_group + k - n0];
          STBIR_ASSERT(coefficient != 0);
          output_buffer[out_pixel_index + 0] +=
              decode_buffer[in_pixel_index + 0] * coefficient;
        }
      }
      break;
    case 2:
      for (x = 0; x < max_x; x++) {
        int n0 = horizontal_contributors[x].n0;
        int n1 = horizontal_contributors[x].n1;
        int in_x = x - filter_pixel_margin;
        int in_pixel_index = in_x * 2;
        int max_n = n1;
        int coefficient_group = coefficient_width * x;
        for (k = n0; k <= max_n; k++) {
          int out_pixel_index = k * 2;
          float coefficient =
              horizontal_coefficients[coefficient_group + k - n0];
          STBIR_ASSERT(coefficient != 0);
          output_buffer[out_pixel_index + 0] +=
              decode_buffer[in_pixel_index + 0] * coefficient;
          output_buffer[out_pixel_index + 1] +=
              decode_buffer[in_pixel_index + 1] * coefficient;
        }
      }
      break;
    case 3:
      for (x = 0; x < max_x; x++) {
        int n0 = horizontal_contributors[x].n0;
        int n1 = horizontal_contributors[x].n1;
        int in_x = x - filter_pixel_margin;
        int in_pixel_index = in_x * 3;
        int max_n = n1;
        int coefficient_group = coefficient_width * x;
        for (k = n0; k <= max_n; k++) {
          int out_pixel_index = k * 3;
          float coefficient =
              horizontal_coefficients[coefficient_group + k - n0];
          STBIR_ASSERT(coefficient != 0);
          output_buffer[out_pixel_index + 0] +=
              decode_buffer[in_pixel_index + 0] * coefficient;
          output_buffer[out_pixel_index + 1] +=
              decode_buffer[in_pixel_index + 1] * coefficient;
          output_buffer[out_pixel_index + 2] +=
              decode_buffer[in_pixel_index + 2] * coefficient;
        }
      }
      break;
    case 4:
      for (x = 0; x < max_x; x++) {
        int n0 = horizontal_contributors[x].n0;
        int n1 = horizontal_contributors[x].n1;
        int in_x = x - filter_pixel_margin;
        int in_pixel_index = in_x * 4;
        int max_n = n1;
        int coefficient_group = coefficient_width * x;
        for (k = n0; k <= max_n; k++) {
          int out_pixel_index = k * 4;
          float coefficient =
              horizontal_coefficients[coefficient_group + k - n0];
          STBIR_ASSERT(coefficient != 0);
          output_buffer[out_pixel_index + 0] +=
              decode_buffer[in_pixel_index + 0] * coefficient;
          output_buffer[out_pixel_index + 1] +=
              decode_buffer[in_pixel_index + 1] * coefficient;
          output_buffer[out_pixel_index + 2] +=
              decode_buffer[in_pixel_index + 2] * coefficient;
          output_buffer[out_pixel_index + 3] +=
              decode_buffer[in_pixel_index + 3] * coefficient;
        }
      }
      break;
    default:
      for (x = 0; x < max_x; x++) {
        int n0 = horizontal_contributors[x].n0;
        int n1 = horizontal_contributors[x].n1;
        int in_x = x - filter_pixel_margin;
        int in_pixel_index = in_x * channels;
        int max_n = n1;
        int coefficient_group = coefficient_width * x;
        for (k = n0; k <= max_n; k++) {
          int c;
          int out_pixel_index = k * channels;
          float coefficient =
              horizontal_coefficients[coefficient_group + k - n0];
          STBIR_ASSERT(coefficient != 0);
          for (c = 0; c < channels; c++)
            output_buffer[out_pixel_index + c] +=
                decode_buffer[in_pixel_index + c] * coefficient;
        }
      }
      break;
  }
}

// Decode scanline n and horizontally resample it straight into a fresh
// ring buffer slot (used when the vertical pass is upsampling).
static void stbir__decode_and_resample_upsample(stbir__info* stbir_info,
                                                int n) {
  // Decode the nth scanline from the source image into the decode buffer.
  stbir__decode_scanline(stbir_info, n);
  // Now resample it into the ring buffer.
  if (stbir__use_width_upsampling(stbir_info))
    stbir__resample_horizontal_upsample(
        stbir_info, stbir__add_empty_ring_buffer_entry(stbir_info, n));
  else
    stbir__resample_horizontal_downsample(
        stbir_info, stbir__add_empty_ring_buffer_entry(stbir_info, n));
  // Now it's sitting in the ring buffer ready to be used as source for the
  // vertical sampling.
}

// Decode scanline n and horizontally resample it into the (zeroed)
// horizontal buffer (used when the vertical pass is downsampling).
static void stbir__decode_and_resample_downsample(stbir__info* stbir_info,
                                                  int n) {
  // Decode the nth scanline from the source image into the decode buffer.
  stbir__decode_scanline(stbir_info, n);
  bzero(stbir_info->horizontal_buffer,
        stbir_info->output_w * stbir_info->channels * sizeof(float));
  // Now resample it into the horizontal buffer.
  if (stbir__use_width_upsampling(stbir_info))
    stbir__resample_horizontal_upsample(stbir_info,
                                        stbir_info->horizontal_buffer);
  else
    stbir__resample_horizontal_downsample(stbir_info,
                                          stbir_info->horizontal_buffer);
  // Now it's sitting in the horizontal buffer ready to be distributed into the
  // ring buffers.
}

// Get the specified scan line from the ring buffer.
// Map an absolute source-scanline number to its slot in the circular ring
// buffer. `begin_index` is the slot that holds `first_scanline`; subsequent
// scanlines occupy consecutive slots modulo `ring_buffer_num_entries`.
static float* stbir__get_ring_buffer_scanline(
    int get_scanline, float* ring_buffer, int begin_index, int first_scanline,
    int ring_buffer_num_entries, int ring_buffer_length) {
  int ring_buffer_index =
      (begin_index + (get_scanline - first_scanline)) % ring_buffer_num_entries;
  return stbir__get_ring_buffer_entry(ring_buffer, ring_buffer_index,
                                      ring_buffer_length);
}

// Convert one fully-resampled scanline of floats in `encode_buffer` to the
// caller's output type/colorspace (selected by `decode`) and store it to
// `output_buffer`. Un-premultiplies alpha first when the input was not
// flagged STBIR_FLAG_ALPHA_PREMULTIPLIED.
static void stbir__encode_scanline(stbir__info* stbir_info, int num_pixels,
                                   void* output_buffer, float* encode_buffer,
                                   int channels, int alpha_channel,
                                   int decode) {
  int x;
  int n;
  int num_nonalpha;
  uint16_t nonalpha[STBIR_MAX_CHANNELS];

  if (!(stbir_info->flags & STBIR_FLAG_ALPHA_PREMULTIPLIED)) {
    for (x = 0; x < num_pixels; ++x) {
      int pixel_index = x * channels;

      float alpha = encode_buffer[pixel_index + alpha_channel];
      float reciprocal_alpha = alpha ? 1.0f / alpha : 0;

      // unrolling this produced a 1% slowdown upscaling a large RGBA
      // linear-space image on my machine - stb
      for (n = 0; n < channels; n++)
        if (n != alpha_channel)
          encode_buffer[pixel_index + n] *= reciprocal_alpha;

      // We added in a small epsilon to prevent the color channel from being
      // deleted with zero alpha. Because we only add it for integer types, it
      // will automatically be discarded on integer conversion, so we don't need
      // to subtract it back out (which would be problematic for numeric
      // precision reasons).
    }
  }

  // build a table of all channels that need colorspace correction, so
  // we don't perform colorspace correction on channels that don't need it.
  for (x = 0, num_nonalpha = 0; x < channels; ++x) {
    if (x != alpha_channel ||
        (stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE)) {
      nonalpha[num_nonalpha++] = (uint16_t)x;
    }
  }

#define STBIR__ROUND_INT(f) ((int)((f) + 0.5))
#define STBIR__ROUND_UINT(f) ((uint32_t)((f) + 0.5))

#ifdef STBIR__SATURATE_INT
#define STBIR__ENCODE_LINEAR8(f) \
  stbir__saturate8(STBIR__ROUND_INT((f)*stbir__max_uint8_as_float))
#define STBIR__ENCODE_LINEAR16(f) \
  stbir__saturate16(STBIR__ROUND_INT((f)*stbir__max_uint16_as_float))
#else
#define STBIR__ENCODE_LINEAR8(f)                       \
  (unsigned char)STBIR__ROUND_INT(stbir__saturate(f) * \
                                  stbir__max_uint8_as_float)
#define STBIR__ENCODE_LINEAR16(f)                       \
  (unsigned short)STBIR__ROUND_INT(stbir__saturate(f) * \
                                   stbir__max_uint16_as_float)
#endif

  switch (decode) {
    case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_LINEAR):
      for (x = 0; x < num_pixels; ++x) {
        int pixel_index = x * channels;
        for (n = 0; n < channels; n++) {
          int index = pixel_index + n;
          ((unsigned char*)output_buffer)[index] =
              STBIR__ENCODE_LINEAR8(encode_buffer[index]);
        }
      }
      break;

    case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_SRGB):
      for (x = 0; x < num_pixels; ++x) {
        int pixel_index = x * channels;
        for (n = 0; n < num_nonalpha; n++) {
          int index = pixel_index + nonalpha[n];
          ((unsigned char*)output_buffer)[index] =
              stbir__linear_to_srgb_uchar(encode_buffer[index]);
        }
        // Alpha stays linear unless the caller opted in to sRGB alpha.
        if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE))
          ((unsigned char*)output_buffer)[pixel_index + alpha_channel] =
              STBIR__ENCODE_LINEAR8(encode_buffer[pixel_index + alpha_channel]);
      }
      break;

    case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_LINEAR):
      for (x = 0; x < num_pixels; ++x) {
        int pixel_index = x * channels;
        for (n = 0; n < channels; n++) {
          int index = pixel_index + n;
          ((unsigned short*)output_buffer)[index] =
              STBIR__ENCODE_LINEAR16(encode_buffer[index]);
        }
      }
      break;

    case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_SRGB):
      for (x = 0; x < num_pixels; ++x) {
        int pixel_index = x * channels;
        for (n = 0; n < num_nonalpha; n++) {
          int index = pixel_index + nonalpha[n];
          ((unsigned short*)output_buffer)[index] =
              (unsigned short)STBIR__ROUND_INT(
                  stbir__linear_to_srgb(stbir__saturate(encode_buffer[index])) *
                  stbir__max_uint16_as_float);
        }
        if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE))
          ((unsigned short*)output_buffer)[pixel_index + alpha_channel] =
              STBIR__ENCODE_LINEAR16(
                  encode_buffer[pixel_index + alpha_channel]);
      }
      break;

    case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_LINEAR):
      for (x = 0; x < num_pixels; ++x) {
        int pixel_index = x * channels;
        for (n = 0; n < channels; n++) {
          int index = pixel_index + n;
          ((unsigned int*)output_buffer)[index] =
              (unsigned int)STBIR__ROUND_UINT(
                  ((double)stbir__saturate(encode_buffer[index])) *
                  stbir__max_uint32_as_float);
        }
      }
      break;

    case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_SRGB):
      for (x = 0; x < num_pixels; ++x) {
        int pixel_index = x * channels;
        for (n = 0; n < num_nonalpha; n++) {
          int index = pixel_index + nonalpha[n];
          ((unsigned int*)output_buffer)[index] =
              (unsigned int)STBIR__ROUND_UINT(
                  ((double)stbir__linear_to_srgb(
                      stbir__saturate(encode_buffer[index]))) *
                  stbir__max_uint32_as_float);
        }
        if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE))
          // BUGFIX: this used STBIR__ROUND_INT, i.e. a cast of a value up to
          // ~4.29e9 to signed int — undefined behavior (C11 6.3.1.4) and a
          // wrong result for alpha near 1.0. Use the unsigned round, matching
          // the linear uint32 path and the non-alpha branch above.
          ((unsigned int*)output_buffer)[pixel_index + alpha_channel] =
              (unsigned int)STBIR__ROUND_UINT(
                  ((double)stbir__saturate(
                      encode_buffer[pixel_index + alpha_channel])) *
                  stbir__max_uint32_as_float);
      }
      break;

    case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_LINEAR):
      for (x = 0; x < num_pixels; ++x) {
        int pixel_index = x * channels;
        for (n = 0; n < channels; n++) {
          int index = pixel_index + n;
          ((float*)output_buffer)[index] = encode_buffer[index];
        }
      }
      break;

    case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_SRGB):
      for (x = 0; x < num_pixels; ++x) {
        int pixel_index = x * channels;
        for (n = 0; n < num_nonalpha; n++) {
          int index = pixel_index + nonalpha[n];
          ((float*)output_buffer)[index] =
              stbir__linear_to_srgb(encode_buffer[index]);
        }
        if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE))
          ((float*)output_buffer)[pixel_index + alpha_channel] =
              encode_buffer[pixel_index + alpha_channel];
      }
      break;

    default:
      STBIR_ASSERT(!"Unknown type/colorspace/channels combination.");
      break;
  }
}

// Produce output row n by blending the contributing ring-buffer scanlines
// (height-upsampling path), then encode it to the destination image.
// NOTE(review): this function continues past this chunk.
static void stbir__resample_vertical_upsample(stbir__info* stbir_info, int n) {
  int x, k;
  int output_w = stbir_info->output_w;
  stbir__contributors* vertical_contributors =
      stbir_info->vertical_contributors;
  float* vertical_coefficients = stbir_info->vertical_coefficients;
  int channels = stbir_info->channels;
  int alpha_channel = stbir_info->alpha_channel;
  int type = stbir_info->type;
  int colorspace = stbir_info->colorspace;
  int ring_buffer_entries = stbir_info->ring_buffer_num_entries;
  void* output_data = stbir_info->output_data;
  float* encode_buffer = stbir_info->encode_buffer;
  int decode = STBIR__DECODE(type, colorspace);
  int coefficient_width = stbir_info->vertical_coefficient_width;
  int coefficient_counter;
  int contributor = n;

  float* ring_buffer = stbir_info->ring_buffer;
  int ring_buffer_begin_index = stbir_info->ring_buffer_begin_index;
  int ring_buffer_first_scanline = stbir_info->ring_buffer_first_scanline;
  int ring_buffer_length = stbir_info->ring_buffer_length_bytes / sizeof(float);

  int n0, n1, output_row_start;
  int coefficient_group = coefficient_width * contributor;

  n0 = vertical_contributors[contributor].n0;
  n1 = vertical_contributors[contributor].n1;

  output_row_start = n * stbir_info->output_stride_bytes;

  STBIR_ASSERT(stbir__use_height_upsampling(stbir_info));

  bzero(encode_buffer, output_w * sizeof(float) * channels);

  // I tried reblocking this for better cache usage of encode_buffer
  // (using x_outer, k, x_inner), but it lost speed.
  // -- stb
  coefficient_counter = 0;
  // Per-channel-count specializations of the vertical blend; the generic
  // default case handles any channel count.
  switch (channels) {
    case 1:
      for (k = n0; k <= n1; k++) {
        int coefficient_index = coefficient_counter++;
        float* ring_buffer_entry = stbir__get_ring_buffer_scanline(
            k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline,
            ring_buffer_entries, ring_buffer_length);
        float coefficient =
            vertical_coefficients[coefficient_group + coefficient_index];
        for (x = 0; x < output_w; ++x) {
          int in_pixel_index = x * 1;
          encode_buffer[in_pixel_index + 0] +=
              ring_buffer_entry[in_pixel_index + 0] * coefficient;
        }
      }
      break;
    case 2:
      for (k = n0; k <= n1; k++) {
        int coefficient_index = coefficient_counter++;
        float* ring_buffer_entry = stbir__get_ring_buffer_scanline(
            k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline,
            ring_buffer_entries, ring_buffer_length);
        float coefficient =
            vertical_coefficients[coefficient_group + coefficient_index];
        for (x = 0; x < output_w; ++x) {
          int in_pixel_index = x * 2;
          encode_buffer[in_pixel_index + 0] +=
              ring_buffer_entry[in_pixel_index + 0] * coefficient;
          encode_buffer[in_pixel_index + 1] +=
              ring_buffer_entry[in_pixel_index + 1] * coefficient;
        }
      }
      break;
    case 3:
      for (k = n0; k <= n1; k++) {
        int coefficient_index = coefficient_counter++;
        float* ring_buffer_entry = stbir__get_ring_buffer_scanline(
            k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline,
            ring_buffer_entries, ring_buffer_length);
        float coefficient =
            vertical_coefficients[coefficient_group + coefficient_index];
        for (x = 0; x < output_w; ++x) {
          int in_pixel_index = x * 3;
          encode_buffer[in_pixel_index + 0] +=
              ring_buffer_entry[in_pixel_index + 0] * coefficient;
          encode_buffer[in_pixel_index + 1] +=
              ring_buffer_entry[in_pixel_index + 1] * coefficient;
          encode_buffer[in_pixel_index + 2] +=
              ring_buffer_entry[in_pixel_index + 2] * coefficient;
        }
      }
      break;
    case 4:
      for (k = n0; k <= n1; k++) {
        int coefficient_index = coefficient_counter++;
        float* ring_buffer_entry = stbir__get_ring_buffer_scanline(
            k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline,
            ring_buffer_entries, ring_buffer_length);
        float coefficient =
            vertical_coefficients[coefficient_group + coefficient_index];
        for (x = 0; x < output_w; ++x) {
          int in_pixel_index = x * 4;
          encode_buffer[in_pixel_index + 0] +=
              ring_buffer_entry[in_pixel_index + 0] * coefficient;
          encode_buffer[in_pixel_index + 1] +=
              ring_buffer_entry[in_pixel_index + 1] * coefficient;
          encode_buffer[in_pixel_index + 2] +=
              ring_buffer_entry[in_pixel_index + 2] * coefficient;
          encode_buffer[in_pixel_index + 3] +=
              ring_buffer_entry[in_pixel_index + 3] * coefficient;
        }
      }
      break;
    default:
      for (k = n0; k <= n1; k++) {
        int coefficient_index = coefficient_counter++;
        float* ring_buffer_entry = stbir__get_ring_buffer_scanline(
            k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline,
            ring_buffer_entries, ring_buffer_length);
        float coefficient =
            vertical_coefficients[coefficient_group + coefficient_index];
        for (x = 0; x < output_w; ++x) {
          int in_pixel_index = x * channels;
          int c;
          for (c = 0; c < channels; c++)
            encode_buffer[in_pixel_index + c] +=
                ring_buffer_entry[in_pixel_index + c] * coefficient;
        }
      }
      break;
  }
  // The blended row is complete; convert it to the output type and write it.
  stbir__encode_scanline(stbir_info, output_w,
                         (char*)output_data + output_row_start, encode_buffer,
                         channels, alpha_channel, decode);
}

// Scatter the horizontally-resampled input row n into every ring-buffer row
// it contributes to (height-downsampling path). Rows accumulate with +=.
static void stbir__resample_vertical_downsample(stbir__info* stbir_info,
                                                int n) {
  int x, k;
  int output_w = stbir_info->output_w;
  stbir__contributors* vertical_contributors =
      stbir_info->vertical_contributors;
  float* vertical_coefficients = stbir_info->vertical_coefficients;
  int channels = stbir_info->channels;
  int ring_buffer_entries = stbir_info->ring_buffer_num_entries;
  float* horizontal_buffer = stbir_info->horizontal_buffer;
  int coefficient_width = stbir_info->vertical_coefficient_width;
  // Contributor entries are offset by the filter margin (rows above the
  // image also have contributor records).
  int contributor = n + stbir_info->vertical_filter_pixel_margin;

  float* ring_buffer = stbir_info->ring_buffer;
  int ring_buffer_begin_index = stbir_info->ring_buffer_begin_index;
  int ring_buffer_first_scanline = stbir_info->ring_buffer_first_scanline;
  int ring_buffer_length = stbir_info->ring_buffer_length_bytes / sizeof(float);
  int n0, n1;

  n0 = vertical_contributors[contributor].n0;
  n1 = vertical_contributors[contributor].n1;

  STBIR_ASSERT(!stbir__use_height_upsampling(stbir_info));

  for (k = n0; k <= n1; k++) {
    int coefficient_index = k - n0;
    int coefficient_group = coefficient_width * contributor;
    float coefficient =
        vertical_coefficients[coefficient_group + coefficient_index];

    float* ring_buffer_entry = stbir__get_ring_buffer_scanline(
        k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline,
        ring_buffer_entries, ring_buffer_length);

    switch (channels) {
      case 1:
        for (x = 0; x < output_w; x++) {
          int in_pixel_index = x * 1;
          ring_buffer_entry[in_pixel_index + 0] +=
              horizontal_buffer[in_pixel_index + 0] * coefficient;
        }
        break;
      case 2:
        for (x = 0; x < output_w; x++) {
          int in_pixel_index = x * 2;
          ring_buffer_entry[in_pixel_index + 0] +=
              horizontal_buffer[in_pixel_index + 0] * coefficient;
          ring_buffer_entry[in_pixel_index + 1] +=
              horizontal_buffer[in_pixel_index + 1] * coefficient;
        }
        break;
      case 3:
        for (x = 0; x < output_w; x++) {
          int in_pixel_index = x * 3;
          ring_buffer_entry[in_pixel_index + 0] +=
              horizontal_buffer[in_pixel_index + 0] * coefficient;
          ring_buffer_entry[in_pixel_index + 1] +=
              horizontal_buffer[in_pixel_index + 1] * coefficient;
          ring_buffer_entry[in_pixel_index + 2] +=
              horizontal_buffer[in_pixel_index + 2] * coefficient;
        }
        break;
      case 4:
        for (x = 0; x < output_w; x++) {
          int in_pixel_index = x * 4;
          ring_buffer_entry[in_pixel_index + 0] +=
              horizontal_buffer[in_pixel_index + 0] * coefficient;
          ring_buffer_entry[in_pixel_index + 1] +=
              horizontal_buffer[in_pixel_index + 1] * coefficient;
          ring_buffer_entry[in_pixel_index + 2] +=
              horizontal_buffer[in_pixel_index + 2] * coefficient;
          ring_buffer_entry[in_pixel_index + 3] +=
              horizontal_buffer[in_pixel_index + 3] * coefficient;
        }
        break;
      default:
        for (x = 0; x < output_w; x++) {
          int in_pixel_index = x * channels;
          int c;
          for (c = 0; c < channels; c++)
            ring_buffer_entry[in_pixel_index + c] +=
                horizontal_buffer[in_pixel_index + c] * coefficient;
        }
        break;
    }
  }
}

// Drive the height-upsampling pipeline: for each output row, evict stale
// ring-buffer scanlines, decode/resample the ones needed, then blend.
// NOTE(review): this function continues past this chunk.
static void stbir__buffer_loop_upsample(stbir__info* stbir_info) {
  int y;
  float scale_ratio = stbir_info->vertical_scale;
  float out_scanlines_radius =
      stbir__filter_info_table[stbir_info->vertical_filter].support(
          1 / scale_ratio) *
      scale_ratio;

  STBIR_ASSERT(stbir__use_height_upsampling(stbir_info));

  for (y = 0; y < stbir_info->output_h; y++) {
    float in_center_of_out =
        0;  // Center of the current out scanline in the in scanline space
    int in_first_scanline = 0, in_last_scanline = 0;

    stbir__calculate_sample_range_upsample(
        y, out_scanlines_radius, scale_ratio, stbir_info->vertical_shift,
        &in_first_scanline, &in_last_scanline, &in_center_of_out);

    STBIR_ASSERT(in_last_scanline - in_first_scanline + 1 <=
                 stbir_info->ring_buffer_num_entries);

    if (stbir_info->ring_buffer_begin_index >= 0) {
      // Get rid of whatever we don't need anymore.
      while (in_first_scanline > stbir_info->ring_buffer_first_scanline) {
        if (stbir_info->ring_buffer_first_scanline ==
            stbir_info->ring_buffer_last_scanline) {
          // We just popped the last scanline off the ring buffer.
          // Reset it to the empty state.
          stbir_info->ring_buffer_begin_index = -1;
          stbir_info->ring_buffer_first_scanline = 0;
          stbir_info->ring_buffer_last_scanline = 0;
          break;
        } else {
          stbir_info->ring_buffer_first_scanline++;
          stbir_info->ring_buffer_begin_index =
              (stbir_info->ring_buffer_begin_index + 1) %
              stbir_info->ring_buffer_num_entries;
        }
      }
    }

    // Load in new ones.
    if (stbir_info->ring_buffer_begin_index < 0)
      stbir__decode_and_resample_upsample(stbir_info, in_first_scanline);

    while (in_last_scanline > stbir_info->ring_buffer_last_scanline)
      stbir__decode_and_resample_upsample(
          stbir_info, stbir_info->ring_buffer_last_scanline + 1);

    // Now all buffers should be ready to write a row of vertical sampling.
    stbir__resample_vertical_upsample(stbir_info, y);

    STBIR_PROGRESS_REPORT((float)y / stbir_info->output_h);
  }
}

// Flush every ring-buffer scanline older than `first_necessary_scanline` to
// the output image (rows outside [0, output_h) are discarded — they only
// existed for the filter margin), advancing the ring-buffer head.
static void stbir__empty_ring_buffer(stbir__info* stbir_info,
                                     int first_necessary_scanline) {
  int output_stride_bytes = stbir_info->output_stride_bytes;
  int channels = stbir_info->channels;
  int alpha_channel = stbir_info->alpha_channel;
  int type = stbir_info->type;
  int colorspace = stbir_info->colorspace;
  int output_w = stbir_info->output_w;
  void* output_data = stbir_info->output_data;
  int decode = STBIR__DECODE(type, colorspace);

  float* ring_buffer = stbir_info->ring_buffer;
  int ring_buffer_length = stbir_info->ring_buffer_length_bytes / sizeof(float);

  if (stbir_info->ring_buffer_begin_index >= 0) {
    // Get rid of whatever we don't need anymore.
    while (first_necessary_scanline > stbir_info->ring_buffer_first_scanline) {
      if (stbir_info->ring_buffer_first_scanline >= 0 &&
          stbir_info->ring_buffer_first_scanline < stbir_info->output_h) {
        int output_row_start =
            stbir_info->ring_buffer_first_scanline * output_stride_bytes;
        float* ring_buffer_entry = stbir__get_ring_buffer_entry(
            ring_buffer, stbir_info->ring_buffer_begin_index,
            ring_buffer_length);
        stbir__encode_scanline(
            stbir_info, output_w, (char*)output_data + output_row_start,
            ring_buffer_entry, channels, alpha_channel, decode);
        STBIR_PROGRESS_REPORT((float)stbir_info->ring_buffer_first_scanline /
                              stbir_info->output_h);
      }

      if (stbir_info->ring_buffer_first_scanline ==
          stbir_info->ring_buffer_last_scanline) {
        // We just popped the last scanline off the ring buffer.
        // Reset it to the empty state.
        stbir_info->ring_buffer_begin_index = -1;
        stbir_info->ring_buffer_first_scanline = 0;
        stbir_info->ring_buffer_last_scanline = 0;
        break;
      } else {
        stbir_info->ring_buffer_first_scanline++;
        stbir_info->ring_buffer_begin_index =
            (stbir_info->ring_buffer_begin_index + 1) %
            stbir_info->ring_buffer_num_entries;
      }
    }
  }
}

// Drive the height-downsampling pipeline: walk input rows (including the
// filter margin), scatter each into the ring-buffer output rows it touches,
// flushing finished rows as the window advances.
static void stbir__buffer_loop_downsample(stbir__info* stbir_info) {
  int y;
  float scale_ratio = stbir_info->vertical_scale;
  int output_h = stbir_info->output_h;
  float in_pixels_radius =
      stbir__filter_info_table[stbir_info->vertical_filter].support(
          scale_ratio) /
      scale_ratio;
  int pixel_margin = stbir_info->vertical_filter_pixel_margin;
  int max_y = stbir_info->input_h + pixel_margin;

  STBIR_ASSERT(!stbir__use_height_upsampling(stbir_info));

  for (y = -pixel_margin; y < max_y; y++) {
    float out_center_of_in;  // Center of the current out scanline in the in
                             // scanline space
    int out_first_scanline, out_last_scanline;

    stbir__calculate_sample_range_downsample(
        y, in_pixels_radius, scale_ratio, stbir_info->vertical_shift,
        &out_first_scanline, &out_last_scanline, &out_center_of_in);

    STBIR_ASSERT(out_last_scanline - out_first_scanline + 1 <=
                 stbir_info->ring_buffer_num_entries);

    // This input row contributes to no visible output rows; skip it.
    if (out_last_scanline < 0 || out_first_scanline >= output_h) continue;

    stbir__empty_ring_buffer(stbir_info, out_first_scanline);

    stbir__decode_and_resample_downsample(stbir_info, y);

    // Load in new ones.
    if (stbir_info->ring_buffer_begin_index < 0)
      stbir__add_empty_ring_buffer_entry(stbir_info, out_first_scanline);

    while (out_last_scanline > stbir_info->ring_buffer_last_scanline)
      stbir__add_empty_ring_buffer_entry(
          stbir_info, stbir_info->ring_buffer_last_scanline + 1);

    // Now the horizontal buffer is ready to write to all ring buffer rows.
    stbir__resample_vertical_downsample(stbir_info, y);
  }

  stbir__empty_ring_buffer(stbir_info, stbir_info->output_h);
}

// Record the basic geometry of the resize job on the info struct.
static void stbir__setup(stbir__info* info, int input_w, int input_h,
                         int output_w, int output_h, int channels) {
  info->input_w = input_w;
  info->input_h = input_h;
  info->output_w = output_w;
  info->output_h = output_h;
  info->channels = channels;
}

// Derive the scale/shift mapping between input and output. An explicit
// 4-element `transform` (x scale, y scale, x offset, y offset) overrides the
// mapping computed from the (s0,t0)-(s1,t1) source sub-region.
static void stbir__calculate_transform(stbir__info* info, float s0, float t0,
                                       float s1, float t1, float* transform) {
  info->s0 = s0;
  info->t0 = t0;
  info->s1 = s1;
  info->t1 = t1;

  if (transform) {
    info->horizontal_scale = transform[0];
    info->vertical_scale = transform[1];
    info->horizontal_shift = transform[2];
    info->vertical_shift = transform[3];
  } else {
    info->horizontal_scale =
        ((float)info->output_w / info->input_w) / (s1 - s0);
    info->vertical_scale = ((float)info->output_h / info->input_h) / (t1 - t0);

    info->horizontal_shift = s0 * info->output_w / (s1 - s0);
    info->vertical_shift = t0 * info->output_h / (t1 - t0);
  }
}

// Resolve STBIR_FILTER_DEFAULT (0) into the concrete default filter for each
// axis, based on whether that axis is being upsampled or downsampled.
// NOTE(review): this function continues past this chunk.
static void stbir__choose_filter(stbir__info* info, stbir_filter h_filter,
                                 stbir_filter v_filter) {
  if (h_filter == 0)
    h_filter = stbir__use_upsampling(info->horizontal_scale)
                   ? STBIR_DEFAULT_FILTER_UPSAMPLE
                   : STBIR_DEFAULT_FILTER_DOWNSAMPLE;
  if (v_filter == 0)
    v_filter = stbir__use_upsampling(info->vertical_scale) ?
STBIR_DEFAULT_FILTER_UPSAMPLE
                   : STBIR_DEFAULT_FILTER_DOWNSAMPLE;
  info->horizontal_filter = h_filter;
  info->vertical_filter = v_filter;
}

// Compute the size of every scratch buffer the resize needs and the total
// number of bytes of temporary memory required. Also fills the per-buffer
// *_size fields and ring_buffer_num_entries on `info` as a side effect.
static uint32_t stbir__calculate_memory(stbir__info* info) {
  int pixel_margin = stbir__get_filter_pixel_margin(info->horizontal_filter,
                                                    info->horizontal_scale);
  int filter_height = stbir__get_filter_pixel_width(info->vertical_filter,
                                                    info->vertical_scale);

  info->horizontal_num_contributors =
      stbir__get_contributors(info->horizontal_scale, info->horizontal_filter,
                              info->input_w, info->output_w);
  info->vertical_num_contributors =
      stbir__get_contributors(info->vertical_scale, info->vertical_filter,
                              info->input_h, info->output_h);

  // One extra entry because floating point precision problems sometimes cause
  // an extra to be necessary.
  info->ring_buffer_num_entries = filter_height + 1;

  info->horizontal_contributors_size =
      info->horizontal_num_contributors * sizeof(stbir__contributors);
  info->horizontal_coefficients_size =
      stbir__get_total_horizontal_coefficients(info) * sizeof(float);
  info->vertical_contributors_size =
      info->vertical_num_contributors * sizeof(stbir__contributors);
  info->vertical_coefficients_size =
      stbir__get_total_vertical_coefficients(info) * sizeof(float);
  info->decode_buffer_size =
      (info->input_w + pixel_margin * 2) * info->channels * sizeof(float);
  info->horizontal_buffer_size =
      info->output_w * info->channels * sizeof(float);
  info->ring_buffer_size = info->output_w * info->channels *
                           info->ring_buffer_num_entries * sizeof(float);
  info->encode_buffer_size = info->output_w * info->channels * sizeof(float);

  STBIR_ASSERT(info->horizontal_filter != 0);
  STBIR_ASSERT(info->horizontal_filter <
               STBIR__ARRAY_SIZE(
                   stbir__filter_info_table));  // this now happens too late
  STBIR_ASSERT(info->vertical_filter != 0);
  STBIR_ASSERT(info->vertical_filter <
               STBIR__ARRAY_SIZE(
                   stbir__filter_info_table));  // this now happens too late

  if (stbir__use_height_upsampling(info))
    // The horizontal buffer is for when we're downsampling the height and we
    // can't output the result of sampling the decode buffer directly into the
    // ring buffers.
    info->horizontal_buffer_size = 0;
  else
    // The encode buffer is to retain precision in the height upsampling method
    // and isn't used when height downsampling.
    info->encode_buffer_size = 0;

  return info->horizontal_contributors_size +
         info->horizontal_coefficients_size +
         info->vertical_contributors_size + info->vertical_coefficients_size +
         info->decode_buffer_size + info->horizontal_buffer_size +
         info->ring_buffer_size + info->encode_buffer_size;
}

// Validate arguments, lay the scratch buffers out inside caller-supplied
// `tempmem`, build the filter tables, and run the resize. Returns 1 on
// success, 0 on invalid arguments or insufficient memory.
// NOTE(review): this function continues past this chunk.
static int stbir__resize_allocated(
    stbir__info* info, const void* input_data, int input_stride_in_bytes,
    void* output_data, int output_stride_in_bytes, int alpha_channel,
    uint32_t flags, stbir_datatype type, stbir_edge edge_horizontal,
    stbir_edge edge_vertical, stbir_colorspace colorspace, void* tempmem,
    size_t tempmem_size_in_bytes) {
  size_t memory_required = stbir__calculate_memory(info);

  // A stride of 0 means "tightly packed".
  int width_stride_input =
      input_stride_in_bytes
          ? input_stride_in_bytes
          : info->channels * info->input_w * stbir__type_size[type];
  int width_stride_output =
      output_stride_in_bytes
          ? output_stride_in_bytes
          : info->channels * info->output_w * stbir__type_size[type];

#ifdef STBIR_DEBUG_OVERWRITE_TEST
#define OVERWRITE_ARRAY_SIZE 8
  unsigned char overwrite_output_before_pre[OVERWRITE_ARRAY_SIZE];
  unsigned char overwrite_tempmem_before_pre[OVERWRITE_ARRAY_SIZE];
  unsigned char overwrite_output_after_pre[OVERWRITE_ARRAY_SIZE];
  unsigned char overwrite_tempmem_after_pre[OVERWRITE_ARRAY_SIZE];

  size_t begin_forbidden =
      width_stride_output * (info->output_h - 1) +
      info->output_w * info->channels * stbir__type_size[type];
  memcpy(overwrite_output_before_pre,
         &((unsigned char*)output_data)[-OVERWRITE_ARRAY_SIZE],
         OVERWRITE_ARRAY_SIZE);
  memcpy(overwrite_output_after_pre,
         &((unsigned char*)output_data)[begin_forbidden],
         OVERWRITE_ARRAY_SIZE);
  memcpy(overwrite_tempmem_before_pre,
         &((unsigned char*)tempmem)[-OVERWRITE_ARRAY_SIZE],
         OVERWRITE_ARRAY_SIZE);
  memcpy(overwrite_tempmem_after_pre,
         &((unsigned char*)tempmem)[tempmem_size_in_bytes],
         OVERWRITE_ARRAY_SIZE);
#endif

  STBIR_ASSERT(info->channels >= 0);
  STBIR_ASSERT(info->channels <= STBIR_MAX_CHANNELS);

  if (info->channels < 0 || info->channels > STBIR_MAX_CHANNELS) return 0;

  STBIR_ASSERT(info->horizontal_filter <
               STBIR__ARRAY_SIZE(stbir__filter_info_table));
  STBIR_ASSERT(info->vertical_filter <
               STBIR__ARRAY_SIZE(stbir__filter_info_table));

  if (info->horizontal_filter >= STBIR__ARRAY_SIZE(stbir__filter_info_table))
    return 0;
  if (info->vertical_filter >= STBIR__ARRAY_SIZE(stbir__filter_info_table))
    return 0;

  // Negative alpha channel means "no alpha": skip all alpha special-casing.
  if (alpha_channel < 0)
    flags |= STBIR_FLAG_ALPHA_USES_COLORSPACE | STBIR_FLAG_ALPHA_PREMULTIPLIED;

  if (!(flags & STBIR_FLAG_ALPHA_USES_COLORSPACE) ||
      !(flags & STBIR_FLAG_ALPHA_PREMULTIPLIED)) {
    STBIR_ASSERT(alpha_channel >= 0 && alpha_channel < info->channels);
  }

  if (alpha_channel >= info->channels) return 0;

  STBIR_ASSERT(tempmem);

  if (!tempmem) return 0;

  STBIR_ASSERT(tempmem_size_in_bytes >= memory_required);

  if (tempmem_size_in_bytes < memory_required) return 0;

  bzero(tempmem, tempmem_size_in_bytes);
  // NOTE(review): continuation of stbir__resize_allocated from the previous
  // chunk — copy arguments onto the info struct and derive filter geometry.
  info->input_data = input_data;
  info->input_stride_bytes = width_stride_input;
  info->output_data = output_data;
  info->output_stride_bytes = width_stride_output;
  info->alpha_channel = alpha_channel;
  info->flags = flags;
  info->type = type;
  info->edge_horizontal = edge_horizontal;
  info->edge_vertical = edge_vertical;
  info->colorspace = colorspace;

  info->horizontal_coefficient_width = stbir__get_coefficient_width(
      info->horizontal_filter, info->horizontal_scale);
  info->vertical_coefficient_width =
      stbir__get_coefficient_width(info->vertical_filter, info->vertical_scale);
  info->horizontal_filter_pixel_width = stbir__get_filter_pixel_width(
      info->horizontal_filter, info->horizontal_scale);
  info->vertical_filter_pixel_width = stbir__get_filter_pixel_width(
      info->vertical_filter, info->vertical_scale);
  info->horizontal_filter_pixel_margin = stbir__get_filter_pixel_margin(
      info->horizontal_filter, info->horizontal_scale);
  info->vertical_filter_pixel_margin = stbir__get_filter_pixel_margin(
      info->vertical_filter, info->vertical_scale);

  info->ring_buffer_length_bytes =
      info->output_w * info->channels * sizeof(float);
  info->decode_buffer_pixels =
      info->input_w + info->horizontal_filter_pixel_margin * 2;

  // Carve the scratch buffers out of tempmem back-to-back, in the same order
  // and with the same sizes that stbir__calculate_memory computed.
#define STBIR__NEXT_MEMPTR(current, newtype) \
  (newtype*)(((unsigned char*)current) + current##_size)

  info->horizontal_contributors = (stbir__contributors*)tempmem;
  info->horizontal_coefficients =
      STBIR__NEXT_MEMPTR(info->horizontal_contributors, float);
  info->vertical_contributors =
      STBIR__NEXT_MEMPTR(info->horizontal_coefficients, stbir__contributors);
  info->vertical_coefficients =
      STBIR__NEXT_MEMPTR(info->vertical_contributors, float);
  info->decode_buffer = STBIR__NEXT_MEMPTR(info->vertical_coefficients, float);

  if (stbir__use_height_upsampling(info)) {
    info->horizontal_buffer = NULL;
    info->ring_buffer = STBIR__NEXT_MEMPTR(info->decode_buffer, float);
    info->encode_buffer = STBIR__NEXT_MEMPTR(info->ring_buffer, float);

    STBIR_ASSERT(
        (size_t)STBIR__NEXT_MEMPTR(info->encode_buffer, unsigned char) ==
        (size_t)tempmem + tempmem_size_in_bytes);
  } else {
    info->horizontal_buffer = STBIR__NEXT_MEMPTR(info->decode_buffer, float);
    info->ring_buffer = STBIR__NEXT_MEMPTR(info->horizontal_buffer, float);
    info->encode_buffer = NULL;

    STBIR_ASSERT((size_t)STBIR__NEXT_MEMPTR(info->ring_buffer, unsigned char) ==
                 (size_t)tempmem + tempmem_size_in_bytes);
  }

#undef STBIR__NEXT_MEMPTR

  // This signals that the ring buffer is empty
  info->ring_buffer_begin_index = -1;

  stbir__calculate_filters(
      info->horizontal_contributors, info->horizontal_coefficients,
      info->horizontal_filter, info->horizontal_scale, info->horizontal_shift,
      info->input_w, info->output_w);
  stbir__calculate_filters(info->vertical_contributors,
                           info->vertical_coefficients, info->vertical_filter,
                           info->vertical_scale, info->vertical_shift,
                           info->input_h, info->output_h);

  STBIR_PROGRESS_REPORT(0);

  if (stbir__use_height_upsampling(info))
    stbir__buffer_loop_upsample(info);
  else
    stbir__buffer_loop_downsample(info);

  STBIR_PROGRESS_REPORT(1);

#ifdef STBIR_DEBUG_OVERWRITE_TEST
  STBIR_ASSERT(memcmp(overwrite_output_before_pre,
                      &((unsigned char*)output_data)[-OVERWRITE_ARRAY_SIZE],
                      OVERWRITE_ARRAY_SIZE) == 0);
  STBIR_ASSERT(memcmp(overwrite_output_after_pre,
                      &((unsigned char*)output_data)[begin_forbidden],
                      OVERWRITE_ARRAY_SIZE) == 0);
  STBIR_ASSERT(memcmp(overwrite_tempmem_before_pre,
                      &((unsigned char*)tempmem)[-OVERWRITE_ARRAY_SIZE],
                      OVERWRITE_ARRAY_SIZE) == 0);
  STBIR_ASSERT(memcmp(overwrite_tempmem_after_pre,
                      &((unsigned char*)tempmem)[tempmem_size_in_bytes],
                      OVERWRITE_ARRAY_SIZE) == 0);
#endif

  return 1;
}

// Common implementation behind every public stbir_resize_* entry point:
// sizes the scratch memory, allocates it, runs the resize, frees it.
// NOTE(review): this function continues past this chunk.
static int stbir__resize_arbitrary(
    void* alloc_context, const void* input_data, int input_w, int input_h,
    int input_stride_in_bytes, void* output_data, int output_w, int output_h,
    int output_stride_in_bytes, float s0, float t0, float s1, float t1,
    float* transform, int channels, int alpha_channel, uint32_t flags,
    stbir_datatype type, stbir_filter h_filter,
    stbir_filter v_filter, stbir_edge edge_horizontal,
    stbir_edge edge_vertical, stbir_colorspace colorspace) {
  stbir__info info;
  int result;
  size_t memory_required;
  void* extra_memory;

  stbir__setup(&info, input_w, input_h, output_w, output_h, channels);
  stbir__calculate_transform(&info, s0, t0, s1, t1, transform);
  stbir__choose_filter(&info, h_filter, v_filter);
  memory_required = stbir__calculate_memory(&info);
  extra_memory = STBIR_MALLOC(memory_required, alloc_context);

  if (!extra_memory) return 0;

  result = stbir__resize_allocated(
      &info, input_data, input_stride_in_bytes, output_data,
      output_stride_in_bytes, alpha_channel, flags, type, edge_horizontal,
      edge_vertical, colorspace, extra_memory, memory_required);

  STBIR_FREE(extra_memory, alloc_context);

  return result;
}

// Simplest API: 8-bit linear resize, clamp edges, default filter, no alpha
// handling (alpha_channel = -1). Returns 1 on success, 0 on failure.
int stbir_resize_uint8(const unsigned char* input_pixels, int input_w,
                       int input_h, int input_stride_in_bytes,
                       unsigned char* output_pixels, int output_w, int output_h,
                       int output_stride_in_bytes, int num_channels) {
  return stbir__resize_arbitrary(
      NULL, input_pixels, input_w, input_h, input_stride_in_bytes,
      output_pixels, output_w, output_h, output_stride_in_bytes, 0, 0, 1, 1,
      NULL, num_channels, -1, 0, STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT,
      STBIR_FILTER_DEFAULT, STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
      STBIR_COLORSPACE_LINEAR);
}

// Same as stbir_resize_uint8 but for float pixels.
int stbir_resize_float(const float* input_pixels, int input_w, int input_h,
                       int input_stride_in_bytes, float* output_pixels,
                       int output_w, int output_h, int output_stride_in_bytes,
                       int num_channels) {
  return stbir__resize_arbitrary(
      NULL, input_pixels, input_w, input_h, input_stride_in_bytes,
      output_pixels, output_w, output_h, output_stride_in_bytes, 0, 0, 1, 1,
      NULL, num_channels, -1, 0, STBIR_TYPE_FLOAT, STBIR_FILTER_DEFAULT,
      STBIR_FILTER_DEFAULT, STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
      STBIR_COLORSPACE_LINEAR);
}

// 8-bit sRGB resize with explicit alpha channel and flags; clamp edges.
int stbir_resize_uint8_srgb(const unsigned char* input_pixels, int input_w,
                            int input_h, int input_stride_in_bytes,
                            unsigned char* output_pixels, int output_w,
                            int output_h, int output_stride_in_bytes,
                            int num_channels, int alpha_channel, int flags) {
  return stbir__resize_arbitrary(
      NULL, input_pixels, input_w, input_h, input_stride_in_bytes,
      output_pixels, output_w, output_h, output_stride_in_bytes, 0, 0, 1, 1,
      NULL, num_channels, alpha_channel, flags, STBIR_TYPE_UINT8,
      STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, STBIR_EDGE_CLAMP,
      STBIR_EDGE_CLAMP, STBIR_COLORSPACE_SRGB);
}

// As stbir_resize_uint8_srgb, with a selectable edge mode (same both axes).
int stbir_resize_uint8_srgb_edgemode(const unsigned char* input_pixels,
                                     int input_w, int input_h,
                                     int input_stride_in_bytes,
                                     unsigned char* output_pixels, int output_w,
                                     int output_h, int output_stride_in_bytes,
                                     int num_channels, int alpha_channel,
                                     int flags, stbir_edge edge_wrap_mode) {
  return stbir__resize_arbitrary(
      NULL, input_pixels, input_w, input_h, input_stride_in_bytes,
      output_pixels, output_w, output_h, output_stride_in_bytes, 0, 0, 1, 1,
      NULL, num_channels, alpha_channel, flags, STBIR_TYPE_UINT8,
      STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT, edge_wrap_mode,
      edge_wrap_mode, STBIR_COLORSPACE_SRGB);
}

// Fully-parameterized 8-bit resize: edge mode, filter, colorspace, allocator.
int stbir_resize_uint8_generic(const unsigned char* input_pixels, int input_w,
                               int input_h, int input_stride_in_bytes,
                               unsigned char* output_pixels, int output_w,
                               int output_h, int output_stride_in_bytes,
                               int num_channels, int alpha_channel, int flags,
                               stbir_edge edge_wrap_mode, stbir_filter filter,
                               stbir_colorspace space, void* alloc_context) {
  return stbir__resize_arbitrary(
      alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes,
      output_pixels, output_w, output_h, output_stride_in_bytes, 0, 0, 1, 1,
      NULL, num_channels, alpha_channel, flags, STBIR_TYPE_UINT8, filter,
      filter, edge_wrap_mode, edge_wrap_mode, space);
}

// Fully-parameterized 16-bit resize.
int stbir_resize_uint16_generic(const uint16_t* input_pixels, int input_w,
                                int input_h, int input_stride_in_bytes,
                                uint16_t* output_pixels, int output_w,
                                int output_h, int output_stride_in_bytes,
                                int num_channels, int alpha_channel, int flags,
                                stbir_edge edge_wrap_mode, stbir_filter filter,
                                stbir_colorspace space, void* alloc_context) {
  return stbir__resize_arbitrary(
      alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes,
      output_pixels, output_w, output_h, output_stride_in_bytes, 0, 0, 1, 1,
      NULL, num_channels, alpha_channel, flags, STBIR_TYPE_UINT16, filter,
      filter, edge_wrap_mode, edge_wrap_mode, space);
}

// Fully-parameterized float resize.
int stbir_resize_float_generic(const float* input_pixels, int input_w,
                               int input_h, int input_stride_in_bytes,
                               float* output_pixels, int output_w, int output_h,
                               int output_stride_in_bytes, int num_channels,
                               int alpha_channel, int flags,
                               stbir_edge edge_wrap_mode, stbir_filter filter,
                               stbir_colorspace space, void* alloc_context) {
  return stbir__resize_arbitrary(
      alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes,
      output_pixels, output_w, output_h, output_stride_in_bytes, 0, 0, 1, 1,
      NULL, num_channels, alpha_channel, flags, STBIR_TYPE_FLOAT, filter,
      filter, edge_wrap_mode, edge_wrap_mode, space);
}

// Full-control resize: datatype plus per-axis edge modes and filters.
int stbir_resize(const void* input_pixels, int input_w, int input_h,
                 int input_stride_in_bytes, void* output_pixels, int output_w,
                 int output_h, int output_stride_in_bytes,
                 stbir_datatype datatype, int num_channels, int alpha_channel,
                 int flags, stbir_edge edge_mode_horizontal,
                 stbir_edge edge_mode_vertical, stbir_filter filter_horizontal,
                 stbir_filter filter_vertical, stbir_colorspace space,
                 void* alloc_context) {
  return stbir__resize_arbitrary(
      alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes,
      output_pixels, output_w, output_h, output_stride_in_bytes, 0, 0, 1, 1,
      NULL, num_channels, alpha_channel, flags, datatype, filter_horizontal,
      filter_vertical, edge_mode_horizontal, edge_mode_vertical, space);
}

// Full-control resize with an explicit subpixel transform
// (x/y scale and x/y offset) instead of the implicit whole-image mapping.
int stbir_resize_subpixel(
    const void* input_pixels, int input_w, int input_h,
    int input_stride_in_bytes, void* output_pixels, int output_w, int output_h,
    int output_stride_in_bytes, stbir_datatype datatype, int num_channels,
    int alpha_channel, int flags, stbir_edge edge_mode_horizontal,
    stbir_edge edge_mode_vertical, stbir_filter filter_horizontal,
    stbir_filter filter_vertical, stbir_colorspace space, void* alloc_context,
    float x_scale, float y_scale, float x_offset, float y_offset) {
  float transform[4];
  transform[0] = x_scale;
  transform[1] = y_scale;
  transform[2] = x_offset;
  transform[3] = y_offset;
  return stbir__resize_arbitrary(
      alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes,
      output_pixels, output_w, output_h, output_stride_in_bytes, 0, 0, 1, 1,
      transform, num_channels, alpha_channel, flags, datatype,
      filter_horizontal, filter_vertical, edge_mode_horizontal,
      edge_mode_vertical, space);
}

// Full-control resize of a normalized sub-region [s0,s1]x[t0,t1] of the
// source image.
int stbir_resize_region(
    const void* input_pixels, int input_w, int input_h,
    int input_stride_in_bytes, void* output_pixels, int output_w, int output_h,
    int output_stride_in_bytes, stbir_datatype datatype, int num_channels,
    int alpha_channel, int flags, stbir_edge edge_mode_horizontal,
    stbir_edge edge_mode_vertical, stbir_filter filter_horizontal,
    stbir_filter filter_vertical, stbir_colorspace space, void* alloc_context,
    float s0, float t0, float s1, float t1) {
  return stbir__resize_arbitrary(
      alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes,
      output_pixels, output_w, output_h, output_stride_in_bytes, s0, t0, s1, t1,
      NULL, num_channels, alpha_channel, flags, datatype, filter_horizontal,
      filter_vertical, edge_mode_horizontal, edge_mode_vertical, space);
}
94,884
2,374
jart/cosmopolitan
false
cosmopolitan/third_party/stb/stb_image_resize.h
#ifndef COSMOPOLITAN_THIRD_PARTY_STB_STB_IMAGE_RESIZE_H_ #define COSMOPOLITAN_THIRD_PARTY_STB_STB_IMAGE_RESIZE_H_ #if !(__ASSEMBLER__ + __LINKER__ + 0) COSMOPOLITAN_C_START_ // Easy-to-use API: // // * "input pixels" points to an array of image data with 'num_channels' // channels (e.g. RGB=3, RGBA=4) // * input_w is input image width (x-axis), input_h is input image height // (y-axis) // * stride is the offset between successive rows of image data in memory, // in bytes. you can // specify 0 to mean packed continuously in memory // * alpha channel is treated identically to other channels. // * colorspace is linear or sRGB as specified by function name // * returned result is 1 for success or 0 in case of an error. // #define STBIR_ASSERT() to trigger an assert on parameter validation // errors. // * Memory required grows approximately linearly with input and output // size, but with // discontinuities at input_w == output_w and input_h == output_h. // * These functions use a "default" resampling filter defined at compile // time. To change the filter, // you can change the compile-time defaults by #defining // STBIR_DEFAULT_FILTER_UPSAMPLE and STBIR_DEFAULT_FILTER_DOWNSAMPLE, or // you can use the medium-complexity API. int stbir_resize_uint8(const unsigned char* input_pixels, int input_w, int input_h, int input_stride_in_bytes, unsigned char* output_pixels, int output_w, int output_h, int output_stride_in_bytes, int num_channels); int stbir_resize_float(const float* input_pixels, int input_w, int input_h, int input_stride_in_bytes, float* output_pixels, int output_w, int output_h, int output_stride_in_bytes, int num_channels); // The following functions interpret image data as gamma-corrected sRGB. // Specify STBIR_ALPHA_CHANNEL_NONE if you have no alpha channel, // or otherwise provide the index of the alpha channel. Flags value // of 0 will probably do the right thing if you're not sure what // the flags mean. 
#define STBIR_ALPHA_CHANNEL_NONE -1 // Set this flag if your texture has premultiplied alpha. Otherwise, stbir will // use alpha-weighted resampling (effectively premultiplying, resampling, // then unpremultiplying). #define STBIR_FLAG_ALPHA_PREMULTIPLIED (1 << 0) // The specified alpha channel should be handled as gamma-corrected value even // when doing sRGB operations. #define STBIR_FLAG_ALPHA_USES_COLORSPACE (1 << 1) int stbir_resize_uint8_srgb(const unsigned char* input_pixels, int input_w, int input_h, int input_stride_in_bytes, unsigned char* output_pixels, int output_w, int output_h, int output_stride_in_bytes, int num_channels, int alpha_channel, int flags); typedef enum { STBIR_EDGE_CLAMP = 1, STBIR_EDGE_REFLECT = 2, STBIR_EDGE_WRAP = 3, STBIR_EDGE_ZERO = 4, } stbir_edge; // This function adds the ability to specify how requests to sample off the edge // of the image are handled. int stbir_resize_uint8_srgb_edgemode(const unsigned char* input_pixels, int input_w, int input_h, int input_stride_in_bytes, unsigned char* output_pixels, int output_w, int output_h, int output_stride_in_bytes, int num_channels, int alpha_channel, int flags, stbir_edge edge_wrap_mode); // Medium-complexity API // // This extends the easy-to-use API as follows: // // * Alpha-channel can be processed separately // * If alpha_channel is not STBIR_ALPHA_CHANNEL_NONE // * Alpha channel will not be gamma corrected (unless // flags&STBIR_FLAG_GAMMA_CORRECT) // * Filters will be weighted by alpha channel (unless // flags&STBIR_FLAG_ALPHA_PREMULTIPLIED) // * Filter can be selected explicitly // * uint16 image type // * sRGB colorspace available for all types // * context parameter for passing to STBIR_MALLOC typedef enum { // use same filter type that easy-to-use API chooses STBIR_FILTER_DEFAULT = 0, // a trapezoid w/1-pixel wide ramps, same result as box for integer // scale ratios STBIR_FILTER_BOX = 1, // On upsampling, produces same results as bilinear texture filtering 
STBIR_FILTER_TRIANGLE = 2, // The cubic b-spline (aka Mitchell-Netrevalli with B=1,C=0), gaussian-esque STBIR_FILTER_CUBICBSPLINE = 3, // An interpolating cubic spline STBIR_FILTER_CATMULLROM = 4, // Mitchell-Netrevalli filter with B=1/3, C=1/3 STBIR_FILTER_MITCHELL = 5, } stbir_filter; typedef enum { STBIR_COLORSPACE_LINEAR, STBIR_COLORSPACE_SRGB, STBIR_MAX_COLORSPACES, } stbir_colorspace; // The following functions are all identical except for the type of the image // data int stbir_resize_uint8_generic(const unsigned char* input_pixels, int input_w, int input_h, int input_stride_in_bytes, unsigned char* output_pixels, int output_w, int output_h, int output_stride_in_bytes, int num_channels, int alpha_channel, int flags, stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, void* alloc_context); int stbir_resize_uint16_generic(const uint16_t* input_pixels, int input_w, int input_h, int input_stride_in_bytes, uint16_t* output_pixels, int output_w, int output_h, int output_stride_in_bytes, int num_channels, int alpha_channel, int flags, stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, void* alloc_context); int stbir_resize_float_generic(const float* input_pixels, int input_w, int input_h, int input_stride_in_bytes, float* output_pixels, int output_w, int output_h, int output_stride_in_bytes, int num_channels, int alpha_channel, int flags, stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space, void* alloc_context); // Full-complexity API // // This extends the medium API as follows: // // * uint32 image type // * not typesafe // * separate filter types for each axis // * separate edge modes for each axis // * can specify scale explicitly for subpixel correctness // * can specify image source tile using texture coordinates typedef enum { STBIR_TYPE_UINT8, STBIR_TYPE_UINT16, STBIR_TYPE_UINT32, STBIR_TYPE_FLOAT, STBIR_MAX_TYPES } stbir_datatype; int stbir_resize(const void* input_pixels, int input_w, int 
input_h, int input_stride_in_bytes, void* output_pixels, int output_w, int output_h, int output_stride_in_bytes, stbir_datatype datatype, int num_channels, int alpha_channel, int flags, stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, stbir_filter filter_horizontal, stbir_filter filter_vertical, stbir_colorspace space, void* alloc_context); int stbir_resize_subpixel( const void* input_pixels, int input_w, int input_h, int input_stride_in_bytes, void* output_pixels, int output_w, int output_h, int output_stride_in_bytes, stbir_datatype datatype, int num_channels, int alpha_channel, int flags, stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, stbir_filter filter_horizontal, stbir_filter filter_vertical, stbir_colorspace space, void* alloc_context, float x_scale, float y_scale, float x_offset, float y_offset); int stbir_resize_region( const void* input_pixels, int input_w, int input_h, int input_stride_in_bytes, void* output_pixels, int output_w, int output_h, int output_stride_in_bytes, stbir_datatype datatype, int num_channels, int alpha_channel, int flags, stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical, stbir_filter filter_horizontal, stbir_filter filter_vertical, stbir_colorspace space, void* alloc_context, float s0, float t0, float s1, float t1); // (s0, t0) & (s1, t1) are the top-left and bottom right corner (uv addressing // style: [0, 1]x[0, 1]) of a region of the input image to use. COSMOPOLITAN_C_END_ #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_THIRD_PARTY_STB_STB_IMAGE_RESIZE_H_ */
8,969
193
jart/cosmopolitan
false
cosmopolitan/third_party/puff/README.cosmo
DESCRIPTION puff is a tiny implementation of deflate decompression LICENSE zlib license ORIGIN Origin: [email protected]:madler/zlib.git Commit: 03614c56ad299f9b238c75aa1e66f0c08fc4fc8b Author: Mark Adler <[email protected]> Date: Sun Oct 30 08:36:13 2016 -0700 LOCAL CHANGES None.
310
19
jart/cosmopolitan
false
cosmopolitan/third_party/puff/puff.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:4;tab-width:8;coding:utf-8 -*-│ │vi: set net ft=c ts=4 sts=4 sw=4 fenc=utf-8 :vi│ ╚──────────────────────────────────────────────────────────────────────────────╝ │ Copyright (C) 2002-2013 Mark Adler, all rights reserved │ │ version 2.3, 21 Jan 2013 │ │ │ │ This software is provided 'as-is', without any express or implied │ │ warranty. In no event will the author be held liable for any damages │ │ arising from the use of this software. │ │ │ │ Permission is granted to anyone to use this software for any purpose, │ │ including commercial applications, and to alter it and redistribute it │ │ freely, subject to the following restrictions: │ │ │ │ 1. The origin of this software must not be misrepresented; you must not │ │ claim that you wrote the original software. If you use this software │ │ in a product, an acknowledgment in the product documentation would be │ │ appreciated but is not required. │ │ 2. Altered source versions must be plainly marked as such, and must not be │ │ misrepresented as being the original software. │ │ 3. This notice may not be removed or altered from any source distribution. │ │ │ │ Mark Adler [email protected] │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/runtime/runtime.h" #include "third_party/puff/puff.h" // clang-format off asm(".ident\t\"\\n\\n\ puff (zlib License)\\n\ Copyright 2002-203 Mark Adler\""); asm(".include \"libc/disclaimer.inc\""); // Origin: [email protected]:madler/zlib.git // Commit: 03614c56ad299f9b238c75aa1e66f0c08fc4fc8b // Author: Mark Adler <[email protected]> // Date: Sun Oct 30 08:36:13 2016 -0700 /* * puff.c * Copyright (C) 2002-2013 Mark Adler * For conditions of distribution and use, see copyright notice in puff.h * version 2.3, 21 Jan 2013 * * puff.c is a simple inflate written to be an unambiguous way to specify the * deflate format. It is not written for speed but rather simplicity. 
As a * side benefit, this code might actually be useful when small code is more * important than speed, such as bootstrap applications. For typical deflate * data, zlib's inflate() is about four times as fast as puff(). zlib's * inflate compiles to around 20K on my machine, whereas puff.c compiles to * around 4K on my machine (a PowerPC using GNU cc). If the faster decode() * function here is used, then puff() is only twice as slow as zlib's * inflate(). * * All dynamically allocated memory comes from the stack. The stack required * is less than 2K bytes. This code is compatible with 16-bit int's and * assumes that long's are at least 32 bits. puff.c uses the short data type, * assumed to be 16 bits, for arrays in order to conserve memory. The code * works whether integers are stored big endian or little endian. * * In the comments below are "Format notes" that describe the inflate process * and document some of the less obvious aspects of the format. This source * code is meant to supplement RFC 1951, which formally describes the deflate * format: * * http://www.zlib.org/rfc-deflate.html */ /* * Change history: * * 1.0 10 Feb 2002 - First version * 1.1 17 Feb 2002 - Clarifications of some comments and notes * - Update puff() dest and source pointers on negative * errors to facilitate debugging deflators * - Remove longest from struct huffman -- not needed * - Simplify offs[] index in construct() * - Add input size and checking, using longjmp() to * maintain easy readability * - Use short data type for large arrays * - Use pointers instead of long to specify source and * destination sizes to avoid arbitrary 4 GB limits * 1.2 17 Mar 2002 - Add faster version of decode(), doubles speed (!), * but leave simple version for readabilty * - Make sure invalid distances detected if pointers * are 16 bits * - Fix fixed codes table error * - Provide a scanning mode for determining size of * uncompressed data * 1.3 20 Mar 2002 - Go back to lengths for puff() parameters 
[Gailly] * - Add a puff.h file for the interface * - Add braces in puff() for else do [Gailly] * - Use indexes instead of pointers for readability * 1.4 31 Mar 2002 - Simplify construct() code set check * - Fix some comments * - Add FIXLCODES #define * 1.5 6 Apr 2002 - Minor comment fixes * 1.6 7 Aug 2002 - Minor format changes * 1.7 3 Mar 2003 - Added test code for distribution * - Added zlib-like license * 1.8 9 Jan 2004 - Added some comments on no distance codes case * 1.9 21 Feb 2008 - Fix bug on 16-bit integer architectures [Pohland] * - Catch missing end-of-block symbol error * 2.0 25 Jul 2008 - Add #define to permit distance too far back * - Add option in TEST code for puff to write the data * - Add option in TEST code to skip input bytes * - Allow TEST code to read from piped stdin * 2.1 4 Apr 2010 - Avoid variable initialization for happier compilers * - Avoid unsigned comparisons for even happier compilers * 2.2 25 Apr 2010 - Fix bug in variable initializations [Oberhumer] * - Add const where appropriate [Oberhumer] * - Split if's and ?'s for coverage testing * - Break out test code to separate file * - Move NIL to puff.h * - Allow incomplete code only if single code length is 1 * - Add full code coverage test to Makefile * 2.3 21 Jan 2013 - Check for invalid code length codes in dynamic blocks */ #define local static /* for local function definitions */ /* * Maximums for allocations and loops. It is not useful to change these -- * they are fixed by the deflate format. 
*/ #define MAXBITS 15 /* maximum bits in a code */ #define MAXLCODES 286 /* maximum number of literal/length codes */ #define MAXDCODES 30 /* maximum number of distance codes */ #define MAXCODES (MAXLCODES+MAXDCODES) /* maximum codes lengths to read */ #define FIXLCODES 288 /* number of fixed literal/length codes */ /* input and output state */ struct state { /* output state */ unsigned char *out; /* output buffer */ unsigned long outlen; /* available space at out */ unsigned long outcnt; /* bytes written to out so far */ /* input state */ const unsigned char *in; /* input buffer */ unsigned long inlen; /* available input at in */ unsigned long incnt; /* bytes read so far */ int bitbuf; /* bit buffer */ int bitcnt; /* number of bits in bit buffer */ /* input limit error return state for bits() and decode() */ jmp_buf env; }; /* * Return need bits from the input stream. This always leaves less than * eight bits in the buffer. bits() works properly for need == 0. * * Format notes: * * - Bits are stored in bytes from the least significant bit to the most * significant bit. Therefore bits are dropped from the bottom of the bit * buffer, using shift right, and new bytes are appended to the top of the * bit buffer, using shift left. */ local noinstrument int bits(struct state *s, int need) { long val; /* bit accumulator (can use up to 20 bits) */ /* load at least need bits into val */ val = s->bitbuf; while (s->bitcnt < need) { if (s->incnt == s->inlen) longjmp(s->env, 1); /* out of input */ val |= (long)(s->in[s->incnt++]) << s->bitcnt; /* load eight bits */ s->bitcnt += 8; } /* drop need bits and update buffer, always zero to seven bits left */ s->bitbuf = (int)(val >> need); s->bitcnt -= need; /* return need bits, zeroing the bits above that */ return (int)(val & ((1L << need) - 1)); } /* * Process a stored block. * * Format notes: * * - After the two-bit stored block type (00), the stored block length and * stored bytes are byte-aligned for fast copying. 
Therefore any leftover * bits in the byte that has the last bit of the type, as many as seven, are * discarded. The value of the discarded bits are not defined and should not * be checked against any expectation. * * - The second inverted copy of the stored block length does not have to be * checked, but it's probably a good idea to do so anyway. * * - A stored block can have zero length. This is sometimes used to byte-align * subsets of the compressed data for random access or partial recovery. */ local int stored(struct state *s) { unsigned len; /* length of stored block */ /* discard leftover bits from current byte (assumes s->bitcnt < 8) */ s->bitbuf = 0; s->bitcnt = 0; /* get length and check against its one's complement */ if (s->incnt + 4 > s->inlen) return 2; /* not enough input */ len = s->in[s->incnt++]; len |= s->in[s->incnt++] << 8; if (s->in[s->incnt++] != (~len & 0xff) || s->in[s->incnt++] != ((~len >> 8) & 0xff)) return -2; /* didn't match complement! */ /* copy len bytes from in to out */ if (s->incnt + len > s->inlen) return 2; /* not enough input */ if (s->out != NIL) { if (s->outcnt + len > s->outlen) return 1; /* not enough output space */ while (len--) s->out[s->outcnt++] = s->in[s->incnt++]; } else { /* just scanning */ s->outcnt += len; s->incnt += len; } /* done with a valid stored block */ return 0; } /* * Huffman code decoding tables. count[1..MAXBITS] is the number of symbols of * each length, which for a canonical code are stepped through in order. * symbol[] are the symbol values in canonical order, where the number of * entries is the sum of the counts in count[]. The decoding process can be * seen in the function decode() below. */ struct huffman { short *count; /* number of symbols of each length */ short *symbol; /* canonically ordered symbols */ }; /* * Decode a code from the stream s using huffman table h. Return the symbol or * a negative value if there is an error. If all of the lengths are zero, i.e. 
* an empty code, or if the code is incomplete and an invalid code is received, * then -10 is returned after reading MAXBITS bits. * * Format notes: * * - The codes as stored in the compressed data are bit-reversed relative to * a simple integer ordering of codes of the same lengths. Hence below the * bits are pulled from the compressed data one at a time and used to * build the code value reversed from what is in the stream in order to * permit simple integer comparisons for decoding. A table-based decoding * scheme (as used in zlib) does not need to do this reversal. * * - The first code for the shortest length is all zeros. Subsequent codes of * the same length are simply integer increments of the previous code. When * moving up a length, a zero bit is appended to the code. For a complete * code, the last code of the longest length will be all ones. * * - Incomplete codes are handled by this decoder, since they are permitted * in the deflate format. See the format notes for fixed() and dynamic(). */ #ifdef SLOW local int decode(struct state *s, const struct huffman *h) { int len; /* current number of bits in code */ int code; /* len bits being decoded */ int first; /* first code of length len */ int count; /* number of codes of length len */ int index; /* index of first code of length len in symbol table */ code = first = index = 0; for (len = 1; len <= MAXBITS; len++) { code |= bits(s, 1); /* get next bit */ count = h->count[len]; if (code - count < first) /* if length len, return symbol */ return h->symbol[index + (code - first)]; index += count; /* else update for next length */ first += count; first <<= 1; code <<= 1; } return -10; /* ran out of codes */ } /* * A faster version of decode() for real applications of this code. It's not * as readable, but it makes puff() twice as fast. And it only makes the code * a few percent larger. 
*/ #else /* !SLOW */ local int decode(struct state *s, const struct huffman *h) { int len; /* current number of bits in code */ int code; /* len bits being decoded */ int first; /* first code of length len */ int count; /* number of codes of length len */ int index; /* index of first code of length len in symbol table */ int bitbuf; /* bits from stream */ int left; /* bits left in next or left to process */ short *next; /* next number of codes */ bitbuf = s->bitbuf; left = s->bitcnt; code = first = index = 0; len = 1; next = h->count + 1; while (1) { while (left--) { code |= bitbuf & 1; bitbuf >>= 1; count = *next++; if (code - count < first) { /* if length len, return symbol */ s->bitbuf = bitbuf; s->bitcnt = (s->bitcnt - len) & 7; return h->symbol[index + (code - first)]; } index += count; /* else update for next length */ first += count; first <<= 1; code <<= 1; len++; } left = (MAXBITS+1) - len; if (left == 0) break; if (s->incnt == s->inlen) longjmp(s->env, 1); /* out of input */ bitbuf = s->in[s->incnt++]; if (left > 8) left = 8; } return -10; /* ran out of codes */ } #endif /* SLOW */ /* * Given the list of code lengths length[0..n-1] representing a canonical * Huffman code for n symbols, construct the tables required to decode those * codes. Those tables are the number of codes of each length, and the symbols * sorted by length, retaining their original order within each length. The * return value is zero for a complete code set, negative for an over- * subscribed code set, and positive for an incomplete code set. The tables * can be used if the return value is zero or positive, but they cannot be used * if the return value is negative. If the return value is zero, it is not * possible for decode() using that table to return an error--any stream of * enough bits will resolve to a symbol. If the return value is positive, then * it is possible for decode() using that table to return an error for received * codes past the end of the incomplete lengths. 
* * Not used by decode(), but used for error checking, h->count[0] is the number * of the n symbols not in the code. So n - h->count[0] is the number of * codes. This is useful for checking for incomplete codes that have more than * one symbol, which is an error in a dynamic block. * * Assumption: for all i in 0..n-1, 0 <= length[i] <= MAXBITS * This is assured by the construction of the length arrays in dynamic() and * fixed() and is not verified by construct(). * * Format notes: * * - Permitted and expected examples of incomplete codes are one of the fixed * codes and any code with a single symbol which in deflate is coded as one * bit instead of zero bits. See the format notes for fixed() and dynamic(). * * - Within a given code length, the symbols are kept in ascending order for * the code bits definition. */ local int construct(struct huffman *h, const short *length, int n) { int symbol; /* current symbol when stepping through length[] */ int len; /* current length when stepping through h->count[] */ int left; /* number of possible codes left of current length */ short offs[MAXBITS+1]; /* offsets in symbol table for each length */ /* count number of codes of each length */ for (len = 0; len <= MAXBITS; len++) h->count[len] = 0; for (symbol = 0; symbol < n; symbol++) (h->count[length[symbol]])++; /* assumes lengths are within bounds */ if (h->count[0] == n) /* no codes! 
*/ return 0; /* complete, but decode() will fail */ /* check for an over-subscribed or incomplete set of lengths */ left = 1; /* one possible code of zero length */ for (len = 1; len <= MAXBITS; len++) { left <<= 1; /* one more bit, double codes left */ left -= h->count[len]; /* deduct count from possible codes */ if (left < 0) return left; /* over-subscribed--return negative */ } /* left > 0 means incomplete */ /* generate offsets into symbol table for each length for sorting */ offs[1] = 0; for (len = 1; len < MAXBITS; len++) offs[len + 1] = offs[len] + h->count[len]; /* * put symbols in table sorted by length, by symbol order within each * length */ for (symbol = 0; symbol < n; symbol++) if (length[symbol] != 0) h->symbol[offs[length[symbol]]++] = symbol; /* return zero for complete set, positive for incomplete set */ return left; } /* * Decode literal/length and distance codes until an end-of-block code. * * Format notes: * * - Compressed data that is after the block type if fixed or after the code * description if dynamic is a combination of literals and length/distance * pairs terminated by and end-of-block code. Literals are simply Huffman * coded bytes. A length/distance pair is a coded length followed by a * coded distance to represent a string that occurs earlier in the * uncompressed data that occurs again at the current location. * * - Literals, lengths, and the end-of-block code are combined into a single * code of up to 286 symbols. They are 256 literals (0..255), 29 length * symbols (257..285), and the end-of-block symbol (256). * * - There are 256 possible lengths (3..258), and so 29 symbols are not enough * to represent all of those. Lengths 3..10 and 258 are in fact represented * by just a length symbol. Lengths 11..257 are represented as a symbol and * some number of extra bits that are added as an integer to the base length * of the length symbol. The number of extra bits is determined by the base * length symbol. 
These are in the static arrays below, lens[] for the base * lengths and lext[] for the corresponding number of extra bits. * * - The reason that 258 gets its own symbol is that the longest length is used * often in highly redundant files. Note that 258 can also be coded as the * base value 227 plus the maximum extra value of 31. While a good deflate * should never do this, it is not an error, and should be decoded properly. * * - If a length is decoded, including its extra bits if any, then it is * followed a distance code. There are up to 30 distance symbols. Again * there are many more possible distances (1..32768), so extra bits are added * to a base value represented by the symbol. The distances 1..4 get their * own symbol, but the rest require extra bits. The base distances and * corresponding number of extra bits are below in the static arrays dist[] * and dext[]. * * - Literal bytes are simply written to the output. A length/distance pair is * an instruction to copy previously uncompressed bytes to the output. The * copy is from distance bytes back in the output stream, copying for length * bytes. * * - Distances pointing before the beginning of the output data are not * permitted. * * - Overlapped copies, where the length is greater than the distance, are * allowed and common. For example, a distance of one and a length of 258 * simply copies the last byte 258 times. A distance of four and a length of * twelve copies the last four bytes three times. A simple forward copy * ignoring whether the length is greater than the distance or not implements * this correctly. You should not use memcpy() since its behavior is not * defined for overlapped arrays. You should not use memmove() or bcopy() * since though their behavior -is- defined for overlapping arrays, it is * defined to do the wrong thing in this case. 
*/ local int codes(struct state *s, const struct huffman *lencode, const struct huffman *distcode) { int symbol; /* decoded symbol */ int len; /* length for copy */ unsigned dist; /* distance for copy */ static const short lens[29] = { /* Size base for length codes 257..285 */ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258}; static const short lext[29] = { /* Extra bits for length codes 257..285 */ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0}; static const short dists[30] = { /* Offset base for distance codes 0..29 */ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577}; static const short dext[30] = { /* Extra bits for distance codes 0..29 */ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; /* decode literals and length/distance pairs */ do { symbol = decode(s, lencode); if (symbol < 0) return symbol; /* invalid symbol */ if (symbol < 256) { /* literal: symbol is the byte */ /* write out the literal */ if (s->out != NIL) { if (s->outcnt == s->outlen) return 1; s->out[s->outcnt] = symbol; } s->outcnt++; } else if (symbol > 256) { /* length */ /* get and compute length */ symbol -= 257; if (symbol >= 29) return -10; /* invalid fixed code */ len = lens[symbol] + bits(s, lext[symbol]); /* get and check distance */ symbol = decode(s, distcode); if (symbol < 0) return symbol; /* invalid symbol */ dist = dists[symbol] + bits(s, dext[symbol]); #ifndef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR if (dist > s->outcnt) return -11; /* distance too far back */ #endif /* copy length bytes from distance bytes back */ if (s->out != NIL) { if (s->outcnt + len > s->outlen) return 1; while (len--) { s->out[s->outcnt] = #ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR dist > s->outcnt ? 
0 : #endif s->out[s->outcnt - dist]; s->outcnt++; } } else s->outcnt += len; } } while (symbol != 256); /* end of block symbol */ /* done with a valid fixed or dynamic block */ return 0; } /* * Process a fixed codes block. * * Format notes: * * - This block type can be useful for compressing small amounts of data for * which the size of the code descriptions in a dynamic block exceeds the * benefit of custom codes for that block. For fixed codes, no bits are * spent on code descriptions. Instead the code lengths for literal/length * codes and distance codes are fixed. The specific lengths for each symbol * can be seen in the "for" loops below. * * - The literal/length code is complete, but has two symbols that are invalid * and should result in an error if received. This cannot be implemented * simply as an incomplete code since those two symbols are in the "middle" * of the code. They are eight bits long and the longest literal/length\ * code is nine bits. Therefore the code must be constructed with those * symbols, and the invalid symbols must be detected after decoding. * * - The fixed distance codes also have two invalid symbols that should result * in an error if received. Since all of the distance codes are the same * length, this can be implemented as an incomplete code. Then the invalid * codes are detected while decoding. 
*/
local int fixed(struct state *s)
{
    static int virgin = 1;
    static short lencnt[MAXBITS+1], lensym[FIXLCODES];
    static short distcnt[MAXBITS+1], distsym[MAXDCODES];
    static struct huffman lencode, distcode;

    /* lazily build the fixed huffman tables on first use (not thread safe) */
    if (virgin) {
        short lengths[FIXLCODES];
        int sym;

        /* point the static codes at their static count/symbol storage */
        lencode.count = lencnt;
        lencode.symbol = lensym;
        distcode.count = distcnt;
        distcode.symbol = distsym;

        /* fixed literal/length code lengths (RFC 1951 section 3.2.6) */
        sym = 0;
        while (sym < 144)
            lengths[sym++] = 8;
        while (sym < 256)
            lengths[sym++] = 9;
        while (sym < 280)
            lengths[sym++] = 7;
        while (sym < FIXLCODES)
            lengths[sym++] = 8;
        construct(&lencode, lengths, FIXLCODES);

        /* every fixed distance code is five bits */
        for (sym = 0; sym < MAXDCODES; sym++)
            lengths[sym] = 5;
        construct(&distcode, lengths, MAXDCODES);

        virgin = 0;             /* tables persist for all later calls */
    }

    /* decode data until end-of-block code */
    return codes(s, &lencode, &distcode);
}

/*
 * Process a dynamic codes block.
 *
 * Format notes:
 *
 * - A dynamic block starts with a description of the literal/length and
 *   distance codes for that block.  New dynamic blocks allow the compressor to
 *   rapidly adapt to changing data with new codes optimized for that data.
 *
 * - The codes used by the deflate format are "canonical", which means that
 *   the actual bits of the codes are generated in an unambiguous way simply
 *   from the number of bits in each code.  Therefore the code descriptions
 *   are simply a list of code lengths for each symbol.
 *
 * - The code lengths are stored in order for the symbols, so lengths are
 *   provided for each of the literal/length symbols, and for each of the
 *   distance symbols.
 *
 * - If a symbol is not used in the block, this is represented by a zero
 *   as the code length.  This does not mean a zero-length code, but rather
 *   that no code should be created for this symbol.
There is no way in the * deflate format to represent a zero-length code. * * - The maximum number of bits in a code is 15, so the possible lengths for * any code are 1..15. * * - The fact that a length of zero is not permitted for a code has an * interesting consequence. Normally if only one symbol is used for a given * code, then in fact that code could be represented with zero bits. However * in deflate, that code has to be at least one bit. So for example, if * only a single distance base symbol appears in a block, then it will be * represented by a single code of length one, in particular one 0 bit. This * is an incomplete code, since if a 1 bit is received, it has no meaning, * and should result in an error. So incomplete distance codes of one symbol * should be permitted, and the receipt of invalid codes should be handled. * * - It is also possible to have a single literal/length code, but that code * must be the end-of-block code, since every dynamic block has one. This * is not the most efficient way to create an empty block (an empty fixed * block is fewer bits), but it is allowed by the format. So incomplete * literal/length codes of one symbol should also be permitted. * * - If there are only literal codes and no lengths, then there are no distance * codes. This is represented by one distance code with zero bits. * * - The list of up to 286 length/literal lengths and up to 30 distance lengths * are themselves compressed using Huffman codes and run-length encoding. In * the list of code lengths, a 0 symbol means no code, a 1..15 symbol means * that length, and the symbols 16, 17, and 18 are run-length instructions. * Each of 16, 17, and 18 is followed by extra bits to define the length of * the run. 16 copies the last length 3 to 6 times. 17 represents 3 to 10 * zero lengths, and 18 represents 11 to 138 zero lengths. Unused symbols * are common, hence the special coding for zero lengths. 
* * - The symbols for 0..18 are Huffman coded, and so that code must be * described first. This is simply a sequence of up to 19 three-bit values * representing no code (0) or the code length for that symbol (1..7). * * - A dynamic block starts with three fixed-size counts from which is computed * the number of literal/length code lengths, the number of distance code * lengths, and the number of code length code lengths (ok, you come up with * a better name!) in the code descriptions. For the literal/length and * distance codes, lengths after those provided are considered zero, i.e. no * code. The code length code lengths are received in a permuted order (see * the order[] array below) to make a short code length code length list more * likely. As it turns out, very short and very long codes are less likely * to be seen in a dynamic code description, hence what may appear initially * to be a peculiar ordering. * * - Given the number of literal/length code lengths (nlen) and distance code * lengths (ndist), then they are treated as one long list of nlen + ndist * code lengths. Therefore run-length coding can and often does cross the * boundary between the two sets of lengths. * * - So to summarize, the code description at the start of a dynamic block is * three counts for the number of code lengths for the literal/length codes, * the distance codes, and the code length codes. This is followed by the * code length code lengths, three bits each. This is used to construct the * code length code which is used to read the remainder of the lengths. Then * the literal/length code lengths and distance lengths are read as a single * set of lengths using the code length codes. Codes are constructed from * the resulting two sets of lengths, and then finally you can start * decoding actual compressed data in the block. * * - For reference, a "typical" size for the code description in a dynamic * block is around 80 bytes. 
*/
local int dynamic(struct state *s)
{
    int nlen, ndist, ncode;             /* number of lengths in descriptor */
    int index;                          /* index of lengths[] */
    int err;                            /* construct() return value */
    short lengths[MAXCODES];            /* descriptor code lengths */
    short lencnt[MAXBITS+1], lensym[MAXLCODES];         /* lencode memory */
    short distcnt[MAXBITS+1], distsym[MAXDCODES];       /* distcode memory */
    struct huffman lencode, distcode;   /* length and distance codes */
    static const short order[19] =      /* permutation of code length codes */
        {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};

    /* construct lencode and distcode */
    lencode.count = lencnt;
    lencode.symbol = lensym;
    distcode.count = distcnt;
    distcode.symbol = distsym;

    /* get number of lengths in each table, check lengths */
    nlen = bits(s, 5) + 257;
    ndist = bits(s, 5) + 1;
    ncode = bits(s, 4) + 4;
    if (nlen > MAXLCODES || ndist > MAXDCODES)
        return -3;                      /* bad counts */

    /* read code length code lengths (really), missing lengths are zero */
    for (index = 0; index < ncode; index++)
        lengths[order[index]] = bits(s, 3);
    for (; index < 19; index++)
        lengths[order[index]] = 0;

    /* build huffman table for code lengths codes (use lencode temporarily) */
    err = construct(&lencode, lengths, 19);
    if (err != 0)               /* require complete code set here */
        return -4;

    /* read length/literal and distance code length tables -- the nlen
       literal/length lengths and ndist distance lengths are decoded as one
       contiguous list, so a repeat may cross the boundary between them */
    index = 0;
    while (index < nlen + ndist) {
        int symbol;             /* decoded value */
        int len;                /* last length to repeat */

        symbol = decode(s, &lencode);
        if (symbol < 0)
            return symbol;      /* invalid symbol */
        if (symbol < 16)        /* length in 0..15 */
            lengths[index++] = symbol;
        else {                  /* repeat instruction */
            len = 0;            /* assume repeating zeros */
            if (symbol == 16) { /* repeat last length 3..6 times */
                if (index == 0)
                    return -5;  /* no last length! */
                len = lengths[index - 1];       /* last length */
                symbol = 3 + bits(s, 2);
            }
            else if (symbol == 17)      /* repeat zero 3..10 times */
                symbol = 3 + bits(s, 3);
            else                        /* == 18, repeat zero 11..138 times */
                symbol = 11 + bits(s, 7);
            if (index + symbol > nlen + ndist)
                return -6;              /* too many lengths! */
            while (symbol--)            /* repeat last or zero symbol times */
                lengths[index++] = len;
        }
    }

    /* check for end-of-block code -- there better be one! */
    if (lengths[256] == 0)
        return -9;

    /* build huffman table for literal/length codes */
    err = construct(&lencode, lengths, nlen);
    if (err && (err < 0 || nlen != lencode.count[0] + lencode.count[1]))
        return -7;      /* incomplete code ok only for single length 1 code */

    /* build huffman table for distance codes */
    err = construct(&distcode, lengths + nlen, ndist);
    if (err && (err < 0 || ndist != distcode.count[0] + distcode.count[1]))
        return -8;      /* incomplete code ok only for single length 1 code */

    /* decode data until end-of-block code */
    return codes(s, &lencode, &distcode);
}

/*
 * Inflate source to dest.  On return, destlen and sourcelen are updated to the
 * size of the uncompressed data and the size of the deflate data respectively.
 * On success, the return value of puff() is zero.  If there is an error in the
 * source data, i.e. it is not in the deflate format, then a negative value is
 * returned.  If there is not enough input available or there is not enough
 * output space, then a positive error is returned.  In that case, destlen and
 * sourcelen are not updated to facilitate retrying from the beginning with the
 * provision of more input data or more output space.  In the case of invalid
 * inflate data (a negative error), the dest and source pointers are updated to
 * facilitate the debugging of deflators.
 *
 * puff() also has a mode to determine the size of the uncompressed output with
 * no output written.  For this dest must be (unsigned char *)0.
In this case, * the input value of *destlen is ignored, and on return *destlen is set to the * size of the uncompressed output. * * The return codes are: * * 2: available inflate data did not terminate * 1: output space exhausted before completing inflate * 0: successful inflate * -1: invalid block type (type == 3) * -2: stored block length did not match one's complement * -3: dynamic block code description: too many length or distance codes * -4: dynamic block code description: code lengths codes incomplete * -5: dynamic block code description: repeat lengths with no first length * -6: dynamic block code description: repeat more than specified lengths * -7: dynamic block code description: invalid literal/length code lengths * -8: dynamic block code description: invalid distance code lengths * -9: dynamic block code description: missing end-of-block code * -10: invalid literal/length or distance code in fixed or dynamic block * -11: distance is too far back in fixed or dynamic block * * Format notes: * * - Three bits are read for each block to determine the kind of block and * whether or not it is the last block. Then the block is decoded and the * process repeated if it was not the last block. * * - The leftover bits in the last byte of the deflate data after the last * block (if it was a fixed or dynamic block) are undefined and have no * expected values to check. 
*/
int _puff(unsigned char *dest,         /* pointer to destination pointer */
          unsigned long *destlen,      /* amount of output space */
          const unsigned char *source, /* pointer to source data pointer */
          unsigned long *sourcelen)    /* amount of input available */
{
    struct state s;                    /* input/output state */
    int final;                         /* one when decoding the last block */
    int status;                        /* result of the current block */

    /* set up the output sink (outlen ignored when dest is NIL) */
    s.out = dest;
    s.outlen = *destlen;
    s.outcnt = 0;

    /* set up the input feed with an empty bit buffer */
    s.in = source;
    s.inlen = *sourcelen;
    s.incnt = 0;
    s.bitbuf = 0;
    s.bitcnt = 0;

    /* bits()/decode() longjmp() back here if they run past the input */
    if (setjmp(s.env) != 0) {
        status = 2;                    /* input exhausted mid-stream */
    }
    else {
        /* walk the deflate blocks until the final one or an error */
        do {
            int kind;                  /* block type 0..3 */

            final = bits(&s, 1);       /* one if last block */
            kind = bits(&s, 2);
            switch (kind) {
            case 0:
                status = stored(&s);
                break;
            case 1:
                status = fixed(&s);
                break;
            case 2:
                status = dynamic(&s);
                break;
            default:
                status = -1;           /* type == 3, invalid */
            }
            if (status != 0)
                break;                 /* return with error */
        } while (!final);
    }

    /* report produced/consumed sizes unless a positive error occurred */
    if (status <= 0) {
        *destlen = s.outcnt;
        *sourcelen = s.incnt;
    }
    return status;
}
40,512
876
jart/cosmopolitan
false
cosmopolitan/third_party/puff/puff.h
#ifndef COSMOPOLITAN_THIRD_PARTY_ZLIB_PUFF_H_
#define COSMOPOLITAN_THIRD_PARTY_ZLIB_PUFF_H_
#if !(__ASSEMBLER__ + __LINKER__ + 0)
COSMOPOLITAN_C_START_

#ifndef NIL
#define NIL ((unsigned char *)0) /* for no output option */
#endif

/*
 * Inflates raw deflate data from source into dest.  Pass dest == NIL to
 * only compute the uncompressed size.  On entry *destlen and *sourcelen
 * give the output space and input size; on return they hold the bytes
 * produced and consumed.  Returns 0 on success, a negative value for
 * invalid deflate data, or a positive value when input or output space
 * is exhausted.
 */
int _puff(unsigned char *dest, unsigned long *destlen,
          const unsigned char *source, unsigned long *sourcelen);

COSMOPOLITAN_C_END_
#endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */
#endif /* COSMOPOLITAN_THIRD_PARTY_ZLIB_PUFF_H_ */
473
16
jart/cosmopolitan
false
cosmopolitan/third_party/puff/puff.mk
#-*-mode:makefile-gmake;indent-tabs-mode:t;tab-width:8;coding:utf-8-*-┐ #───vi: set et ft=make ts=8 tw=8 fenc=utf-8 :vi───────────────────────┘ PKGS += THIRD_PARTY_PUFF THIRD_PARTY_PUFF_ARTIFACTS += THIRD_PARTY_PUFF_A THIRD_PARTY_PUFF = $(THIRD_PARTY_PUFF_A_DEPS) $(THIRD_PARTY_PUFF_A) THIRD_PARTY_PUFF_A = o/$(MODE)/third_party/puff/puff.a THIRD_PARTY_PUFF_A_FILES := $(wildcard third_party/puff/*) THIRD_PARTY_PUFF_A_HDRS = $(filter %.h,$(THIRD_PARTY_PUFF_A_FILES)) THIRD_PARTY_PUFF_A_SRCS = $(filter %.c,$(THIRD_PARTY_PUFF_A_FILES)) THIRD_PARTY_PUFF_A_OBJS = \ $(THIRD_PARTY_PUFF_A_SRCS:%.c=o/$(MODE)/%.o) THIRD_PARTY_PUFF_A_CHECKS = \ $(THIRD_PARTY_PUFF_A).pkg \ $(THIRD_PARTY_PUFF_A_HDRS:%=o/$(MODE)/%.ok) THIRD_PARTY_PUFF_A_DIRECTDEPS = \ LIBC_INTRIN \ LIBC_NEXGEN32E \ LIBC_STUBS THIRD_PARTY_PUFF_A_DEPS := \ $(call uniq,$(foreach x,$(THIRD_PARTY_PUFF_A_DIRECTDEPS),$($(x)))) $(THIRD_PARTY_PUFF_A): \ third_party/puff/ \ $(THIRD_PARTY_PUFF_A).pkg \ $(THIRD_PARTY_PUFF_A_OBJS) $(THIRD_PARTY_PUFF_A).pkg: \ $(THIRD_PARTY_PUFF_A_OBJS) \ $(foreach x,$(THIRD_PARTY_PUFF_A_DIRECTDEPS),$($(x)_A).pkg) THIRD_PARTY_PUFF_LIBS = $(foreach x,$(THIRD_PARTY_PUFF_ARTIFACTS),$($(x))) THIRD_PARTY_PUFF_SRCS = $(foreach x,$(THIRD_PARTY_PUFF_ARTIFACTS),$($(x)_SRCS)) THIRD_PARTY_PUFF_HDRS = $(foreach x,$(THIRD_PARTY_PUFF_ARTIFACTS),$($(x)_HDRS)) THIRD_PARTY_PUFF_CHECKS = $(foreach x,$(THIRD_PARTY_PUFF_ARTIFACTS),$($(x)_CHECKS)) THIRD_PARTY_PUFF_OBJS = $(foreach x,$(THIRD_PARTY_PUFF_ARTIFACTS),$($(x)_OBJS)) .PHONY: o/$(MODE)/third_party/puff o/$(MODE)/third_party/puff: \ $(THIRD_PARTY_PUFF_CHECKS)
1,700
46
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/LICENSE
MIT License Copyright (c) 2023 Georgi Gerganov Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1,072
22
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/common-gptneox.h
// -*- c++; c-basic-offset:4 -*- #ifndef COSMOPOLITAN_THIRD_PARTY_RADPAJAMA_COMMON_GPTNEOX_H_ #define COSMOPOLITAN_THIRD_PARTY_RADPAJAMA_COMMON_GPTNEOX_H_ #include "libc/macros.internal.h" #include "libc/runtime/runtime.h" #include "third_party/libcxx/random" #include "third_party/libcxx/string" #include "third_party/libcxx/thread" #include "third_party/libcxx/unordered_map" #include "third_party/libcxx/vector" #include "third_party/radpajama/gptneox.h" #if !(__ASSEMBLER__ + __LINKER__ + 0) // clang-format off // Various helper functions and utilities // // CLI argument parsing // struct gpt_params { int32_t seed = -1; // RNG seed int32_t n_threads = MIN(4, (int32_t) _getcpucount() * 0.75); int32_t n_predict = 128; // new tokens to predict int32_t n_parts = -1; // amount of model parts (-1 = determine from model dimensions) int32_t n_ctx = 512; // context size int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS) int32_t n_keep = 0; // number of tokens to keep from initial prompt // sampling parameters std::unordered_map<gptneox_token, float> logit_bias; // logit bias for specific tokens int32_t top_k = 40; // <= 0 to use vocab size float top_p = 0.95f; // 1.0 = disabled float tfs_z = 1.00f; // 1.0 = disabled float typical_p = 1.00f; // 1.0 = disabled float temp = 0.80f; // 1.0 = disabled float repeat_penalty = 1.10f; // 1.0 = disabled int32_t repeat_last_n = 64; // last n tokens to penalize (0 = disable penalty, -1 = context size) float frequency_penalty = 0.00f; // 0.0 = disabled float presence_penalty = 0.00f; // 0.0 = disabled int mirostat = 0; // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0 float mirostat_tau = 5.00f; // target entropy float mirostat_eta = 0.10f; // learning rate std::string model = "./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat/Instruct-3B-v1-f16.bin"; // model path std::string prompt = ""; std::string path_session = ""; // path to file for saving/loading model eval state std::string 
input_prefix = ""; // string to prefix user inputs with std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted std::string lora_adapter = ""; // lora adapter path std::string lora_base = ""; // base model path for the lora adapter bool memory_f16 = true; // use f16 instead of f32 for memory kv bool random_prompt = false; // do not randomize prompt if none provided bool use_color = false; // use color to distinguish generations and inputs bool interactive = false; // interactive mode bool embedding = false; // get only sentence embedding bool interactive_first = false; // wait for user input immediately bool instruct = false; // instruction mode bool penalize_nl = true; // consider newlines as a repeatable token bool perplexity = false; // compute perplexity over the prompt bool use_mmap = true; // use mmap for faster loads bool use_mlock = false; // use mlock to keep model in memory bool mem_test = false; // compute maximum memory usage bool verbose_prompt = false; // print prompt tokens before generation }; bool gpt_params_parse(int argc, char ** argv, gpt_params & params); void gpt_print_usage(int argc, char ** argv, const gpt_params & params); std::string gpt_random_prompt(std::mt19937 & rng); // // Vocab utils // std::vector<gptneox_token> gptneox_tokenize(struct gptneox_context * ctx, const std::string & text, bool add_bos); // // Console utils // #define ANSI_COLOR_RED "\x1b[31m" #define ANSI_COLOR_GREEN "\x1b[32m" #define ANSI_COLOR_YELLOW "\x1b[33m" #define ANSI_COLOR_BLUE "\x1b[34m" #define ANSI_COLOR_MAGENTA "\x1b[35m" #define ANSI_COLOR_CYAN "\x1b[36m" #define ANSI_COLOR_RESET "\x1b[0m" #define ANSI_BOLD "\x1b[1m" enum console_color_t { CONSOLE_COLOR_DEFAULT=0, CONSOLE_COLOR_PROMPT, CONSOLE_COLOR_USER_INPUT }; struct console_state { bool use_color = false; console_color_t color = CONSOLE_COLOR_DEFAULT; }; void set_console_color(console_state & con_st, console_color_t color); #if defined (_WIN32) void 
win32_console_init(bool enable_color); void win32_utf8_encode(const std::wstring & wstr, std::string & str); #endif #endif /* !(__ASSEMBLER__ + __LINKER__ + 0) */ #endif /* COSMOPOLITAN_THIRD_PARTY_RADPAJAMA_COMMON_GPTNEOX_H_ */
4,703
115
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/gptneox-util.h
// -*- c++; c-basic-offset:4 -*- #ifndef GPTNEOX_UTIL_H #define GPTNEOX_UTIL_H #include "libc/calls/calls.h" #include "libc/calls/struct/rlimit.h" #include "libc/calls/struct/rusage.h" #include "libc/calls/weirdtypes.h" #include "libc/errno.h" #include "libc/runtime/pathconf.h" #include "libc/runtime/runtime.h" #include "libc/runtime/sysconf.h" #include "libc/str/str.h" #include "libc/sysv/consts/f.h" #include "libc/sysv/consts/fileno.h" #include "libc/sysv/consts/madv.h" #include "libc/sysv/consts/map.h" #include "libc/sysv/consts/mfd.h" #include "libc/sysv/consts/mlock.h" #include "libc/sysv/consts/mremap.h" #include "libc/sysv/consts/msync.h" #include "libc/sysv/consts/o.h" #include "libc/sysv/consts/ok.h" #include "libc/sysv/consts/posix.h" #include "libc/sysv/consts/prio.h" #include "libc/sysv/consts/prot.h" #include "libc/sysv/consts/rlim.h" #include "libc/sysv/consts/rlimit.h" #include "libc/sysv/consts/rusage.h" #include "libc/time/time.h" #include "third_party/getopt/getopt.h" #include "third_party/ggml/llama_util.h" #include "third_party/libcxx/cerrno" #include "third_party/libcxx/climits" #include "third_party/libcxx/cstdarg" #include "third_party/libcxx/cstdint" #include "third_party/libcxx/cstdio" #include "third_party/libcxx/cstdlib" #include "third_party/libcxx/cstring" #include "third_party/libcxx/string" #include "third_party/libcxx/vector" #include "third_party/musl/crypt.h" #include "third_party/musl/lockf.h" // clang-format off // Internal header to be included only by llama.cpp. // Contains wrappers around OS interfaces. #define GPTNEOX_ASSERT(x) \ do { \ if (!(x)) { \ fprintf(stderr, "GPTNEOX_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \ abort(); \ } \ } while (0) #ifdef __GNUC__ #ifdef __MINGW32__ __attribute__((format(gnu_printf, 1, 2))) #else __attribute__((format(printf, 1, 2))) #endif #endif static std::string format(const char * fmt, ...) 
{ va_list ap, ap2; va_start(ap, fmt); va_copy(ap2, ap); int size = vsnprintf(NULL, 0, fmt, ap); GPTNEOX_ASSERT(size >= 0 && size < INT_MAX); std::vector<char> buf(size + 1); int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); GPTNEOX_ASSERT(size2 == size); va_end(ap2); va_end(ap); return std::string(buf.data(), size); } struct gptneox_file { // use FILE * so we don't have to re-open the file to mmap FILE * fp; size_t size; gptneox_file(const char * fname, const char * mode) { fp = std::fopen(fname, mode); if (fp == NULL) { Die("failed to open %s: %s", fname, std::strerror(errno)); } seek(0, SEEK_END); size = tell(); seek(0, SEEK_SET); } size_t tell() const { #ifdef _WIN32 __int64 ret = _ftelli64(fp); #else long ret = std::ftell(fp); #endif GPTNEOX_ASSERT(ret != -1); // this really shouldn't fail return (size_t) ret; } void seek(size_t offset, int whence) { #ifdef _WIN32 int ret = _fseeki64(fp, (__int64) offset, whence); #else int ret = std::fseek(fp, (long) offset, whence); #endif GPTNEOX_ASSERT(ret == 0); // same } void read_raw(void * ptr, size_t size) { if (size == 0) { return; } errno = 0; std::size_t ret = std::fread(ptr, size, 1, fp); if (ferror(fp)) { Die("read error: %s", strerror(errno)); } if (ret != 1) { Die("unexpectedly reached end of file"); } } std::uint32_t read_u32() { std::uint32_t ret; read_raw(&ret, sizeof(ret)); return ret; } std::string read_string(std::uint32_t len) { std::vector<char> chars(len); read_raw(chars.data(), len); return std::string(chars.data(), len); } void write_raw(const void * ptr, size_t size) { if (size == 0) { return; } errno = 0; size_t ret = std::fwrite(ptr, size, 1, fp); if (ret != 1) { Die("write error: %s", strerror(errno)); } } void write_u32(std::uint32_t val) { write_raw(&val, sizeof(val)); } ~gptneox_file() { if (fp) { std::fclose(fp); } } }; #if defined(_WIN32) static std::string gptneox_format_win_err(DWORD err) { LPSTR buf; size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | 
FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL); if (!size) { return "FormatMessageA failed"; } std::string ret(buf, size); LocalFree(buf); return ret; } #endif struct gptneox_mmap { void * addr; size_t size; gptneox_mmap(const gptneox_mmap &) = delete; #ifdef _POSIX_MAPPED_FILES static constexpr bool SUPPORTED = true; gptneox_mmap(struct gptneox_file * file, bool prefetch = true) { size = file->size; int fd = fileno(file->fp); int flags = MAP_SHARED; #ifdef __linux__ flags |= MAP_POPULATE; #endif addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0); if (addr == MAP_FAILED) { Die("mmap failed: %s", strerror(errno)); } if (prefetch) { // Advise the kernel to preload the mapped memory if (madvise(addr, file->size, MADV_WILLNEED)) { fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n", strerror(errno)); } } } ~gptneox_mmap() { munmap(addr, size); } #elif defined(_WIN32) static constexpr bool SUPPORTED = true; gptneox_mmap(struct gptneox_file * file, bool prefetch = true) { size = file->size; HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp)); HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL); DWORD error = GetLastError(); if (hMapping == NULL) { Die("CreateFileMappingA failed: %s", gptneox_format_win_err(error).c_str()); } addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0); error = GetLastError(); CloseHandle(hMapping); if (addr == NULL) { Die("MapViewOfFile failed: %s", gptneox_format_win_err(error).c_str()); } #if _WIN32_WINNT >= _WIN32_WINNT_WIN8 if (prefetch) { // Advise the kernel to preload the mapped memory WIN32_MEMORY_RANGE_ENTRY range; range.VirtualAddress = addr; range.NumberOfBytes = (SIZE_T)size; if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) { fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n", gptneox_format_win_err(GetLastError()).c_str()); } } #else #pragma message("warning: You are 
building for pre-Windows 8; prefetch not supported") #endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8 } ~gptneox_mmap() { if (!UnmapViewOfFile(addr)) { fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n", gptneox_format_win_err(GetLastError()).c_str()); } } #else static constexpr bool SUPPORTED = false; gptneox_mmap(struct gptneox_file *) { Die("mmap not supported"); } #endif }; // Represents some region of memory being locked using mlock or VirtualLock; // will automatically unlock on destruction. struct gptneox_mlock { void * addr = NULL; size_t size = 0; bool failed_already = false; gptneox_mlock() {} gptneox_mlock(const gptneox_mlock &) = delete; ~gptneox_mlock() { if (size) { raw_unlock(addr, size); } } void init(void * addr) { GPTNEOX_ASSERT(this->addr == NULL && this->size == 0); this->addr = addr; } void grow_to(size_t target_size) { GPTNEOX_ASSERT(addr); if (failed_already) { return; } size_t granularity = lock_granularity(); target_size = (target_size + granularity - 1) & ~(granularity - 1); if (target_size > size) { if (raw_lock((uint8_t *) addr + size, target_size - size)) { size = target_size; } else { failed_already = true; } } } #ifdef _POSIX_MEMLOCK_RANGE static constexpr bool SUPPORTED = true; size_t lock_granularity() { return (size_t) sysconf(_SC_PAGESIZE); } #ifdef __APPLE__ #define MLOCK_SUGGESTION \ "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \ "decreasing 'vm.global_no_user_wire_amount'. 
Also try increasing RLIMIT_MLOCK (ulimit -l).\n" #else #define MLOCK_SUGGESTION \ "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n" #endif bool raw_lock(const void * addr, size_t size) { if (!mlock(addr, size)) { return true; } else { char* errmsg = std::strerror(errno); bool suggest = (errno == ENOMEM); // Check if the resource limit is fine after all struct rlimit lock_limit; if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) suggest = false; if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) suggest = false; fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s", size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : ""); return false; } } #undef MLOCK_SUGGESTION void raw_unlock(void * addr, size_t size) { if (munlock(addr, size)) { fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno)); } } #elif defined(_WIN32) static constexpr bool SUPPORTED = true; size_t lock_granularity() { SYSTEM_INFO si; GetSystemInfo(&si); return (size_t) si.dwPageSize; } bool raw_lock(void * addr, size_t size) { for (int tries = 1; ; tries++) { if (VirtualLock(addr, size)) { return true; } if (tries == 2) { fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n", size, this->size, gptneox_format_win_err(GetLastError()).c_str()); return false; } // It failed but this was only the first try; increase the working // set size and try again. SIZE_T min_ws_size, max_ws_size; if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) { fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n", gptneox_format_win_err(GetLastError()).c_str()); return false; } // Per MSDN: "The maximum number of pages that a process can lock // is equal to the number of pages in its minimum working set minus // a small overhead." 
// Hopefully a megabyte is enough overhead: size_t increment = size + 1048576; // The minimum must be <= the maximum, so we need to increase both: min_ws_size += increment; max_ws_size += increment; if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) { fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n", gptneox_format_win_err(GetLastError()).c_str()); return false; } } } void raw_unlock(void * addr, size_t size) { if (!VirtualUnlock(addr, size)) { fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n", gptneox_format_win_err(GetLastError()).c_str()); } } #else static constexpr bool SUPPORTED = false; void raw_lock(const void * addr, size_t size) { fprintf(stderr, "warning: mlock not supported on this system\n"); } void raw_unlock(const void * addr, size_t size) {} #endif }; // Replacement for std::vector<uint8_t> that doesn't require zero-initialization. struct gptneox_buffer { uint8_t * addr = NULL; size_t size = 0; void resize(size_t size) { delete[] addr; addr = new uint8_t[size]; this->size = size; } ~gptneox_buffer() { delete[] addr; } }; #ifdef GGML_USE_CUBLAS // MISSING #include "ggml-cuda.h" struct gptneox_ctx_buffer { uint8_t * addr = NULL; size_t size = 0; void resize(size_t size) { if (addr) { ggml_cuda_host_free(addr); } addr = (uint8_t *) ggml_cuda_host_malloc(size); this->size = size; } ~gptneox_ctx_buffer() { if (addr) { ggml_cuda_host_free(addr); } } }; #else typedef gptneox_buffer gptneox_ctx_buffer; #endif #endif
13,233
442
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/main-redpajama.cc
/*-*-mode:c++;indent-tabs-mode:nil;c-basic-offset:4;tab-width:8;coding:utf-8-*-│ │vi: set net ft=c++ ts=4 sts=4 sw=4 fenc=utf-8 :vi│ ╚──────────────────────────────────────────────────────────────────────────────╝ │ │ │ radpajama.com │ │ Copyright (c) 2023 Ariel Núñez │ │ Copyright (c) 2023 Georgi Gerganov │ │ │ │ Permission is hereby granted, free of charge, to any person obtaining │ │ a copy of this software and associated documentation files (the │ │ "Software"), to deal in the Software without restriction, including │ │ without limitation the rights to use, copy, modify, merge, publish, │ │ distribute, sublicense, and/or sell copies of the Software, and to │ │ permit persons to whom the Software is furnished to do so, subject to │ │ the following conditions: │ │ │ │ The above copyright notice and this permission notice shall be │ │ included in all copies or substantial portions of the Software. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, │ │ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF │ │ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. │ │ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY │ │ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, │ │ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE │ │ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
│ │ │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/calls/calls.h" #include "libc/calls/sigtimedwait.h" #include "libc/calls/struct/sigaction.h" #include "libc/calls/struct/siginfo.h" #include "libc/calls/weirdtypes.h" #include "libc/log/log.h" #include "libc/runtime/pathconf.h" #include "libc/runtime/runtime.h" #include "libc/runtime/sysconf.h" #include "libc/sysv/consts/f.h" #include "libc/sysv/consts/fileno.h" #include "libc/sysv/consts/o.h" #include "libc/sysv/consts/ok.h" #include "libc/sysv/consts/sa.h" #include "libc/sysv/consts/sicode.h" #include "libc/sysv/consts/ss.h" #include "libc/time/time.h" #include "third_party/getopt/getopt.h" #include "third_party/libcxx/cassert" #include "third_party/libcxx/cinttypes" #include "third_party/libcxx/cmath" #include "third_party/libcxx/cstdio" #include "third_party/libcxx/cstring" #include "third_party/libcxx/ctime" #include "third_party/libcxx/fstream" #include "third_party/libcxx/iostream" #include "third_party/libcxx/string" #include "third_party/libcxx/vector" #include "third_party/musl/crypt.h" #include "third_party/musl/lockf.h" #include "third_party/radpajama/common-gptneox.h" #include "third_party/radpajama/gptneox.h" // clang-format off static console_state con_st; static gptneox_context ** g_ctx; static bool is_interacting = false; #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32) void sigint_handler(int signo) { set_console_color(con_st, CONSOLE_COLOR_DEFAULT); printf("\n"); // this also force flush stdout. 
if (signo == SIGINT) { if (!is_interacting) { is_interacting=true; } else { gptneox_print_timings(*g_ctx); _exit(130); } } } #endif int main(int argc, char ** argv) { gpt_params params; params.model = "./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Instruct-3B-v1-f16.bin"; MakeProcessNice(); ShowCrashReports(); if (gpt_params_parse(argc, argv, params) == false) { return 1; } // save choice to use color for later // (note for later: this is a slightly awkward choice) con_st.use_color = params.use_color; #if defined (_WIN32) win32_console_init(params.use_color); #endif if (params.perplexity) { printf("\n************\n"); printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__); printf("************\n\n"); return 0; } if (params.embedding) { printf("\n************\n"); printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__); printf("************\n\n"); return 0; } if (params.n_ctx > 2048) { fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);" "expect poor results\n", __func__, params.n_ctx); } if (params.seed < 0) { params.seed = time(NULL); } fprintf(stderr, "%s: seed = %d\n", __func__, params.seed); std::mt19937 rng(params.seed); if (params.random_prompt) { params.prompt = gpt_random_prompt(rng); } // params.prompt = R"(// this function checks if the number n is prime //bool is_prime(int n) {)"; gptneox_context * ctx; g_ctx = &ctx; // load the model { auto lparams = gptneox_context_default_params(); lparams.n_ctx = params.n_ctx; lparams.n_parts = params.n_parts; lparams.seed = params.seed; lparams.f16_kv = params.memory_f16; lparams.use_mmap = params.use_mmap; lparams.use_mlock = params.use_mlock; ctx = gptneox_init_from_file(params.model.c_str(), lparams); if (ctx == NULL) { fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str()); return 1; } } if (!params.lora_adapter.empty()) { int err = 
gptneox_apply_lora_from_file(ctx, params.lora_adapter.c_str(), params.lora_base.empty() ? NULL : params.lora_base.c_str(), params.n_threads); if (err != 0) { fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__); return 1; } } // print system information { fprintf(stderr, "\n"); fprintf(stderr, "system_info: n_threads = %d / %d | %s\n", params.n_threads, std::thread::hardware_concurrency(), gptneox_print_system_info()); } // determine the maximum memory usage needed to do inference for the given n_batch and n_predict parameters // uncomment the "used_mem" line in llama.cpp to see the results if (params.mem_test) { { const std::vector<gptneox_token> tmp(params.n_batch, 0); gptneox_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads); } { const std::vector<gptneox_token> tmp = { 0, }; gptneox_eval(ctx, tmp.data(), tmp.size(), params.n_predict - 1, params.n_threads); } gptneox_print_timings(ctx); gptneox_free(ctx); return 0; } std::string path_session = params.path_session; std::vector<gptneox_token> session_tokens; if (!path_session.empty()) { fprintf(stderr, "%s: attempting to load saved session from %s..\n", __func__, path_session.c_str()); // REVIEW - fopen to check for existing session FILE * fp = std::fopen(path_session.c_str(), "rb"); if (fp != NULL) { std::fclose(fp); session_tokens.resize(params.n_ctx); size_t n_token_count_out = 0; const size_t n_session_bytes = gptneox_load_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out); session_tokens.resize(n_token_count_out); if (n_session_bytes > 0) { fprintf(stderr, "%s: loaded %zu bytes of session data!\n", __func__, n_session_bytes); } else { fprintf(stderr, "%s: could not load session file, will recreate\n", __func__); } } else { fprintf(stderr, "%s: session file does not exist, will create\n", __func__); } } // tokenize the prompt auto embd_inp = ::gptneox_tokenize(ctx, params.prompt, false); //true); const int n_ctx = 
gptneox_n_ctx(ctx); if ((int) embd_inp.size() > n_ctx - 4) { fprintf(stderr, "%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4); return 1; } // debug message about similarity of saved session, if applicable size_t n_matching_session_tokens = 0; if (session_tokens.size()) { for (gptneox_token id : session_tokens) { if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) { break; } n_matching_session_tokens++; } if (n_matching_session_tokens >= embd_inp.size()) { fprintf(stderr, "%s: session file has exact match for prompt!\n", __func__); } else if (n_matching_session_tokens < (embd_inp.size() / 2)) { fprintf(stderr, "%s: warning: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n", __func__, n_matching_session_tokens, embd_inp.size()); } else { fprintf(stderr, "%s: session file matches %zu / %zu tokens of prompt\n", __func__, n_matching_session_tokens, embd_inp.size()); } } // number of tokens to keep when resetting context if (params.n_keep < 0 || params.n_keep > (int)embd_inp.size() || params.instruct) { params.n_keep = (int)embd_inp.size(); } // in instruct mode, we inject a prefix and a suffix to each input by the user if (params.instruct) { params.interactive_first = true; params.antiprompt.push_back("<|prompter|>"); } // enable interactive mode if reverse prompt or interactive start is specified if (params.antiprompt.size() != 0 || params.interactive_first) { params.interactive = true; } // determine newline token auto gptneox_token_newline = ::gptneox_tokenize(ctx, "\n", false); if (params.verbose_prompt) { fprintf(stderr, "\n"); fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str()); fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size()); for (int i = 0; i < (int) embd_inp.size(); i++) { fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], gptneox_token_to_str(ctx, embd_inp[i])); } if 
(params.n_keep > 0) { fprintf(stderr, "%s: static prompt based on n_keep: '", __func__); for (int i = 0; i < params.n_keep; i++) { fprintf(stderr, "%s", gptneox_token_to_str(ctx, embd_inp[i])); } fprintf(stderr, "'\n"); } fprintf(stderr, "\n"); } if (params.interactive) { #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) struct sigaction sigint_action; sigint_action.sa_handler = sigint_handler; sigemptyset (&sigint_action.sa_mask); sigint_action.sa_flags = 0; sigaction(SIGINT, &sigint_action, NULL); #elif defined (_WIN32) signal(SIGINT, sigint_handler); #endif fprintf(stderr, "%s: interactive mode on.\n", __func__); if (params.antiprompt.size()) { for (auto antiprompt : params.antiprompt) { fprintf(stderr, "Reverse prompt: '%s'\n", antiprompt.c_str()); } } if (!params.input_prefix.empty()) { fprintf(stderr, "Input prefix: '%s'\n", params.input_prefix.c_str()); } } fprintf(stderr, "sampling: repeat_last_n = %d, repeat_penalty = %f, presence_penalty = %f, frequency_penalty = %f, top_k = %d, tfs_z = %f, top_p = %f, typical_p = %f, temp = %f, mirostat = %d, mirostat_lr = %f, mirostat_ent = %f\n", params.repeat_last_n, params.repeat_penalty, params.presence_penalty, params.frequency_penalty, params.top_k, params.tfs_z, params.top_p, params.typical_p, params.temp, params.mirostat, params.mirostat_eta, params.mirostat_tau); fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep); fprintf(stderr, "\n\n"); // TODO: replace with ring-buffer std::vector<gptneox_token> last_n_tokens(n_ctx); std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0); if (params.interactive) { fprintf(stderr, "== Running in interactive mode. 
==\n" #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32) " - Press Ctrl+C to interject at any time.\n" #endif " - Press Return to return control to RedPajama.\n" " - If you want to submit another line, end your input in '\\'.\n\n"); is_interacting = params.interactive_first; } bool input_noecho = false; // HACK - because session saving incurs a non-negligible delay, for now skip re-saving session // if we loaded a session with at least 75% similarity. It's currently just used to speed up the // initial prompt so it doesn't need to be an exact match. bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < (embd_inp.size() * 3 / 4); int n_past = 0; int n_remain = params.n_predict; int n_consumed = 0; int n_session_consumed = 0; // the first thing we will do is to output the prompt, so set color accordingly set_console_color(con_st, CONSOLE_COLOR_PROMPT); std::vector<gptneox_token> embd; while (n_remain != 0 || params.interactive) { // predict if (embd.size() > 0) { // infinite text generation via context swapping // if we run out of context: // - take the n_keep first tokens from the original prompt (via n_past) // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches if (n_past + (int) embd.size() > n_ctx) { const int n_left = n_past - params.n_keep; n_past = params.n_keep; // insert n_left/2 tokens at the start of embd from last_n_tokens embd.insert(embd.begin(), last_n_tokens.begin() + n_ctx - n_left/2 - embd.size(), last_n_tokens.end() - embd.size()); // REVIEW - stop saving session if we run out of context path_session = ""; //printf("\n---\n"); //printf("resetting: '"); //for (int i = 0; i < (int) embd.size(); i++) { // printf("%s", gptneox_token_to_str(ctx, embd[i])); //} //printf("'\n"); //printf("\n---\n"); } // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past) // REVIEW if (n_session_consumed < (int) session_tokens.size()) { 
size_t i = 0; for ( ; i < embd.size(); i++) { if (embd[i] != session_tokens[n_session_consumed]) { session_tokens.resize(n_session_consumed); break; } n_past++; n_session_consumed++; if (n_session_consumed >= (int) session_tokens.size()) { break; } } if (i > 0) { embd.erase(embd.begin(), embd.begin() + i); } } // evaluate tokens in batches // embd is typically prepared beforehand to fit within a batch, but not always for (int i = 0; i < (int) embd.size(); i += params.n_batch) { int n_eval = (int) embd.size() - i; if (n_eval > params.n_batch) { n_eval = params.n_batch; } if (gptneox_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return 1; } n_past += n_eval; } if (embd.size() > 0 && !path_session.empty()) { session_tokens.insert(session_tokens.end(), embd.begin(), embd.end()); n_session_consumed = session_tokens.size(); } } embd.clear(); if ((int) embd_inp.size() <= n_consumed && !is_interacting) { // out of user input, sample next token const float temp = params.temp; const int32_t top_k = params.top_k <= 0 ? gptneox_n_vocab(ctx) : params.top_k; const float top_p = params.top_p; const float tfs_z = params.tfs_z; const float typical_p = params.typical_p; const int32_t repeat_last_n = params.repeat_last_n < 0 ? 
n_ctx : params.repeat_last_n; const float repeat_penalty = params.repeat_penalty; const float alpha_presence = params.presence_penalty; const float alpha_frequency = params.frequency_penalty; const int mirostat = params.mirostat; const float mirostat_tau = params.mirostat_tau; const float mirostat_eta = params.mirostat_eta; const bool penalize_nl = params.penalize_nl; // optionally save the session on first sample (for faster prompt loading next time) if (!path_session.empty() && need_to_save_session) { need_to_save_session = false; gptneox_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size()); } gptneox_token id = 0; { auto logits = gptneox_get_logits(ctx); auto n_vocab = gptneox_n_vocab(ctx); // Apply params.logit_bias map for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) { logits[it->first] += it->second; } std::vector<gptneox_token_data> candidates; candidates.reserve(n_vocab); for (gptneox_token token_id = 0; token_id < n_vocab; token_id++) { candidates.emplace_back(gptneox_token_data{token_id, logits[token_id], 0.0f}); } gptneox_token_data_array candidates_p = { candidates.data(), candidates.size(), false }; // Apply penalties gptneox_token nl_token = gptneox_str_to_token(ctx, "\n"); float nl_logit = logits[nl_token]; auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx); gptneox_sample_repetition_penalty(ctx, &candidates_p, last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, last_n_repeat, repeat_penalty); gptneox_sample_frequency_and_presence_penalties(ctx, &candidates_p, last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, last_n_repeat, alpha_frequency, alpha_presence); if (!penalize_nl) { logits[nl_token] = nl_logit; } if (temp <= 0) { // Greedy sampling id = gptneox_sample_token_greedy(ctx, &candidates_p); } else { if (mirostat == 1) { static float mirostat_mu = 2.0f * mirostat_tau; const int mirostat_m = 100; 
gptneox_sample_temperature(ctx, &candidates_p, temp); id = gptneox_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu); } else if (mirostat == 2) { static float mirostat_mu = 2.0f * mirostat_tau; gptneox_sample_temperature(ctx, &candidates_p, temp); id = gptneox_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu); } else { // Temperature sampling gptneox_sample_top_k(ctx, &candidates_p, top_k, 1); gptneox_sample_tail_free(ctx, &candidates_p, tfs_z, 1); gptneox_sample_typical(ctx, &candidates_p, typical_p, 1); gptneox_sample_top_p(ctx, &candidates_p, top_p, 1); gptneox_sample_temperature(ctx, &candidates_p, temp); id = gptneox_sample_token(ctx, &candidates_p); } } // printf("`%d`", candidates_p.size); last_n_tokens.erase(last_n_tokens.begin()); last_n_tokens.push_back(id); } // replace end of text token with newline token when in interactive mode if (id == gptneox_token_eos() && params.interactive && !params.instruct) { id = gptneox_token_newline.front(); if (params.antiprompt.size() != 0) { // tokenize and inject first reverse prompt const auto first_antiprompt = ::gptneox_tokenize(ctx, params.antiprompt.front(), false); embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end()); } } // add it to the context embd.push_back(id); // echo this to console input_noecho = false; // decrement remaining sampling budget --n_remain; } else { // some user input remains from prompt or interaction, forward it to processing while ((int) embd_inp.size() > n_consumed) { embd.push_back(embd_inp[n_consumed]); last_n_tokens.erase(last_n_tokens.begin()); last_n_tokens.push_back(embd_inp[n_consumed]); ++n_consumed; if ((int) embd.size() >= params.n_batch) { break; } } } // display text if (!input_noecho) { for (auto id : embd) { printf("%s", gptneox_token_to_str(ctx, id)); } fflush(stdout); } // reset color to default if we there is no pending user input if (!input_noecho && 
(int)embd_inp.size() == n_consumed) { set_console_color(con_st, CONSOLE_COLOR_DEFAULT); } // in interactive mode, and not currently processing queued inputs; // check if we should prompt the user for more if (params.interactive && (int) embd_inp.size() <= n_consumed) { // check for reverse prompt if (params.antiprompt.size()) { std::string last_output; for (auto id : last_n_tokens) { last_output += gptneox_token_to_str(ctx, id); } // Check if each of the reverse prompts appears at the end of the output. for (std::string & antiprompt : params.antiprompt) { if (last_output.find(antiprompt.c_str(), last_output.length() - antiprompt.length(), antiprompt.length()) != std::string::npos) { is_interacting = true; set_console_color(con_st, CONSOLE_COLOR_USER_INPUT); fflush(stdout); break; } } } if (n_past > 0 && is_interacting) { // potentially set color to indicate we are taking user input set_console_color(con_st, CONSOLE_COLOR_USER_INPUT); #if defined (_WIN32) // Windows: must reactivate sigint handler after each signal signal(SIGINT, sigint_handler); #endif if (params.instruct) { printf("\n> "); } std::string buffer; if (!params.input_prefix.empty()) { buffer += params.input_prefix; printf("%s", buffer.c_str()); } std::string line; bool another_line = true; do { #if defined(_WIN32) std::wstring wline; if (!std::getline(std::wcin, wline)) { // input stream is bad or EOF received return 0; } win32_utf8_encode(wline, line); #else if (!std::getline(std::cin, line)) { // input stream is bad or EOF received return 0; } #endif if (line.empty() || line.back() != '\\') { another_line = false; } else { line.pop_back(); // Remove the continue character } buffer += line + '\n'; // Append the line to the result } while (another_line); // done taking input, reset color set_console_color(con_st, CONSOLE_COLOR_DEFAULT); // Add tokens to embd only if the input buffer is non-empty // Entering a empty line lets the user pass control back if (buffer.length() > 1) { auto line_inp = 
::gptneox_tokenize(ctx, buffer, false); embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end()); n_remain -= line_inp.size(); } input_noecho = true; // do not echo this again } if (n_past > 0) { is_interacting = false; } } // end of text token if (!embd.empty() && embd.back() == gptneox_token_eos()) { if (params.instruct) { is_interacting = true; } else { fprintf(stderr, " [end of text]\n"); break; } } // In interactive mode, respect the maximum number of tokens and drop back to user input when reached. if (params.interactive && n_remain <= 0 && params.n_predict != -1) { n_remain = params.n_predict; is_interacting = true; } } #if defined (_WIN32) signal(SIGINT, SIG_DFL); #endif printf("\n\n"); gptneox_print_timings(ctx); gptneox_free(ctx); set_console_color(con_st, CONSOLE_COLOR_DEFAULT); return 0; }
27,956
659
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/quantize-gptneox.cc
/*-*-mode:c++;indent-tabs-mode:nil;c-basic-offset:4;tab-width:8;coding:utf-8-*-│ │vi: set net ft=c++ ts=4 sts=4 sw=4 fenc=utf-8 :vi│ ╚──────────────────────────────────────────────────────────────────────────────╝ │ │ │ radpajama.com │ │ Copyright (c) 2023 Ariel Núñez │ │ Copyright (c) 2023 Georgi Gerganov │ │ │ │ Permission is hereby granted, free of charge, to any person obtaining │ │ a copy of this software and associated documentation files (the │ │ "Software"), to deal in the Software without restriction, including │ │ without limitation the rights to use, copy, modify, merge, publish, │ │ distribute, sublicense, and/or sell copies of the Software, and to │ │ permit persons to whom the Software is furnished to do so, subject to │ │ the following conditions: │ │ │ │ The above copyright notice and this permission notice shall be │ │ included in all copies or substantial portions of the Software. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, │ │ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF │ │ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. │ │ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY │ │ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, │ │ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE │ │ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
│ │ │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/log/log.h" #include "third_party/ggml/ggml.h" #include "third_party/ggml/llama_util.h" #include "third_party/libcxx/cstdio" #include "third_party/libcxx/map" #include "third_party/libcxx/string" #include "third_party/radpajama/gptneox.h" // clang-format off static const std::map<std::string, enum gptneox_ftype> GPTNEOX_FTYPE_MAP = { {"f16", GPTNEOX_FTYPE_MOSTLY_F16}, {"q4_0", GPTNEOX_FTYPE_MOSTLY_Q4_0}, {"q4_1", GPTNEOX_FTYPE_MOSTLY_Q4_1}, {"q4_2", GPTNEOX_FTYPE_MOSTLY_Q4_2}, //{"q4_3", GPTNEOX_FTYPE_MOSTLY_Q4_3}, {"q5_0", GPTNEOX_FTYPE_MOSTLY_Q5_0}, {"q5_1", GPTNEOX_FTYPE_MOSTLY_Q5_1}, {"q8_0", GPTNEOX_FTYPE_MOSTLY_Q8_0}, }; // usage: // ./quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type // int main(int argc, char ** argv) { MakeProcessNice(); ShowCrashReports(); ggjt_v2(); ggml_time_init(); if (argc < 4) { fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type [nthread]\n", argv[0]); for (auto it = GPTNEOX_FTYPE_MAP.begin(); it != GPTNEOX_FTYPE_MAP.end(); it++) { fprintf(stderr, " type = \"%s\" or %d\n", it->first.c_str(), it->second); } return 1; } // needed to initialize f16 tables { struct ggml_init_params params = { 0, NULL, false }; struct ggml_context * ctx = ggml_init(params); ggml_free(ctx); } const std::string fname_inp = argv[1]; const std::string fname_out = argv[2]; if (fname_inp == fname_out) { fprintf(stderr, "%s: input and output names are same\n", fname_inp.c_str()); exit(1); } enum gptneox_ftype ftype; if (!is_integer_str(argv[3])) { auto it = GPTNEOX_FTYPE_MAP.find(argv[3]); if (it == GPTNEOX_FTYPE_MAP.end()) { fprintf(stderr, "%s: unknown ftype '%s'\n", __func__, argv[3]); return 1; } ftype = it->second; } else { ftype = (enum gptneox_ftype)atoi(argv[3]); } int nthread = argc > 4 ? 
atoi(argv[4]) : 0; const int64_t t_main_start_us = ggml_time_us(); int64_t t_quantize_us = 0; // load the model { const int64_t t_start_us = ggml_time_us(); if (gptneox_model_quantize(fname_inp.c_str(), fname_out.c_str(), ftype, nthread)) { fprintf(stderr, "%s: failed to quantize model from '%s'\n", __func__, fname_inp.c_str()); return 1; } t_quantize_us = ggml_time_us() - t_start_us; } // report timing { const int64_t t_main_end_us = ggml_time_us(); printf("\n"); printf("%s: quantize time = %8.2f ms\n", __func__, t_quantize_us/1000.0); printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0); } return 0; }
5,515
123
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/README.cosmo
DESCRIPTION radpajama is a port of ggml for the open source Red Pajama LLM. It started as a fork of redpajama.cpp from Together Computer. LICENSE MIT ORIGIN github.com/togethercomputer/redpajama.cpp/ commit bfa6466199b8ef92185ecb72e2a550e12baf6602 Author: Szhangce <[email protected]> Date: Tue May 9 00:50:22 2023 +0200 radpajama : Update README.md LOCAL CHANGES - Updated headers for COSMO build.
430
20
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/gptneox.h
// -*- c++; c-basic-offset:4 -*- #ifndef GPTNEOX_H #define GPTNEOX_H // clang-format off #ifdef GPTNEOX_SHARED # if defined(_WIN32) && !defined(__MINGW32__) # ifdef GPTNEOX_BUILD # define GPTNEOX_API __declspec(dllexport) # else # define GPTNEOX_API __declspec(dllimport) # endif # else # define GPTNEOX_API __attribute__ ((visibility ("default"))) # endif #else # define GPTNEOX_API #endif #define GPTNEOX_FILE_VERSION 1 #define GPTNEOX_FILE_MAGIC 0x67676a74 // 'ggjt' in hex #define GPTNEOX_FILE_MAGIC_UNVERSIONED 0x67676d6c // pre-versioned files #ifdef __cplusplus extern "C" { #endif // // C interface // // TODO: show sample usage // struct gptneox_context; typedef int gptneox_token; typedef struct gptneox_token_data { gptneox_token id; // token id float logit; // log-odds of the token float p; // probability of the token } gptneox_token_data; typedef struct gptneox_token_data_array { gptneox_token_data * data; size_t size; bool sorted; } gptneox_token_data_array; typedef void (*gptneox_progress_callback)(float progress, void *ctx); struct gptneox_context_params { int n_ctx; // text context int n_parts; // -1 for default int seed; // RNG seed, 0 for random bool f16_kv; // use fp16 for KV cache bool logits_all; // the gptneox_eval() call computes all logits, not just the last one bool vocab_only; // only load the vocabulary, no weights bool use_mmap; // use mmap if possible bool use_mlock; // force system to keep model in RAM bool embedding; // embedding mode only // called with a progress value between 0 and 1, pass NULL to disable gptneox_progress_callback progress_callback; // context pointer passed to the progress callback void * progress_callback_user_data; }; // model file types enum gptneox_ftype { GPTNEOX_FTYPE_ALL_F32 = 0, GPTNEOX_FTYPE_MOSTLY_F16 = 1, // except 1d tensors GPTNEOX_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors GPTNEOX_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors GPTNEOX_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight 
are F16 GPTNEOX_FTYPE_MOSTLY_Q4_2 = 5, // except 1d tensors // GPTNEOX_FTYPE_MOSTLY_Q4_3 (6) support has been removed GPTNEOX_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors GPTNEOX_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors GPTNEOX_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors }; GPTNEOX_API struct gptneox_context_params gptneox_context_default_params(); GPTNEOX_API bool gptneox_mmap_supported(); GPTNEOX_API bool gptneox_mlock_supported(); // Various functions for loading a ggml llama model. // Allocate (almost) all memory needed for the model. // Return NULL on failure GPTNEOX_API struct gptneox_context * gptneox_init_from_file( const char * path_model, struct gptneox_context_params params); // Frees all allocated memory GPTNEOX_API void gptneox_free(struct gptneox_context * ctx); // TODO: not great API - very likely to change // Returns 0 on success // nthread - how many threads to use. If <=0, will use std::thread::hardware_concurrency(), else the number given GPTNEOX_API int gptneox_model_quantize( const char * fname_inp, const char * fname_out, enum gptneox_ftype ftype, int nthread); GPTNEOX_API int gptneox_model_copy( const char * fname_inp, const char * fname_out, enum gptneox_ftype ftype); // Apply a LoRA adapter to a loaded model // path_base_model is the path to a higher quality model to use as a base for // the layers modified by the adapter. Can be NULL to use the current loaded model. // The model needs to be reloaded before applying a new adapter, otherwise the adapter // will be applied on top of the previous one // Returns 0 on success GPTNEOX_API int gptneox_apply_lora_from_file( struct gptneox_context * ctx, const char * path_lora, const char * path_base_model, int n_threads); // Returns the number of tokens in the KV cache GPTNEOX_API int gptneox_get_kv_cache_token_count(struct gptneox_context * ctx); // Sets the current rng seed. 
GPTNEOX_API void gptneox_set_rng_seed(struct gptneox_context * ctx, int seed); // Returns the size in bytes of the state (rng, logits, embedding and kv_cache) GPTNEOX_API size_t gptneox_get_state_size(struct gptneox_context * ctx); // Copies the state to the specified destination address. // Destination needs to have allocated enough memory. // Returns the number of bytes copied GPTNEOX_API size_t gptneox_copy_state_data(struct gptneox_context * ctx, uint8_t * dest); // Set the state reading from the specified address // Returns the number of bytes read GPTNEOX_API size_t gptneox_set_state_data(struct gptneox_context * ctx, const uint8_t * src); // Save/load session file GPTNEOX_API size_t gptneox_load_session_file(struct gptneox_context * ctx, const char * path_session, gptneox_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out); GPTNEOX_API size_t gptneox_save_session_file(struct gptneox_context * ctx, const char * path_session, const gptneox_token * tokens, size_t n_token_count); // Run the llama inference to obtain the logits and probabilities for the next token. // tokens + n_tokens is the provided batch of new tokens to process // n_past is the number of tokens to use from previous eval calls // Returns 0 on success GPTNEOX_API int gptneox_eval( struct gptneox_context * ctx, const gptneox_token * tokens, int n_tokens, int n_past, int n_threads); // Convert the provided text into tokens. // The tokens pointer must be large enough to hold the resulting tokens. 
// Returns the number of tokens on success, no more than n_max_tokens // Returns a negative number on failure - the number of tokens that would have been returned // TODO: not sure if correct GPTNEOX_API int gptneox_tokenize( struct gptneox_context * ctx, const char * text, gptneox_token * tokens, int n_max_tokens, bool add_bos); GPTNEOX_API int gptneox_n_vocab(struct gptneox_context * ctx); GPTNEOX_API int gptneox_n_ctx (struct gptneox_context * ctx); GPTNEOX_API int gptneox_n_embd (struct gptneox_context * ctx); // Token logits obtained from the last call to gptneox_eval() // The logits for the last token are stored in the last row // Can be mutated in order to change the probabilities of the next token // Rows: n_tokens // Cols: n_vocab GPTNEOX_API float * gptneox_get_logits(struct gptneox_context * ctx); // Get the embeddings for the input // shape: [n_embd] (1-dimensional) GPTNEOX_API float * gptneox_get_embeddings(struct gptneox_context * ctx); // Token Id -> String. Uses the vocabulary in the provided context GPTNEOX_API const char * gptneox_token_to_str(struct gptneox_context * ctx, gptneox_token token); // String -> Token Id. Uses the vocabulary in the provided context GPTNEOX_API gptneox_token gptneox_str_to_token(struct gptneox_context * ctx, const char * str); // Special tokens GPTNEOX_API gptneox_token gptneox_token_bos(); GPTNEOX_API gptneox_token gptneox_token_eos(); // GPTNEOX_API gptneox_token gptneox_token_nl(); // TODO: improve the last_n_tokens interface ? GPTNEOX_API gptneox_token gptneox_sample_top_p_top_k( struct gptneox_context * ctx, const gptneox_token * last_n_tokens_data, int last_n_tokens_size, int top_k, float top_p, float temp, float repeat_penalty); // Sampling functions /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix. 
GPTNEOX_API void gptneox_sample_repetition_penalty(struct gptneox_context * ctx, gptneox_token_data_array * candidates, gptneox_token * last_tokens, size_t last_tokens_size, float penalty); /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details. GPTNEOX_API void gptneox_sample_frequency_and_presence_penalties(struct gptneox_context * ctx, gptneox_token_data_array * candidates, gptneox_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence); /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits. GPTNEOX_API void gptneox_sample_softmax(struct gptneox_context * ctx, gptneox_token_data_array * candidates); /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 GPTNEOX_API void gptneox_sample_top_k(struct gptneox_context * ctx, gptneox_token_data_array * candidates, int k, size_t min_keep); /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751 GPTNEOX_API void gptneox_sample_top_p(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float p, size_t min_keep); /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/. GPTNEOX_API void gptneox_sample_tail_free(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float z, size_t min_keep); /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666. 
GPTNEOX_API void gptneox_sample_typical(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float p, size_t min_keep); GPTNEOX_API void gptneox_sample_temperature(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float temp); /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. /// @param candidates A vector of `gptneox_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm. /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. GPTNEOX_API gptneox_token gptneox_sample_token_mirostat(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float tau, float eta, int m, float * mu); /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words. 
/// @param candidates A vector of `gptneox_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text. /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text. /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates. /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal. GPTNEOX_API gptneox_token gptneox_sample_token_mirostat_v2(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float tau, float eta, float * mu); /// @details Selects the token with the highest probability. GPTNEOX_API gptneox_token gptneox_sample_token_greedy(struct gptneox_context * ctx, gptneox_token_data_array * candidates); /// @details Randomly selects a token from the candidates based on their probabilities. 
// Randomly selects a token from the candidates based on their
// probabilities (stochastic sampling; presumably uses the context's rng
// seeded via gptneox_set_rng_seed — confirm in implementation).
GPTNEOX_API gptneox_token gptneox_sample_token(struct gptneox_context * ctx, gptneox_token_data_array * candidates);

// Performance information
GPTNEOX_API void gptneox_print_timings(struct gptneox_context * ctx);
GPTNEOX_API void gptneox_reset_timings(struct gptneox_context * ctx);

// Print system information
GPTNEOX_API const char * gptneox_print_system_info(void);

#ifdef __cplusplus
}
#endif

// Internal API to be implemented by llama.cpp and used by tests/benchmarks only
#ifdef GPTNEOX_API_INTERNAL
#include "third_party/libcxx/vector"
#include "third_party/libcxx/string"
struct ggml_tensor;

// Exposes the model's (name, tensor) pairs for white-box inspection.
std::vector<std::pair<std::string, struct ggml_tensor *>>& gptneox_internal_get_tensor_map(struct gptneox_context * ctx);

#endif

#endif // GPTNEOX_H
14,385
274
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/README.md
# ggml Support for RedPajama Model

## Acknowledgement

We highly appreciate the great effort from the fork of [gptneox.cpp](https://github.com/byroneverson/gptneox.cpp). Our support of the RedPajama Model is mainly based on this implementation. We extended the model configuration and fixed a bug when setting the use_parallel_residual flag to False in their original implementation. We also extend the chat model for RedPajama.

## Usage:

### RedPajama Chat model:

- Make the code:

      make redpajama-chat quantize-gptneox

- Prepare the RedPajama model (f16 and q4_0) for ggml:

      bash ./examples/redpajama/scripts/install-RedPajama-INCITE-Chat-3B-v1.sh

- Run RedPajama chat model (fp16):

      ./redpajama-chat -m ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin \
             -c 2048 \
             -b 128 \
             -n 1 \
             -t 8 \
             --instruct \
             --color \
             --top_k 30 \
             --top_p 0.95 \
             --temp 0.8 \
             --repeat_last_n 3 \
             --repeat_penalty 1.1 \
             --seed 0

  Note that you may need to install torch and transformers to run the above scripts, e.g.:

      pip install torch==2.0.0
      pip install transformers==4.28.1

- Run RedPajama chat model (q4_0):

      ./redpajama-chat -m ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-q4_0.bin \
             -c 2048 \
             -b 128 \
             -n 1 \
             -t 8 \
             --instruct \
             --color \
             --top_k 30 \
             --top_p 0.95 \
             --temp 0.8 \
             --repeat_last_n 3 \
             --repeat_penalty 1.1 \
             --seed 0

- Run other quantized versions of the RedPajama Chat model (make sure you have the f16 model prepared before you run this):

  - Make the code to quantize the model if you have not:

        make quantize-gptneox

  - Generate the quantized model; the supported types include: q4_0, q4_1, q4_2, q5_0, q5_1, and q8_0.
For example, to run q4_1, you need to do the following conversion:

      python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin --quantize-output-type q4_1

  - Then you can chat with the quantized model:

        ./redpajama-chat -m ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-q4_1.bin \
               -c 2048 \
               -b 128 \
               -n 1 \
               -t 8 \
               --instruct \
               --color \
               --top_k 30 \
               --top_p 0.95 \
               --temp 0.8 \
               --repeat_last_n 3 \
               --repeat_penalty 1.1 \
               --seed 0

### RedPajama Base/Instruct model:

- Make the code:

      make redpajama quantize-gptneox

- Prepare the RedPajama Base/Instruct model (f16 and q4_0) for ggml:

      bash ./examples/redpajama/scripts/install-RedPajama-INCITE-Base-3B-v1.sh
      # Or
      bash ./examples/redpajama/scripts/install-RedPajama-INCITE-Instruct-3B-v1.sh

- Run other quantized versions of the RedPajama Base/Instruct model (make sure you have the f16 model prepared before you run this). Then you can generate the quantized model; the supported types include: q4_0, q4_1, q4_2, q5_0, q5_1, and q8_0. For example, to run q4_1, you need to do the following conversion, e.g. for RedPajama-Base q8_0:

      python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Base-3B-v1-f16.bin --quantize-output-type q8_0

- Run RedPajama Base/Instruct model (e.g., RedPajama-Instruct q8_0):

      ./redpajama -m ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Instruct-3B-v1-q8_0.bin \
             -c 2048 \
             -b 128 \
             -n 1 \
             -t 8 \
             --color \
             --top_k 30 \
             --top_p 0.95 \
             --temp 0.8 \
             --repeat_last_n 3 \
             --repeat_penalty 1.1 \
             --seed 0 \
             --n_predict 256 \
             --verbose-prompt \
             -p "How to schedule a tour to Anfield:"

## Attribution

The following files are covered by an MIT license and were taken from:

https://github.com/byroneverson/gptneox.cpp

Thank you Byron.
``` common-gptneox.cpp copy-gptneox.cpp gptneox.cpp quantize-gptneox.cpp common-gptneox.h gptneox-util.h gptneox.h convert_gptneox_to_ggml.py quantize-gptneox.py ```
4,253
144
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/radpajama.mk
#-*-mode:makefile-gmake;indent-tabs-mode:t;tab-width:8;coding:utf-8-*-┐ #───vi: set et ft=make ts=8 tw=8 fenc=utf-8 :vi───────────────────────┘ PKGS += THIRD_PARTY_RADPAJAMA ################################################################################ # redpajama library code common to both executables below THIRD_PARTY_RADPAJAMA_ARTIFACTS += THIRD_PARTY_RADPAJAMA_A THIRD_PARTY_RADPAJAMA = $(THIRD_PARTY_RADPAJAMA_A_DEPS) $(THIRD_PARTY_RADPAJAMA_A) THIRD_PARTY_RADPAJAMA_A = o/$(MODE)/third_party/radpajama/radpajama.a THIRD_PARTY_RADPAJAMA_A_OBJS = $(THIRD_PARTY_RADPAJAMA_A_SRCS:%.cc=o/$(MODE)/%.o) THIRD_PARTY_RADPAJAMA_A_FILES = $(THIRD_PARTY_RADPAJAMA_A_SRCS) $(THIRD_PARTY_RADPAJAMA_A_HDRS) THIRD_PARTY_RADPAJAMA_A_CHECKS = $(THIRD_PARTY_RADPAJAMA_A).pkg $(THIRD_PARTY_RADPAJAMA_A_HDRS:%=o/$(MODE)/%.okk) THIRD_PARTY_RADPAJAMA_A_HDRS = \ third_party/radpajama/common-gptneox.h \ third_party/radpajama/gptneox-util.h \ third_party/radpajama/gptneox.h THIRD_PARTY_RADPAJAMA_A_SRCS = \ third_party/radpajama/common-gptneox.cc \ third_party/radpajama/gptneox.cc \ THIRD_PARTY_RADPAJAMA_A_DIRECTDEPS = \ LIBC_CALLS \ LIBC_FMT \ LIBC_INTRIN \ LIBC_MEM \ LIBC_NEXGEN32E \ LIBC_RUNTIME \ LIBC_STDIO \ LIBC_STR \ LIBC_STUBS \ LIBC_SYSV \ LIBC_THREAD \ LIBC_TINYMATH \ THIRD_PARTY_COMPILER_RT \ THIRD_PARTY_GGML \ THIRD_PARTY_LIBCXX THIRD_PARTY_RADPAJAMA_A_DEPS := \ $(call uniq,$(foreach x,$(THIRD_PARTY_RADPAJAMA_A_DIRECTDEPS),$($(x)))) $(THIRD_PARTY_RADPAJAMA_A): \ third_party/radpajama/ \ $(THIRD_PARTY_RADPAJAMA_A).pkg \ $(THIRD_PARTY_RADPAJAMA_A_OBJS) $(THIRD_PARTY_RADPAJAMA_A).pkg: \ $(THIRD_PARTY_RADPAJAMA_A_OBJS) \ $(foreach x,$(THIRD_PARTY_RADPAJAMA_A_DIRECTDEPS),$($(x)_A).pkg) ################################################################################ # two executable programs for running inference on redpajama models # # make -j8 o//third_party/radpajama/radpajama.com # make -j8 o//third_party/radpajama/radpajama-chat.com # make -j8 
o//third_party/radpajama/radpajama-copy.com # make -j8 o//third_party/radpajama/radpajama-quantize.com THIRD_PARTY_RADPAJAMA_ARTIFACTS += THIRD_PARTY_RADPAJAMA_MAIN THIRD_PARTY_RADPAJAMA_MAIN_OBJS = $(THIRD_PARTY_RADPAJAMA_MAIN_SRCS:%.cc=o/$(MODE)/%.o) THIRD_PARTY_RADPAJAMA_MAIN_BINS = $(THIRD_PARTY_RADPAJAMA_COMS) $(THIRD_PARTY_RADPAJAMA_COMS:%=%.dbg) THIRD_PARTY_RADPAJAMA_MAIN_COMS = \ o/$(MODE)/third_party/radpajama/radpajama.com \ o/$(MODE)/third_party/radpajama/radpajama-chat.com \ o/$(MODE)/third_party/radpajama/radpajama-copy.com \ o/$(MODE)/third_party/radpajama/radpajama-quantize.com THIRD_PARTY_RADPAJAMA_MAIN_SRCS = \ third_party/radpajama/main-redpajama.cc \ third_party/radpajama/main-redpajama-chat.cc \ third_party/radpajama/copy-gptneox.cc \ third_party/radpajama/quantize-gptneox.cc THIRD_PARTY_RADPAJAMA_MAIN_DIRECTDEPS = \ LIBC_CALLS \ LIBC_FMT \ LIBC_INTRIN \ LIBC_LOG \ LIBC_NEXGEN32E \ LIBC_RUNTIME \ LIBC_STDIO \ LIBC_STR \ LIBC_STUBS \ THIRD_PARTY_GGML \ THIRD_PARTY_RADPAJAMA \ THIRD_PARTY_LIBCXX THIRD_PARTY_RADPAJAMA_MAIN_DEPS := \ $(call uniq,$(foreach x,$(THIRD_PARTY_RADPAJAMA_MAIN_DIRECTDEPS),$($(x)))) o/$(MODE)/third_party/radpajama/main.pkg: \ $(THIRD_PARTY_RADPAJAMA_MAIN_OBJS) \ $(foreach x,$(THIRD_PARTY_RADPAJAMA_MAIN_DIRECTDEPS),$($(x)_A).pkg) o/$(MODE)/third_party/radpajama/radpajama.com.dbg: \ o/$(MODE)/third_party/radpajama/main.pkg \ $(THIRD_PARTY_RADPAJAMA_MAIN_DEPS) \ o/$(MODE)/third_party/radpajama/main-redpajama.o \ $(CRT) \ $(APE_NO_MODIFY_SELF) @$(APELINK) o/$(MODE)/third_party/radpajama/radpajama-chat.com.dbg: \ o/$(MODE)/third_party/radpajama/main.pkg \ $(THIRD_PARTY_RADPAJAMA_MAIN_DEPS) \ o/$(MODE)/third_party/radpajama/main-redpajama-chat.o \ $(CRT) \ $(APE_NO_MODIFY_SELF) @$(APELINK) o/$(MODE)/third_party/radpajama/radpajama-copy.com.dbg: \ o/$(MODE)/third_party/radpajama/main.pkg \ $(THIRD_PARTY_RADPAJAMA_MAIN_DEPS) \ o/$(MODE)/third_party/radpajama/copy-gptneox.o \ $(CRT) \ $(APE_NO_MODIFY_SELF) @$(APELINK) 
o/$(MODE)/third_party/radpajama/radpajama-quantize.com.dbg: \ o/$(MODE)/third_party/radpajama/main.pkg \ $(THIRD_PARTY_RADPAJAMA_MAIN_DEPS) \ o/$(MODE)/third_party/radpajama/quantize-gptneox.o \ $(CRT) \ $(APE_NO_MODIFY_SELF) @$(APELINK) ################################################################################ # package level definitions THIRD_PARTY_RADPAJAMA_LIBS = $(foreach x,$(THIRD_PARTY_RADPAJAMA_ARTIFACTS),$($(x))) THIRD_PARTY_RADPAJAMA_COMS = $(foreach x,$(THIRD_PARTY_RADPAJAMA_ARTIFACTS),$($(x)_COMS)) THIRD_PARTY_RADPAJAMA_BINS = $(foreach x,$(THIRD_PARTY_RADPAJAMA_ARTIFACTS),$($(x)_BINS)) THIRD_PARTY_RADPAJAMA_SRCS = $(foreach x,$(THIRD_PARTY_RADPAJAMA_ARTIFACTS),$($(x)_SRCS)) THIRD_PARTY_RADPAJAMA_HDRS = $(foreach x,$(THIRD_PARTY_RADPAJAMA_ARTIFACTS),$($(x)_HDRS)) THIRD_PARTY_RADPAJAMA_OBJS = $(foreach x,$(THIRD_PARTY_RADPAJAMA_ARTIFACTS),$($(x)_OBJS)) THIRD_PARTY_RADPAJAMA_CHECKS = $(foreach x,$(THIRD_PARTY_RADPAJAMA_ARTIFACTS),$($(x)_CHECKS)) $(THIRD_PARTY_RADPAJAMA_OBJS): third_party/radpajama/radpajama.mk .PHONY: o/$(MODE)/third_party/radpajama o/$(MODE)/third_party/radpajama: \ $(THIRD_PARTY_RADPAJAMA_BINS) \ $(THIRD_PARTY_RADPAJAMA_CHECKS)
5,606
147
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/gptneox.cc
/*-*-mode:c++;indent-tabs-mode:nil;c-basic-offset:4;tab-width:8;coding:utf-8-*-│ │vi: set net ft=c++ ts=4 sts=4 sw=4 fenc=utf-8 :vi│ ╚──────────────────────────────────────────────────────────────────────────────╝ │ │ │ radpajama.com │ │ Copyright (c) 2023 Ariel Núñez │ │ Copyright (c) 2023 Georgi Gerganov │ │ │ │ Permission is hereby granted, free of charge, to any person obtaining │ │ a copy of this software and associated documentation files (the │ │ "Software"), to deal in the Software without restriction, including │ │ without limitation the rights to use, copy, modify, merge, publish, │ │ distribute, sublicense, and/or sell copies of the Software, and to │ │ permit persons to whom the Software is furnished to do so, subject to │ │ the following conditions: │ │ │ │ The above copyright notice and this permission notice shall be │ │ included in all copies or substantial portions of the Software. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, │ │ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF │ │ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. │ │ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY │ │ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, │ │ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE │ │ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
│                                                                              │
│                                                                              │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "third_party/radpajama/gptneox.h"
#include "libc/intrin/bits.h"
#include "libc/str/str.h"
#include "libc/sysv/consts/posix.h"
#include "third_party/ggml/fp16.h"
#include "third_party/ggml/ggml.h"
#include "third_party/ggml/llama_util.h"
#include "third_party/libcxx/algorithm"
#include "third_party/libcxx/array"
#include "third_party/libcxx/atomic"
#include "third_party/libcxx/cassert"
#include "third_party/libcxx/cinttypes"
#include "third_party/libcxx/climits"
#include "third_party/libcxx/cstdint"
#include "third_party/libcxx/cstdio"
#include "third_party/libcxx/cstring"
#include "third_party/libcxx/ctime"
#include "third_party/libcxx/fstream"
#include "third_party/libcxx/initializer_list"
#include "third_party/libcxx/map"
#include "third_party/libcxx/memory"
#include "third_party/libcxx/mutex"
#include "third_party/libcxx/queue"
#include "third_party/libcxx/random"
#include "third_party/libcxx/sstream"
#include "third_party/libcxx/thread"
#include "third_party/libcxx/unordered_map"
#include "third_party/radpajama/gptneox-util.h"
// clang-format off

// Defines fileno on msys:

// TODO: Add back in n_ctx (max_position_embeddings) to ggml model, it is currently hard-coded to 2048 max for llama

// Enable the scratch-buffer machinery used during graph evaluation.
#define GPTNEOX_USE_SCRATCH
#define GPTNEOX_MAX_SCRATCH_BUFFERS 16

// Model size classes; used as the key into the MEM_REQ_* lookup tables
// that size scratch/KV/eval buffers per model.
enum e_model {
    MODEL_UNKNOWN,
    MODEL_3B, // StabilityAI Base Alpha 3B
    MODEL_7B,
    MODEL_12B,
    MODEL_20B,
};

// One mebibyte, the unit for all the memory-requirement tables below.
static const size_t MiB = 1024*1024;

// computed for n_ctx == 2048
// TODO: dynamically determine these sizes
// TODO: To load the stablelm 3B model on my test XR will require some tricks, small ggml context size, mmap support, among others, but is maybe feasible, is a smaller n_ctx required? 512 instead of 2048/4096? Does mmap work as desired on iOS?
//       needs modifications in ggml
// TODO: Modify for gptneox, how are these values actually determined?
// TODO: This is now priority, static const std::map<e_model, size_t> & MEM_REQ_SCRATCH0() { static std::map<e_model, size_t> _MEM_REQ_SCRATCH0 = { { MODEL_3B, 128ull * MiB }, { MODEL_7B, 512ull * MiB }, { MODEL_12B, 512ull * MiB }, { MODEL_20B, 512ull * MiB }, }; return _MEM_REQ_SCRATCH0; } // TODO: Modify for gptneox, how are these values actually determined? static const std::map<e_model, size_t> & MEM_REQ_SCRATCH1() { static std::map<e_model, size_t> _MEM_REQ_SCRATCH1 = { { MODEL_3B, 128ull * MiB }, { MODEL_7B, 512ull * MiB }, { MODEL_12B, 512ull * MiB }, { MODEL_20B, 512ull * MiB }, }; return _MEM_REQ_SCRATCH1; } // TODO: Modify for gptneox, how are these values actually determined? // 2*n_embd*n_ctx*n_layer*sizeof(float16) // llama 7B: 2 * 768 * 32 * 2 = 98304 static const std::map<e_model, size_t> & MEM_REQ_KV_SELF() { static std::map<e_model, size_t> _MEM_REQ_KV_SELF = { { MODEL_3B, 512ull * MiB }, { MODEL_7B, 1026ull * MiB }, { MODEL_12B, 1608ull * MiB }, { MODEL_20B, 1608ull * MiB }, }; return _MEM_REQ_KV_SELF; } // TODO: Modify for gptneox, how are these values actually determined? // this is mostly needed for temporary mul_mat buffers to dequantize the data // not actually needed if BLAS is disabled static const std::map<e_model, size_t> & MEM_REQ_EVAL() { static std::map<e_model, size_t> _MEM_REQ_EVAL = { { MODEL_3B, 512ull * MiB }, { MODEL_7B, 768ull * MiB }, { MODEL_12B, 1024ull * MiB }, { MODEL_20B, 1024ull * MiB }, }; return _MEM_REQ_EVAL; } // default hparams (GPT-NeoX oasst 12B) struct gptneox_hparams { uint32_t n_vocab = 50288; uint32_t n_ctx = 4096; // this is provided as user input? 
uint32_t n_embd = 5120; uint32_t n_head = 40; uint32_t n_layer = 36; uint32_t n_rot = 32; uint32_t use_parallel_residual = 1; // 1 = true, 0 = false enum gptneox_ftype ftype = GPTNEOX_FTYPE_MOSTLY_F16; bool operator!=(const gptneox_hparams & other) const { return memcmp(this, &other, sizeof(gptneox_hparams)); } }; struct gptneox_layer { // input_layernorm struct ggml_tensor * ln_attn_g; struct ggml_tensor * ln_attn_b; // post_attention_layernorm struct ggml_tensor * ln_ff_g; struct ggml_tensor * ln_ff_b; // attention struct ggml_tensor * c_attn_attn_w; struct ggml_tensor * c_attn_attn_b; struct ggml_tensor * c_attn_proj_w; struct ggml_tensor * c_attn_proj_b; // ff struct ggml_tensor * c_mlp_fc_w; struct ggml_tensor * c_mlp_fc_b; struct ggml_tensor * c_mlp_proj_w; struct ggml_tensor * c_mlp_proj_b; }; struct gptneox_kv_cache { struct ggml_tensor * k; struct ggml_tensor * v; struct ggml_context * ctx = NULL; gptneox_buffer buf; int n; // number of tokens currently in the cache ~gptneox_kv_cache() { if (ctx) { ggml_free(ctx); } } }; struct gptneox_model { e_model type = MODEL_UNKNOWN; gptneox_hparams hparams; // final normalization struct ggml_tensor * ln_f_g; struct ggml_tensor * ln_f_b; // word embedding struct ggml_tensor * wte; // language model head struct ggml_tensor * lmh_g; std::vector<gptneox_layer> layers; // context struct ggml_context * ctx = NULL; // key + value cache for the self attention // TODO: move to gptneox_state struct gptneox_kv_cache kv_self; // the model memory buffer gptneox_buffer buf; // model memory mapped file std::unique_ptr<gptneox_mmap> mapping; // objects representing data potentially being locked in memory gptneox_mlock mlock_buf; gptneox_mlock mlock_mmap; // for quantize-stats only std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name; ~gptneox_model() { if (ctx) { ggml_free(ctx); } } }; struct gptneox_vocab { using id = int32_t; using token = std::string; struct token_score { token tok; float score; }; 
    std::unordered_map<token, id> token_to_id;
    std::vector<token_score> id_to_token;
};

// Mutable per-session state: the model, vocab, timing counters,
// decode outputs, and the scratch buffers used during evaluation.
struct gptneox_context {
    std::mt19937 rng;

    int64_t t_load_us = 0;
    int64_t t_start_us = 0;
    bool has_evaluated_once = false;

    int64_t t_sample_us = 0;
    int64_t t_eval_us = 0;
    int64_t t_p_eval_us = 0;

    int32_t n_sample = 0; // number of tokens sampled
    int32_t n_eval   = 0; // number of eval calls
    int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)

    gptneox_model model;
    gptneox_vocab vocab;

    size_t mem_per_token = 0;

    // decode output (2-dimensional array: [n_tokens][n_vocab])
    std::vector<float> logits;
    bool logits_all = false;

    // input embedding (1-dimensional array: [n_embd])
    std::vector<float> embedding;

    // memory buffers used to evaluate the model
    // TODO: move in gptneox_state
    gptneox_buffer buf_compute;
    gptneox_buffer buf_scratch[GPTNEOX_MAX_SCRATCH_BUFFERS];

    // index of the scratch buffer currently installed (-1 = none)
    int buf_last = 0;
    // high-water mark of bytes used per scratch buffer
    size_t buf_max_size[GPTNEOX_MAX_SCRATCH_BUFFERS] = { 0 };

    // Switch ggml to scratch buffer i (or back to the context's own
    // memory when i == -1), recording the peak usage of the buffer
    // that was active before the switch.
    void use_buf(struct ggml_context * ctx, int i) {
#if defined(GPTNEOX_USE_SCRATCH)
        size_t last_size = 0;

        if (i == -1) {
            last_size = ggml_set_scratch(ctx, { 0, 0, nullptr, });
        } else {
            auto & buf = buf_scratch[i];
            last_size = ggml_set_scratch(ctx, { 0, buf.size, buf.addr, });
        }

        if (buf_last >= 0) {
            buf_max_size[buf_last] = std::max(buf_max_size[buf_last], last_size);
        }

        buf_last = i;
#else
        (void) i;
        (void) ctx;
#endif
    }

    // Peak bytes ever used in scratch buffer i (0 when scratch is disabled).
    size_t get_buf_max_mem(int i) const {
#if defined(GPTNEOX_USE_SCRATCH)
        return buf_max_size[i];
#else
        (void) i;
        return 0;
#endif
    }
};

// Multiply two unsigned values, aborting on overflow.
// The ret/a != b check is well-defined for the unsigned instantiations
// (size_t, uint32_t) used in this file.
template <typename T>
static T checked_mul(T a, T b) {
    T ret = a * b;
    if (a != 0 && ret / a != b) {
        Die("overflow multiplying %llu * %llu",
            (unsigned long long) a, (unsigned long long) b);
    }
    return ret;
}

// Divide, aborting unless b is a nonzero exact divisor of a.
static size_t checked_div(size_t a, size_t b) {
    if (b == 0 || a % b != 0) {
        Die("error dividing %zu / %zu", a, b);
    }
    return a / b;
}

// Render tensor extents as e.g. " 4096 x  5120" for diagnostics.
static std::string gptneox_format_tensor_shape(const std::vector<uint32_t> & ne) {
    char buf[256];
    snprintf(buf, sizeof(buf), "%5u", ne.at(0));
for (size_t i = 1; i < ne.size(); i++) { snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i)); } return buf; } static size_t gptneox_calc_tensor_size(const std::vector<uint32_t> & ne, enum ggml_type type) { size_t size = ggml_type_size(type); for (uint32_t dim : ne) { size = checked_mul<size_t>(size, dim); } return size / ggml_blck_size(type); } struct gptneox_load_tensor_shard { std::vector<uint32_t> ne; size_t size; enum ggml_type type; size_t file_idx; size_t file_off; void calc_size() { size = gptneox_calc_tensor_size(ne, type); } }; enum gptneox_split_type { SPLIT_NONE, SPLIT_BY_COLUMNS, SPLIT_BY_ROWS }; struct gptneox_load_tensor { std::vector<gptneox_load_tensor_shard> shards; std::string name; enum ggml_type type = GGML_TYPE_F32; gptneox_split_type split_type = SPLIT_NONE; std::vector<uint32_t> ne; size_t size; struct ggml_tensor * ggml_tensor = NULL; uint8_t * data; gptneox_load_tensor(const std::string & name) : name(name) {} void calc_all() { calc_type(); calc_split_type(); calc_ne(); calc_size(); } void calc_type() { const auto & first_shard = shards.at(0); for (const auto & shard : shards) { if (shard.type != first_shard.type) { Die("inconsistent tensor shard type in '%s'", name.c_str()); } } type = first_shard.type; } void calc_split_type() { if (shards.at(0).ne.size() == 1 || // 1D tensors are just duplicated in every file shards.size() == 1) { // only one file? 
split_type = SPLIT_NONE; } else if (name.find("tok_embeddings.") == 0 || name.find(".attention.wo.weight") != std::string::npos || name.find(".feed_forward.w2.weight") != std::string::npos) { split_type = SPLIT_BY_COLUMNS; } else { split_type = SPLIT_BY_ROWS; } } void calc_ne() { const auto & first_shard = shards.at(0); for (const auto & shard : shards) { if (shard.ne != first_shard.ne) { Die("inconsistent tensor shard shape in '%s': first was %s, other was %s", name.c_str(), gptneox_format_tensor_shape(first_shard.ne).c_str(), gptneox_format_tensor_shape(shard.ne).c_str()); } } ne = first_shard.ne; GPTNEOX_ASSERT(shards.size() <= UINT32_MAX); uint32_t n_shards = (uint32_t) shards.size(); switch (split_type) { case SPLIT_NONE: ne = first_shard.ne; break; case SPLIT_BY_COLUMNS: ne = {checked_mul<uint32_t>(first_shard.ne[0], n_shards), first_shard.ne[1]}; break; case SPLIT_BY_ROWS: ne = {first_shard.ne[0], checked_mul<uint32_t>(first_shard.ne[1], n_shards)}; break; } } void calc_size() { size = gptneox_calc_tensor_size(ne, type); } }; struct gptneox_load_tensors_map { // tensors is kept in a separate vector to preserve file order std::vector<gptneox_load_tensor> tensors; std::unordered_map<std::string, size_t> name_to_idx; }; enum gptneox_file_version { GPTNEOX_FILE_VERSION_GGML, GPTNEOX_FILE_VERSION_GGMF_V1, // added version field and scores in vocab GPTNEOX_FILE_VERSION_GGJT_V1, // adopted unified aligned mappable layout GPTNEOX_FILE_VERSION_GGJT_V2, // changed quantization format }; struct gptneox_file_loader { gptneox_file file; gptneox_file_version file_version; gptneox_hparams hparams; gptneox_vocab vocab; gptneox_file_loader(const char * fname, size_t file_idx, gptneox_load_tensors_map & tensors_map) : file(fname, "rb") { fprintf(stderr, "gptneox.cpp: loading model from %s\n", fname); read_magic(); read_hparams(); read_vocab(); read_tensor_metadata(file_idx, tensors_map); } void read_magic() { uint32_t magic = file.read_u32(); uint32_t version = 0; if (magic 
!= READ32BE("ggml")) { version = file.read_u32(); } if (magic == READ32BE("ggml") && version == 0) { file_version = GPTNEOX_FILE_VERSION_GGML; ggjt_v1(); } else if (magic == READ32BE("ggmf") && version == 1) { file_version = GPTNEOX_FILE_VERSION_GGMF_V1; ggjt_v1(); } else if (magic == READ32BE("ggjt") && version == 1) { file_version = GPTNEOX_FILE_VERSION_GGJT_V1; ggjt_v1(); } else if (magic == READ32BE("ggjt") && version == 2) { file_version = GPTNEOX_FILE_VERSION_GGJT_V2; ggjt_v2(); } else { Die("unknown (magic, version) combination: %08x, %08x; is this really a GGML file?", magic, version); } } void read_hparams() { hparams.n_vocab = file.read_u32(); hparams.n_ctx = file.read_u32(); hparams.n_embd = file.read_u32(); hparams.n_head = file.read_u32(); hparams.n_layer = file.read_u32(); hparams.n_rot = file.read_u32(); hparams.use_parallel_residual = file.read_u32(); hparams.ftype = (enum gptneox_ftype) file.read_u32(); } void read_vocab() { vocab.id_to_token.resize(hparams.n_vocab); for (uint32_t i = 0; i < hparams.n_vocab; i++) { uint32_t len = file.read_u32(); std::string word = file.read_string(len); float score = 0.0f; // TODO: Implement scores in gptneox /*if (file_version >= GPTNEOX_FILE_VERSION_GGMF_V1) { file.read_raw(&score, sizeof(score)); }*/ vocab.token_to_id[word] = i; auto & tok_score = vocab.id_to_token[i]; tok_score.tok = std::move(word); tok_score.score = score; } } void read_tensor_metadata(size_t file_idx, gptneox_load_tensors_map & tensors_map) { while (file.tell() < file.size) { gptneox_load_tensor_shard shard; uint32_t n_dims = file.read_u32(); uint32_t name_len = file.read_u32(); shard.type = (enum ggml_type) file.read_u32(); shard.ne.resize(n_dims); file.read_raw(shard.ne.data(), sizeof(shard.ne[0]) * n_dims); std::string name = file.read_string(name_len); if (n_dims < 1 || n_dims > 2) { Die("gptneox.cpp: tensor '%s' should not be %u-dimensional", name.c_str(), n_dims); } switch (shard.type) { case GGML_TYPE_F32: case GGML_TYPE_F16: case 
GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q4_2: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: break; default: { Die("unrecognized tensor type %u\n", shard.type); } } if (file_version >= GPTNEOX_FILE_VERSION_GGJT_V1) { // skip to the next multiple of 32 bytes file.seek(-file.tell() & 31, SEEK_CUR); } shard.file_idx = file_idx; shard.file_off = file.tell(); shard.calc_size(); file.seek(shard.size, SEEK_CUR); auto it = tensors_map.name_to_idx.find(name); size_t idx; if (it != tensors_map.name_to_idx.end()) { idx = it->second; } else { tensors_map.tensors.emplace_back(name); idx = tensors_map.tensors.size() - 1; tensors_map.name_to_idx.emplace(name, idx); } tensors_map.tensors.at(idx).shards.push_back(shard); } } }; struct gptneox_file_saver { gptneox_file file; gptneox_file_loader * any_file_loader; gptneox_file_saver(const char * fname, gptneox_file_loader * any_file_loader, enum gptneox_ftype new_ftype) : file(fname, "wb"), any_file_loader(any_file_loader) { fprintf(stderr, "gptneox.cpp: saving model to %s\n", fname); write_magic(); write_hparams(new_ftype); write_vocab(); } void write_magic() { ggjt_v2(); file.write_u32(READ32BE("ggjt")); // magic file.write_u32(2); // version } void write_hparams(enum gptneox_ftype new_ftype) { const gptneox_hparams & hparams = any_file_loader->hparams; file.write_u32(hparams.n_vocab); file.write_u32(hparams.n_ctx); file.write_u32(hparams.n_embd); file.write_u32(hparams.n_head); file.write_u32(hparams.n_layer); file.write_u32(hparams.n_rot); file.write_u32(hparams.use_parallel_residual); file.write_u32(new_ftype); } void write_vocab() { if (any_file_loader->file_version == GPTNEOX_FILE_VERSION_GGML) { fprintf(stderr, "gptneox.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n"); } uint32_t n_vocab = any_file_loader->hparams.n_vocab; for (uint32_t i = 0; i < n_vocab; i++) { const auto & token_score = any_file_loader->vocab.id_to_token.at(i); file.write_u32((uint32_t) 
token_score.tok.size()); file.write_raw(token_score.tok.data(), token_score.tok.size()); // TODO: Implement scores in gptneox? //file.write_raw(&token_score.score, sizeof(token_score.score)); } } void write_tensor(gptneox_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) { switch (new_type) { case GGML_TYPE_F32: case GGML_TYPE_F16: case GGML_TYPE_Q4_0: case GGML_TYPE_Q4_1: case GGML_TYPE_Q4_2: case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: break; default: GPTNEOX_ASSERT(false); } file.write_u32((uint32_t) tensor.ne.size()); file.write_u32((uint32_t) tensor.name.size()); file.write_u32(new_type); file.write_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * tensor.ne.size()); file.write_raw(tensor.name.data(), tensor.name.size()); file.seek(-file.tell() & 31, SEEK_CUR); GPTNEOX_ASSERT(new_size == gptneox_calc_tensor_size(tensor.ne, new_type)); file.write_raw(new_data, new_size); } }; struct gptneox_model_loader { std::vector<std::unique_ptr<gptneox_file_loader>> file_loaders; gptneox_load_tensors_map tensors_map; bool use_mmap; size_t num_ggml_tensors_created = 0; struct ggml_context * ggml_ctx = NULL; std::unique_ptr<gptneox_mmap> mapping; gptneox_model_loader(const std::string & fname_base, bool use_mmap, bool vocab_only) { auto first_file = new gptneox_file_loader(fname_base.c_str(), 0, tensors_map); file_loaders.emplace_back(first_file); uint32_t n_parts = vocab_only ? 1 : guess_n_parts(); for (uint32_t i = 1; i < n_parts; i++) { std::string fname = fname_base + "." 
+ std::to_string(i); auto ith_file = new gptneox_file_loader(fname.c_str(), i, tensors_map); file_loaders.emplace_back(ith_file); if (ith_file->hparams != first_file->hparams) { Die("gptneox.cpp: hparams inconsistent between files"); } } if (!gptneox_mmap::SUPPORTED) { use_mmap = false; } if (use_mmap && alignment_prevents_mmap()) { fprintf(stderr, "gptneox.cpp: can't use mmap because tensors are not aligned; convert to new format to avoid this\n"); use_mmap = false; } this->use_mmap = use_mmap; for (gptneox_load_tensor & lt : tensors_map.tensors) { lt.calc_all(); } } bool alignment_prevents_mmap() { for (const gptneox_load_tensor & lt : tensors_map.tensors) { for (const gptneox_load_tensor_shard & shard : lt.shards) { if (shard.file_off & 3) { return true; } } } return false; } uint32_t guess_n_parts() const { auto it = tensors_map.name_to_idx.find("gpt_neox.embed_in.weight"); if (it == tensors_map.name_to_idx.end()) { Die("missing gpt_neox.embed_in.weight"); } const gptneox_load_tensor & lt = tensors_map.tensors.at(it->second); return file_loaders.at(0)->hparams.n_embd / lt.shards.at(0).ne.at(0); } void calc_sizes(size_t * ctx_size_p, size_t * mmapped_size_p) const { *ctx_size_p = *mmapped_size_p = 0; for (const gptneox_load_tensor & lt : tensors_map.tensors) { *ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE; *(use_mmap ? 
mmapped_size_p : ctx_size_p) += lt.size; } } struct ggml_tensor * get_tensor(const std::string & name, std::vector<uint32_t> ne) { auto it = tensors_map.name_to_idx.find(name); if (it == tensors_map.name_to_idx.end()) { Die("gptneox.cpp: tensor '%s' is missing from model", name.c_str()); } gptneox_load_tensor & lt = tensors_map.tensors.at(it->second); if (lt.ne != ne) { Die("gptneox.cpp: tensor '%s' has wrong shape; expected %s, got %s", name.c_str(), gptneox_format_tensor_shape(ne).c_str(), gptneox_format_tensor_shape(lt.ne).c_str()); } return get_tensor_for(lt); } struct ggml_tensor * get_tensor_for(gptneox_load_tensor & lt) { struct ggml_tensor * tensor; if (lt.ne.size() == 2) { tensor = ggml_new_tensor_2d(ggml_ctx, lt.type, lt.ne.at(0), lt.ne.at(1)); } else { GPTNEOX_ASSERT(lt.ne.size() == 1); tensor = ggml_new_tensor_1d(ggml_ctx, lt.type, lt.ne.at(0)); } GPTNEOX_ASSERT(lt.ggml_tensor == NULL); // if this fails, we called get_tensor twice on the same tensor lt.ggml_tensor = tensor; num_ggml_tensors_created++; return tensor; } void done_getting_tensors() { if (num_ggml_tensors_created != tensors_map.tensors.size()) { Die("gptneox.cpp: file contained more tensors than expected"); } } void load_all_data(gptneox_progress_callback progress_callback, void * progress_callback_user_data, gptneox_mlock * lmlock) { size_t data_size = 0; for (const gptneox_load_tensor & lt : tensors_map.tensors) { data_size += lt.size; } if (use_mmap) { mapping.reset(new gptneox_mmap(&file_loaders.at(0)->file)); if (!lmlock) { // Don't call the callback since the actual loading will be lazy // and we can't measure it. 
progress_callback = NULL; } if (lmlock) { lmlock->init(mapping->addr); } } size_t done_size = 0; for (gptneox_load_tensor & lt : tensors_map.tensors) { if (progress_callback) { progress_callback((float) done_size / data_size, progress_callback_user_data); } GPTNEOX_ASSERT(lt.ggml_tensor); // unused tensors should have been caught by load_data already lt.data = (uint8_t *) lt.ggml_tensor->data; load_data_for(lt); lt.ggml_tensor->data = lt.data; done_size += lt.size; if (use_mmap && lmlock) { lmlock->grow_to(done_size); } } if (progress_callback) { progress_callback(1.0f, progress_callback_user_data); } } void load_data_for(gptneox_load_tensor & lt) { if (use_mmap) { GPTNEOX_ASSERT(lt.shards.size() == 1); lt.data = (uint8_t *) mapping->addr + lt.shards.at(0).file_off; } else if (lt.split_type == SPLIT_NONE) { gptneox_file & file = file_loaders.at(lt.shards.at(0).file_idx)->file; file.seek(lt.shards.at(0).file_off, SEEK_SET); file.read_raw(lt.data, lt.size); } else if (lt.split_type == SPLIT_BY_ROWS) { size_t offset = 0; for (gptneox_load_tensor_shard & shard : lt.shards) { gptneox_file & file = file_loaders.at(shard.file_idx)->file; file.seek(shard.file_off, SEEK_SET); file.read_raw(lt.data + offset, shard.size); offset += shard.size; } GPTNEOX_ASSERT(offset == lt.size); } else if (lt.split_type == SPLIT_BY_COLUMNS) { // Let's load the data into temporary buffers to ensure the OS performs large loads. std::vector<gptneox_buffer> tmp_bufs; tmp_bufs.resize(lt.shards.size()); for (size_t i = 0; i < lt.shards.size(); i++) { gptneox_load_tensor_shard & shard = lt.shards.at(i); gptneox_file & file = file_loaders.at(shard.file_idx)->file; file.seek(shard.file_off, SEEK_SET); tmp_bufs.at(i).resize(shard.size); file.read_raw(tmp_bufs.at(i).addr, shard.size); } // Then reshape. 
size_t num_rows = lt.ne.at(1); size_t per_shard_row_size = lt.shards.at(0).size / num_rows; size_t out_offset = 0; for (size_t row = 0; row < num_rows; row++) { for (gptneox_buffer & tmp_buf : tmp_bufs) { memcpy(lt.data + out_offset, tmp_buf.addr + row * per_shard_row_size, per_shard_row_size); out_offset += per_shard_row_size; } } GPTNEOX_ASSERT(out_offset == lt.size); } if (0) { print_checksum(lt); } } static void print_checksum(gptneox_load_tensor & lt) { uint32_t sum = 0; for (size_t i = 0; i < lt.size; i++) { uint8_t byte = lt.data[i]; sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash } fprintf(stderr, "%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum, gptneox_format_tensor_shape(lt.ne).c_str(), lt.size); } }; // // kv cache // static bool kv_cache_init( const struct gptneox_hparams & hparams, struct gptneox_kv_cache & cache, ggml_type wtype, int n_ctx) { const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int64_t n_mem = (int64_t)n_layer*n_ctx; const int64_t n_elements = n_embd*n_mem; cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MiB); struct ggml_init_params params; params.mem_size = cache.buf.size; params.mem_buffer = cache.buf.addr; params.no_alloc = false; cache.ctx = ggml_init(params); if (!cache.ctx) { fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__); return false; } cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements); cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements); return true; } struct gptneox_context_params gptneox_context_default_params() { struct gptneox_context_params result = { /*.n_ctx =*/ 512, /*.n_parts =*/ -1, /*.seed =*/ 0, /*.f16_kv =*/ false, /*.logits_all =*/ false, /*.vocab_only =*/ false, /*.use_mmap =*/ true, /*.use_mlock =*/ false, /*.embedding =*/ false, /*.progress_callback =*/ nullptr, /*.progress_callback_user_data =*/ nullptr, }; return result; } bool gptneox_mmap_supported() { return gptneox_mmap::SUPPORTED; } bool 
gptneox_mlock_supported() { return gptneox_mlock::SUPPORTED; } // // model loading // static const char *gptneox_file_version_name(gptneox_file_version version) { switch (version) { case GPTNEOX_FILE_VERSION_GGML: return "'ggml' (old version with low tokenizer quality and no mmap support)"; case GPTNEOX_FILE_VERSION_GGMF_V1: return "ggmf v1 (old version with no mmap support)"; case GPTNEOX_FILE_VERSION_GGJT_V1: return "ggjt v1 (pre #1405)"; case GPTNEOX_FILE_VERSION_GGJT_V2: return "ggjt v2 (latest)"; default: GPTNEOX_ASSERT(false); } } static const char *gptneox_ftype_name(enum gptneox_ftype ftype) { switch (ftype) { case GPTNEOX_FTYPE_ALL_F32: return "all F32"; case GPTNEOX_FTYPE_MOSTLY_F16: return "mostly F16"; case GPTNEOX_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0"; case GPTNEOX_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1"; case GPTNEOX_FTYPE_MOSTLY_Q4_1_SOME_F16: return "mostly Q4_1, some F16"; case GPTNEOX_FTYPE_MOSTLY_Q4_2: return "mostly Q4_2"; //case GPTNEOX_FTYPE_MOSTLY_Q4_3: return "mostly Q4_3"; case GPTNEOX_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0"; case GPTNEOX_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1"; case GPTNEOX_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0"; default: return "unknown, may not work"; } } static const char *gptneox_model_type_name(e_model type) { switch (type) { case MODEL_3B: return "3B"; case MODEL_7B: return "7B"; case MODEL_12B: return "12B"; case MODEL_20B: return "20B"; case MODEL_UNKNOWN: return "UNKNOWN"; default: GPTNEOX_ASSERT(false); } } static void gptneox_model_load_internal( const std::string & fname, gptneox_context & lctx, int n_ctx, ggml_type memory_type, bool use_mmap, bool use_mlock, bool vocab_only, gptneox_progress_callback progress_callback, void * progress_callback_user_data) { lctx.t_start_us = ggml_time_us(); std::unique_ptr<gptneox_model_loader> ml(new gptneox_model_loader(fname, use_mmap, vocab_only)); lctx.vocab = std::move(ml->file_loaders.at(0)->vocab); auto & model = lctx.model; model.hparams = 
ml->file_loaders.at(0)->hparams; gptneox_file_version file_version = ml->file_loaders.at(0)->file_version; auto & hparams = model.hparams; { switch (hparams.n_layer) { case 16: { if (hparams.n_embd < 6144) { model.type = e_model::MODEL_3B; } else { model.type = e_model::MODEL_7B; } break; } // # <RedPajama>: we extend the model type settings for RedPajama models. case 32:{ if (hparams.n_embd == 2560) { model.type = e_model::MODEL_3B; } else if (hparams.n_embd == 4096) { model.type = e_model::MODEL_7B; } else { model.type = e_model::MODEL_UNKNOWN; } break; } case 36: model.type = e_model::MODEL_12B; break; case 44: model.type = e_model::MODEL_20B; break; } hparams.n_ctx = n_ctx; } { fprintf(stderr, "%s: format = %s\n", __func__, gptneox_file_version_name(file_version)); fprintf(stderr, "%s: n_vocab = %u\n", __func__, hparams.n_vocab); fprintf(stderr, "%s: n_ctx = %u\n", __func__, hparams.n_ctx); fprintf(stderr, "%s: n_embd = %u\n", __func__, hparams.n_embd); fprintf(stderr, "%s: n_head = %u\n", __func__, hparams.n_head); fprintf(stderr, "%s: n_layer = %u\n", __func__, hparams.n_layer); fprintf(stderr, "%s: n_rot = %u\n", __func__, hparams.n_rot); fprintf(stderr, "%s: use_parallel_residual = %d\n", __func__, hparams.use_parallel_residual); fprintf(stderr, "%s: ftype = %u (%s)\n", __func__, hparams.ftype, gptneox_ftype_name(hparams.ftype)); fprintf(stderr, "%s: n_parts = %zu\n", __func__, ml->file_loaders.size()); fprintf(stderr, "%s: model size = %s\n", __func__, gptneox_model_type_name(model.type)); } if (vocab_only) { return; } auto & ctx = model.ctx; size_t ctx_size, mmapped_size; ml->calc_sizes(&ctx_size, &mmapped_size); fprintf(stderr, "%s: ggml ctx size = %6.2f KiB\n", __func__, ctx_size/1024.0); // print memory requirements { const size_t scale = memory_type == GGML_TYPE_F32 ? 
2 : 1; // this is the total memory required to run the inference const size_t mem_required = ctx_size + mmapped_size + MEM_REQ_SCRATCH0().at(model.type) + MEM_REQ_SCRATCH1().at(model.type) + MEM_REQ_EVAL().at(model.type); // this is the memory required by one gptneox_state const size_t mem_required_state = scale*MEM_REQ_KV_SELF().at(model.type); fprintf(stderr, "%s: mem required = %7.2f MiB (+ %7.2f MiB per state)\n", __func__, mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0); } // create the ggml context { lctx.model.buf.resize(ctx_size); if (use_mlock) { lctx.model.mlock_buf.init(lctx.model.buf.addr); lctx.model.mlock_buf.grow_to(lctx.model.buf.size); } struct ggml_init_params params = { /*.mem_size =*/ lctx.model.buf.size, /*.mem_buffer =*/ lctx.model.buf.addr, /*.no_alloc =*/ ml->use_mmap, }; model.ctx = ggml_init(params); if (!model.ctx) { Die("ggml_init() failed"); } } // prepare memory for the weights { const auto & hparams = model.hparams; const uint32_t n_embd = hparams.n_embd; const uint32_t n_layer = hparams.n_layer; const uint32_t n_vocab = hparams.n_vocab; ml->ggml_ctx = ctx; model.wte = ml->get_tensor("gpt_neox.embed_in.weight", {n_embd, n_vocab}); model.ln_f_g = ml->get_tensor("gpt_neox.final_layer_norm.weight", {n_embd}); model.ln_f_b = ml->get_tensor("gpt_neox.final_layer_norm.bias", {n_embd}); model.lmh_g = ml->get_tensor("embed_out.weight", {n_embd, n_vocab}); model.layers.resize(n_layer); for (uint32_t i = 0; i < n_layer; ++i) { auto & layer = model.layers[i]; std::string layers_i = "gpt_neox.layers." 
+ std::to_string(i); layer.ln_attn_g = ml->get_tensor(layers_i + ".input_layernorm.weight", {n_embd}); layer.ln_attn_b = ml->get_tensor(layers_i + ".input_layernorm.bias", {n_embd}); layer.c_attn_attn_w = ml->get_tensor(layers_i + ".attention.query_key_value.weight", {n_embd, n_embd * 3}); layer.c_attn_attn_b = ml->get_tensor(layers_i + ".attention.query_key_value.bias", {n_embd * 3}); layer.c_attn_proj_w = ml->get_tensor(layers_i + ".attention.dense.weight", {n_embd, n_embd}); layer.c_attn_proj_b = ml->get_tensor(layers_i + ".attention.dense.bias", {n_embd}); layer.ln_ff_g = ml->get_tensor(layers_i + ".post_attention_layernorm.weight", {n_embd}); layer.ln_ff_b = ml->get_tensor(layers_i + ".post_attention_layernorm.bias", {n_embd}); layer.c_mlp_fc_w = ml->get_tensor(layers_i + ".mlp.dense_h_to_4h.weight", {n_embd, n_embd * 4}); layer.c_mlp_fc_b = ml->get_tensor(layers_i + ".mlp.dense_h_to_4h.bias", {n_embd * 4}); layer.c_mlp_proj_w = ml->get_tensor(layers_i + ".mlp.dense_4h_to_h.weight", {n_embd * 4, n_embd}); layer.c_mlp_proj_b = ml->get_tensor(layers_i + ".mlp.dense_4h_to_h.bias", {n_embd}); } } ml->done_getting_tensors(); // populate `tensors_by_name` for (gptneox_load_tensor & lt : ml->tensors_map.tensors) { model.tensors_by_name.emplace_back(lt.name, lt.ggml_tensor); } ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? 
&lctx.model.mlock_mmap : NULL); model.mapping = std::move(ml->mapping); // loading time will be recalculate after the first eval, so // we take page faults deferred by mmap() into consideration lctx.t_load_us = ggml_time_us() - lctx.t_start_us; } static bool gptneox_model_load( const std::string & fname, gptneox_context & lctx, int n_ctx, ggml_type memory_type, bool use_mmap, bool use_mlock, bool vocab_only, gptneox_progress_callback progress_callback, void *progress_callback_user_data) { // try { gptneox_model_load_internal(fname, lctx, n_ctx, memory_type, use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data); return true; // } catch (const std::string & err) { // fprintf(stderr, "error loading model: %s\n", err.c_str()); // return false; // } } // evaluate the transformer // // - lctx: llama context // - tokens: new batch of tokens to process // - n_past: the context size so far // - n_threads: number of threads to use // static bool gptneox_eval_internal( gptneox_context & lctx, const gptneox_token * tokens, const int n_tokens, const int n_past, const int n_threads) { const int64_t t_start_us = ggml_time_us(); const int N = n_tokens; const auto & model = lctx.model; const auto & hparams = model.hparams; auto & kv_self = model.kv_self; GPTNEOX_ASSERT(!!kv_self.ctx); const int n_embd = hparams.n_embd; const int n_layer = hparams.n_layer; const int n_ctx = hparams.n_ctx; const int n_head = hparams.n_head; const int n_vocab = hparams.n_vocab; const int n_rot = hparams.n_rot; auto & mem_per_token = lctx.mem_per_token; auto & buf_compute = lctx.buf_compute; struct ggml_init_params params = { /*.mem_size =*/ buf_compute.size, /*.mem_buffer =*/ buf_compute.addr, /*.no_alloc =*/ false, }; struct ggml_context * ctx0 = ggml_init(params); // for big prompts, if BLAS is enabled, it is better to use only one thread // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance ggml_cgraph gf = {}; gf.n_threads = 
N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_cublas() ? 1 : n_threads; struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); memcpy(embd->data, tokens, N*ggml_element_size(embd)); struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte, embd); for (int il = 0; il < n_layer; ++il) { struct ggml_tensor * cur; lctx.use_buf(ctx0, 0); // input norm { cur = ggml_norm(ctx0, inpL); // cur = ln_attn_g*cur + ln_attn_b cur = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].ln_attn_g, cur), cur), ggml_repeat(ctx0, model.layers[il].ln_attn_b, cur)); } // self-attention { // attn // [3*n_embd, n_embd] - model.layers[il].c_attn_attn_w // [3*n_embd, 1] - model.layers[il].c_attn_attn_b // [ n_embd, N] - cur (in) // [3*n_embd, N] - cur (out) // // cur = attn_w*cur + attn_b // [3*n_embd, N] { cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_attn_w, cur); cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_attn_attn_b, cur), cur); } // Split QKV and make contiguous struct ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, ggml_element_size(cur) * 3 * n_embd/n_head, ggml_element_size(cur) * 3 * n_embd, ggml_element_size(cur) * n_embd/n_head * 0); struct ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, ggml_element_size(cur) * 3 * n_embd/n_head, ggml_element_size(cur) * 3 * n_embd, ggml_element_size(cur) * n_embd/n_head * 1); struct ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, ggml_element_size(cur) * 3 * n_embd/n_head, ggml_element_size(cur) * 3 * n_embd, ggml_element_size(cur) * n_embd/n_head * 2); // TODO: Flatten without copying, or see if non-contiguous can be used for any of QKV. 
Qcur = ggml_cpy(ctx0, Qcur, ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)); Kcur = ggml_cpy(ctx0, Kcur, ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)); Vcur = ggml_cpy(ctx0, Vcur, ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)); // MARK: gptneox RoPE Q and K, before cache // Bit 2 for gptneox style (2) // Bit 1 is zero for dont skip n_past +(0), use (2+1) = (3) if rope is applied to cache of k (after cache only) Qcur = ggml_rope(ctx0, Qcur, n_past, n_rot, 2); Kcur = ggml_rope(ctx0, Kcur, n_past, n_rot, 2); //3); // store key and value to memory, not required if prompt if only a single token (not practical or likely) //if (N >= 1) { // Each entry in kv_self has byte size of (ggml_element_size * n_embd * n_ctx * n_layer) Vcur = ggml_view_2d(ctx0, Vcur, n_embd, N, ggml_element_size(Vcur) * n_embd, 0); Vcur = ggml_transpose(ctx0, Vcur); struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, n_embd * N, // num elements in current context (up to n_embd*n_ctx but usually less) ggml_element_size(kv_self.k) * n_embd * (il * n_ctx + n_past)); struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd, ggml_element_size(kv_self.v) * n_ctx, ggml_element_size(kv_self.v) * ((il * n_ctx * n_embd) + n_past)); // important: storing RoPE-ed version of K in the KV cache! 
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k)); ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v)); //} // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) struct ggml_tensor * K = ggml_permute(ctx0, ggml_reshape_3d(ctx0, ggml_view_1d(ctx0, kv_self.k, (n_past + N) * n_embd, ggml_element_size(kv_self.k) * il * n_ctx * n_embd), n_embd/n_head, n_head, n_past + N), 0, 2, 1, 3); // K * Q // Will use internally ggml_compute_forward_mul_mat_f16_f32 because K is f16 (cache) and Q is f32 (from q4_0) // Outputs [N, N, H, B], so it seems like this is correct for "scores" // K is internally transposed by ggml_mul_mat struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); // KQ_scaled = KQ / sqrt(n_embd/n_head) struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))); // KQ_masked = mask_past(KQ_scaled) struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past); // KQ = soft_max(KQ_masked) struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked); // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() struct ggml_tensor * V_trans = ggml_view_3d(ctx0, kv_self.v, n_past + N, n_embd/n_head, n_head, ggml_element_size(kv_self.v) * n_ctx, ggml_element_size(kv_self.v) * n_ctx * n_embd/n_head, ggml_element_size(kv_self.v) * il * n_ctx * n_embd); // KQV = transpose(V) * KQ_soft_max struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max); // KQV_merged = KQV.permute(0, 2, 1, 3) struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); // cur = KQV_merged.contiguous().view(n_embd, N) cur = ggml_cpy(ctx0, KQV_merged, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); // projection (first weight) cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_proj_w, cur); // projection (then bias) cur 
= ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_attn_proj_b, cur), cur); } lctx.use_buf(ctx0, 1); if (hparams.use_parallel_residual == 1) { //printf("use_parallel_residual == 1\n"); // This is independent of the self-attention result, so it could be done in parallel to the self-attention struct ggml_tensor * outAttn = cur; // post attention layer norm { cur = ggml_norm(ctx0, inpL); // cur = ln_attn_g*inpFF + ln_attn_b cur = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].ln_ff_g, cur), cur), ggml_repeat(ctx0, model.layers[il].ln_ff_b, cur)); } // feed-forward network { // note here we pass inpFF instead of cur cur = ggml_mul_mat(ctx0, model.layers[il].c_mlp_fc_w, cur); cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur), cur); // GELU activation cur = ggml_gelu(ctx0, cur); // projection // cur = proj_w*inpFF + proj_b cur = ggml_mul_mat(ctx0, model.layers[il].c_mlp_proj_w, cur); cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur), cur); } //# pseudocode: //# x = x + attn(ln1(x)) + mlp(ln2(x)) // inpL = inpL + outAttn + cur cur = ggml_add(ctx0, outAttn, cur); inpL = ggml_add(ctx0, inpL, cur); } else if (hparams.use_parallel_residual == 0) { //printf("use_parallel_residual == 0\n"); // This takes the self-attention residual output as input to Feedforward struct ggml_tensor * outAttn = cur; struct ggml_tensor * inpFF = ggml_add(ctx0, outAttn, inpL); // post attention layer norm { cur = ggml_norm(ctx0, inpFF); // inpFF = ln_attn_g*inpFF + ln_attn_b cur = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].ln_ff_g, cur), cur), ggml_repeat(ctx0, model.layers[il].ln_ff_b, cur)); } // feed-forward network { // note here we pass inpFF instead of cur cur = ggml_mul_mat(ctx0, model.layers[il].c_mlp_fc_w, cur); cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur), cur); cur = ggml_gelu(ctx0, cur); cur = ggml_mul_mat(ctx0, model.layers[il].c_mlp_proj_w, cur); cur = ggml_add(ctx0, 
ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur), cur); } //# pseudocode: //# x = x + attn(ln1(x)) (residual above as input to mlp) //# x = x + mlp(ln2(x)) (residual after mlp aka inpL + cur) //# <RedPajama>: we fixed a small issue in the gptneox.cpp fork when setting use_parallel_residual to False; inpL = ggml_add(ctx0, inpFF, cur); } else { printf("use_parallel_residual == %d\n", hparams.use_parallel_residual); assert(0); } } lctx.use_buf(ctx0, 0); // used at the end to optionally extract the embeddings struct ggml_tensor * embeddings = NULL; // norm { inpL = ggml_norm(ctx0, inpL); // inpL = ln_f_g*inpL + ln_f_b inpL = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, model.ln_f_g, inpL), inpL), ggml_repeat(ctx0, model.ln_f_b, inpL)); embeddings = inpL; } // lm_head inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL); lctx.use_buf(ctx0, -1); // logits -> probs //inpL = ggml_soft_max(ctx0, inpL); // run the computation ggml_build_forward_expand(&gf, inpL); ggml_graph_compute (ctx0, &gf); #ifdef GGML_PERF // print timing information per ggml operation (for debugging purposes) // requires GGML_PERF to be defined ggml_graph_print(&gf); #endif // plot the computation graph in dot format (for debugging purposes) //if (n_past%100 == 0) { // ggml_graph_dump_dot(&gf, NULL, "llama.dot"); //} //embd_w.resize(n_vocab*N); //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N); // extract logits { auto & logits_out = lctx.logits; if (lctx.logits_all) { logits_out.resize(n_vocab * N); memcpy(logits_out.data(), (float *) ggml_get_data(inpL), sizeof(float)*n_vocab*N); } else { // return result for just the last token logits_out.resize(n_vocab); memcpy(logits_out.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab); } } // extract embeddings if (lctx.embedding.size()) { auto & embedding_out = lctx.embedding; embedding_out.resize(n_embd); memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd); 
}

    // First eval: record an estimate of ggml memory used per token so later
    // callers can size buffers from it.
    if (mem_per_token == 0) {
        mem_per_token = ggml_used_mem(ctx0)/N;
    }
#if 0
    printf("\n%s: used_mem = %.3f MiB, scratch -- %.3f MiB %.3f MiB\n", __func__, ggml_used_mem(ctx0)/1024.0/1024.0, lctx.get_buf_max_mem(0)/1024.0/1024.0, lctx.get_buf_max_mem(1)/1024.0/1024.0);
#endif

    ggml_free(ctx0);

    // measure the performance only for the single-token evals
    if (N == 1) {
        lctx.t_eval_us += ggml_time_us() - t_start_us;
        lctx.n_eval++;
    }
    else if (N > 1) {
        lctx.t_p_eval_us += ggml_time_us() - t_start_us;
        lctx.n_p_eval += N;
    }

    return true;
}

//
// tokenizer
//

// Byte length of the UTF-8 sequence that starts with byte `src`, decided by
// the high nibble (continuation/invalid prefixes 0x8..0xB map to 1).
static size_t utf8_len(char src) {
    const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
    uint8_t highbits = static_cast<uint8_t>(src) >> 4;
    return lookup[highbits];
}

// Doubly-linked-list node over the input's UTF-8 characters.
// A symbol with n == 0 has been merged away.
struct gptneox_sp_symbol {
    using index = int;
    index prev;        // index of previous live symbol, -1 at the front
    index next;        // index of next live symbol, -1 at the end
    const char * text; // points into the input string (not owned)
    size_t n;          // byte length of this symbol
};

// Candidate merge of two adjacent symbols. The comparator orders the
// priority queue so the highest score pops first (ties: leftmost position).
struct gptneox_sp_bigram {
    struct comparator {
        bool operator()(gptneox_sp_bigram & l, gptneox_sp_bigram & r) {
            return (l.score < r.score) || (l.score == r.score && l.left > r.left);
        }
    };
    using queue_storage = std::vector<gptneox_sp_bigram>;
    using queue = std::priority_queue<gptneox_sp_bigram, queue_storage, comparator>;
    gptneox_sp_symbol::index left;
    gptneox_sp_symbol::index right;
    float score;  // vocab score of the merged token
    size_t size;  // byte length of the merged text at push time (staleness check)
};

// original implementation:
// https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
// Greedy SentencePiece-style BPE: repeatedly merges the best-scoring adjacent
// pair of symbols until no vocab bigram remains.
struct gptneox_tokenizer {
    gptneox_tokenizer(const gptneox_vocab & vocab): vocab_(vocab) {}

    // Appends token ids for `text` to `output` (does not clear `output`).
    void tokenize(const std::string & text, std::vector<gptneox_vocab::id> & output) {
        // split string into utf8 chars
        int index = 0;
        size_t offs = 0;
        while (offs < text.size()) {
            gptneox_sp_symbol sym;
            // clamp at end of string so a truncated multi-byte char can't overrun
            size_t char_len = std::min(text.size() - offs, utf8_len(text[offs]));
            sym.text = text.c_str() + offs;
            sym.n = char_len;
            offs += char_len;
            sym.prev = index - 1;
            sym.next = offs == text.size() ? -1 : index + 1;
            index++;
            symbols_.emplace_back(std::move(sym));
        }

        // seed the work queue with all possible 2-character tokens.
for (size_t i = 1; i < symbols_.size(); ++i) {
            try_add_bigram(i - 1, i);
        }

        // keep substituting the highest frequency pairs for as long as we can.
        while (!work_queue_.empty()) {
            auto bigram = work_queue_.top();
            work_queue_.pop();

            auto & left_sym = symbols_[bigram.left];
            auto & right_sym = symbols_[bigram.right];

            // if one of the symbols already got merged, skip it.
            // (the size check also rejects stale queue entries whose
            // neighbors changed since they were pushed)
            if (left_sym.n == 0 || right_sym.n == 0 ||
                left_sym.n + right_sym.n != bigram.size) {
                continue;
            }

            // merge the right sym into the left one
            left_sym.n += right_sym.n;
            right_sym.n = 0;

            //printf("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);

            // remove the right sym from the chain
            left_sym.next = right_sym.next;
            if (right_sym.next >= 0) {
                symbols_[right_sym.next].prev = bigram.left;
            }

            // find more substitutions
            try_add_bigram(left_sym.prev, bigram.left);
            try_add_bigram(bigram.left, left_sym.next);
        }

        // walk the surviving chain and emit token ids
        for (int i = 0; i != -1; i = symbols_[i].next) {
            auto & symbol = symbols_[i];
            auto token = vocab_.token_to_id.find(std::string(symbol.text, symbol.n));

            if (token == vocab_.token_to_id.end()) {
                // output any symbols that did not form tokens as bytes.
for (int j = 0; j < (int) symbol.n; ++j) {
                    // byte fallback: raw byte value offset by 3 — assumes the
                    // first 3 vocab ids are reserved (TODO confirm against vocab)
                    gptneox_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
                    output.push_back(token_id);
                }
            } else {
                output.push_back((*token).second);
            }
        }
    }

private:
    // Push the merge of symbols_[left]+symbols_[right] onto the work queue if
    // the concatenated text is a known, in-range vocab token.
    void try_add_bigram(int left, int right) {
        if (left == -1 || right == -1) {
            return;
        }

        const std::string text = std::string(symbols_[left].text, symbols_[left].n + symbols_[right].n);
        auto token = vocab_.token_to_id.find(text);

        if (token == vocab_.token_to_id.end()) {
            return;
        }

        if (static_cast<size_t>((*token).second) >= vocab_.id_to_token.size()) {
            return;
        }

        const auto &tok_score = vocab_.id_to_token[(*token).second];

        gptneox_sp_bigram bigram;
        bigram.left = left;
        bigram.right = right;
        bigram.score = tok_score.score;
        bigram.size = text.size();
        work_queue_.push(bigram);
    }

    const gptneox_vocab & vocab_;
    std::vector<gptneox_sp_symbol> symbols_;
    gptneox_sp_bigram::queue work_queue_;
};

// Tokenize `text`, optionally prefixing the BOS token. Empty input yields an
// empty vector (no BOS).
static std::vector<gptneox_vocab::id> gptneox_tokenize(const gptneox_vocab & vocab, const std::string & text, bool bos) {
    gptneox_tokenizer tokenizer(vocab);
    std::vector<gptneox_vocab::id> output;

    if (text.size() == 0) {
        return output;
    }

    if (bos) {
        output.push_back(gptneox_token_bos());
    }

    tokenizer.tokenize(text, output);
    return output;
}

//
// sampling
//

// In-place softmax over candidate logits; sorts candidates by logit
// (descending) first if not already sorted. Uses the max-logit shift for
// numerical stability. ctx may be null (timing is skipped).
void gptneox_sample_softmax(struct gptneox_context * ctx, gptneox_token_data_array * candidates) {
    assert(candidates->size > 0);

    const int64_t t_start_sample_us = ggml_time_us();

    // Sort the logits in descending order
    if (!candidates->sorted) {
        std::sort(candidates->data, candidates->data + candidates->size, [](const gptneox_token_data & a, const gptneox_token_data & b) {
            return a.logit > b.logit;
        });
        candidates->sorted = true;
    }

    float max_l = candidates->data[0].logit;
    float cum_sum = 0.0f;
    for (size_t i = 0; i < candidates->size; ++i) {
        float p = expf(candidates->data[i].logit - max_l);
        candidates->data[i].p = p;
        cum_sum += p;
    }
    for (size_t i = 0; i < candidates->size; ++i) {
        candidates->data[i].p /= cum_sum;
    }

    if (ctx) {
ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

// Keep only the k highest-logit candidates (k is clamped to
// [min_keep, candidates->size]). ctx may be null (timing is skipped).
void gptneox_sample_top_k(struct gptneox_context * ctx, gptneox_token_data_array * candidates, int k, size_t min_keep) {
    const int64_t t_start_sample_us = ggml_time_us();

    k = std::max(k, (int) min_keep);
    k = std::min(k, (int) candidates->size);

    // Sort scores in descending order
    if (!candidates->sorted) {
        auto comp = [](const gptneox_token_data & a, const gptneox_token_data & b) {
            return a.logit > b.logit;
        };
        if (k == (int) candidates->size) {
            std::sort(candidates->data, candidates->data + candidates->size, comp);
        } else {
            // only the first k entries need to be ordered
            std::partial_sort(candidates->data, candidates->data + k, candidates->data + candidates->size, comp);
        }
        candidates->sorted = true;
    }
    candidates->size = k;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

// Nucleus (top-p) sampling: keep the smallest prefix of the sorted
// distribution whose cumulative probability exceeds p, but at least
// min_keep tokens. p >= 1 is a no-op.
void gptneox_sample_top_p(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float p, size_t min_keep) {
    if (p >= 1.0f) {
        return;
    }

    const int64_t t_start_sample_us = ggml_time_us();

    gptneox_sample_softmax(ctx, candidates);

    // Compute the cumulative probabilities
    float cum_sum = 0.0f;
    size_t last_idx = candidates->size;

    for (size_t i = 0; i < candidates->size; ++i) {
        cum_sum += candidates->data[i].p;

        // Check if the running sum is greater than p or if we have kept at least min_keep tokens
        if (cum_sum > p && i >= min_keep) {
            last_idx = i;
            break;
        }
    }

    // Resize the output vector to keep only the top-p tokens
    candidates->size = last_idx;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

// Tail-free sampling (TFS): drops the low-probability "tail" located via the
// normalized second derivative of the sorted probability curve. z >= 1 or
// fewer than 3 candidates is a no-op.
void gptneox_sample_tail_free(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float z, size_t min_keep) {
    if (z >= 1.0f || candidates->size <= 2) {
        return;
    }

    const int64_t t_start_sample_us = ggml_time_us();

    gptneox_sample_softmax(nullptr, candidates);

    // Compute the first and second derivatives
    std::vector<float> first_derivatives(candidates->size - 1);
    std::vector<float> second_derivatives(candidates->size - 2);

    for (size_t i =
0; i < first_derivatives.size(); ++i) { first_derivatives[i] = candidates->data[i].p - candidates->data[i + 1].p; } for (size_t i = 0; i < second_derivatives.size(); ++i) { second_derivatives[i] = first_derivatives[i] - first_derivatives[i + 1]; } // Calculate absolute value of second derivatives for (size_t i = 0; i < second_derivatives.size(); ++i) { second_derivatives[i] = abs(second_derivatives[i]); } // Normalize the second derivatives float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f); for (float & value : second_derivatives) { value /= second_derivatives_sum; } float cum_sum = 0.0f; size_t last_idx = candidates->size; for (size_t i = 0; i < second_derivatives.size(); ++i) { cum_sum += second_derivatives[i]; // Check if the running sum is greater than z or if we have kept at least min_keep tokens if (cum_sum > z && i >= min_keep) { last_idx = i; break; } } // Resize the output vector to keep only the tokens above the tail location candidates->size = last_idx; if (ctx) { ctx->t_sample_us += ggml_time_us() - t_start_sample_us; } } void gptneox_sample_typical(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float p, size_t min_keep) { // Reference implementation: // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr if (p >= 1.0f) { return; } const int64_t t_start_sample_us = ggml_time_us(); // Compute the softmax of logits and calculate entropy gptneox_sample_softmax(nullptr, candidates); float entropy = 0.0f; for (size_t i = 0; i < candidates->size; ++i) { entropy += -candidates->data[i].p * logf(candidates->data[i].p); } // Compute the absolute difference between negative log probability and entropy for each candidate std::vector<float> shifted_scores; for (size_t i = 0; i < candidates->size; ++i) { float shifted_score = fabsf(-logf(candidates->data[i].p) - entropy); shifted_scores.push_back(shifted_score); } // Sort tokens based on the 
// shifted_scores and their corresponding indices
    std::vector<size_t> indices(candidates->size);
    std::iota(indices.begin(), indices.end(), 0);
    std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) {
        return shifted_scores[a] < shifted_scores[b];
    });

    // Compute the cumulative probabilities
    float cum_sum = 0.0f;
    size_t last_idx = indices.size();

    for (size_t i = 0; i < indices.size(); ++i) {
        size_t idx = indices[i];
        cum_sum += candidates->data[idx].p;

        // Check if the running sum is greater than typical or if we have kept at least min_keep tokens
        // NOTE(review): min_keep is size_t, so `min_keep - 1` wraps to SIZE_MAX
        // when min_keep == 0 — callers must pass min_keep >= 1.
        if (cum_sum > p && i >= min_keep - 1) {
            last_idx = i + 1;
            break;
        }
    }

    // Resize the output vector to keep only the locally typical tokens
    std::vector<gptneox_token_data> new_candidates;
    for (size_t i = 0; i < last_idx; ++i) {
        size_t idx = indices[i];
        new_candidates.push_back(candidates->data[idx]);
    }

    // Replace the data in candidates with the new_candidates data
    std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
    candidates->size = new_candidates.size();

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

// Divide all logits by `temp` (temp < 1 sharpens, temp > 1 flattens the
// distribution). ctx may be null (timing is skipped).
void gptneox_sample_temperature(struct gptneox_context * ctx, gptneox_token_data_array * candidates_p, float temp) {
    const int64_t t_start_sample_us = ggml_time_us();

    for (size_t i = 0; i < candidates_p->size; ++i) {
        candidates_p->data[i].logit /= temp;
    }

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

// Penalize logits of tokens that appear in last_tokens (CTRL-style repetition
// penalty). penalty == 1 or an empty history is a no-op.
void gptneox_sample_repetition_penalty(struct gptneox_context * ctx, gptneox_token_data_array * candidates, gptneox_token * last_tokens, size_t last_tokens_size, float penalty) {
    if (last_tokens_size == 0 || penalty == 1.0f) {
        return;
    }

    const int64_t t_start_sample_us = ggml_time_us();

    for (size_t i = 0; i < candidates->size; ++i) {
        // linear scan of the history per candidate
        auto token_iter = std::find(last_tokens, last_tokens + last_tokens_size, candidates->data[i].id);
        if (token_iter == last_tokens + last_tokens_size) {
            continue;
        }

        // The academic publication that described this technique actually
// just only divided, but that would cause tokens with negative logits to become more likely, which is obviously wrong.
        // This is common fix for this problem, which is to multiply by the penalty instead of dividing.
        if (candidates->data[i].logit <= 0) {
            candidates->data[i].logit *= penalty;
        } else {
            candidates->data[i].logit /= penalty;
        }
    }

    // logits changed, so any previous sort order is stale
    candidates->sorted = false;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

// OpenAI-style frequency/presence penalties: each candidate's logit is
// reduced by count*alpha_frequency + (count>0)*alpha_presence, where count is
// its number of occurrences in last_tokens_p.
void gptneox_sample_frequency_and_presence_penalties(struct gptneox_context * ctx, gptneox_token_data_array * candidates, gptneox_token * last_tokens_p, size_t last_tokens_size, float alpha_frequency, float alpha_presence) {
    if (last_tokens_size == 0 || (alpha_frequency == 0.0f && alpha_presence == 0.0f)) {
        return;
    }

    const int64_t t_start_sample_us = ggml_time_us();

    // Create a frequency map to count occurrences of each token in last_tokens
    std::unordered_map<gptneox_token, int> token_count;
    for (size_t i = 0; i < last_tokens_size; ++i) {
        token_count[last_tokens_p[i]]++;
    }

    // Apply frequency and presence penalties to the candidates
    for (size_t i = 0; i < candidates->size; ++i) {
        auto token_iter = token_count.find(candidates->data[i].id);
        if (token_iter == token_count.end()) {
            continue;
        }

        int count = token_iter->second;
        candidates->data[i].logit -= float(count) * alpha_frequency + float(count > 0) * alpha_presence;
    }

    candidates->sorted = false;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
}

// Mirostat (v1) sampling: keeps the observed surprise near the target tau by
// adapting *mu across calls with learning rate eta; the m most probable
// tokens are used to estimate the Zipf exponent s_hat.
gptneox_token gptneox_sample_token_mirostat(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float tau, float eta, int m, float * mu) {
    assert(ctx);
    auto N = float(gptneox_n_vocab(ctx));
    int64_t t_start_sample_us;
    t_start_sample_us = ggml_time_us();

    gptneox_sample_softmax(nullptr, candidates);

    // Estimate s_hat using the most probable m tokens
    // NOTE(review): size_t(m - 1) wraps for m == 0 and candidates->size - 1
    // wraps for an empty array — assumes m >= 1 and a non-empty candidate set.
    float s_hat = 0.0;
    float sum_ti_bi = 0.0;
    float sum_ti_sq = 0.0;
    for (size_t i = 0; i < size_t(m - 1) && i < candidates->size - 1; ++i) {
        float t_i = logf(float(i +
2) / float(i + 1));
        float b_i = logf(candidates->data[i].p / candidates->data[i + 1].p);
        sum_ti_bi += t_i * b_i;
        sum_ti_sq += t_i * t_i;
    }
    // least-squares fit of the Zipf exponent
    s_hat = sum_ti_bi / sum_ti_sq;

    // Compute k from the estimated s_hat and target surprise value
    float epsilon_hat = s_hat - 1;
    float k = powf((epsilon_hat * powf(2, *mu)) / (1 - powf(N, -epsilon_hat)), 1 / s_hat);

    // Sample the next word X using top-k sampling
    gptneox_sample_top_k(nullptr, candidates, int(k), 1);
    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
    gptneox_token X = gptneox_sample_token(ctx, candidates);
    t_start_sample_us = ggml_time_us();

    // Compute error as the difference between observed surprise and target surprise value
    size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const gptneox_token_data & candidate) {
        return candidate.id == X;
    }));
    float observed_surprise = -log2f(candidates->data[X_idx].p);
    float e = observed_surprise - tau;

    // Update mu using the learning rate and error
    *mu = *mu - eta * e;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
        ctx->n_sample++;
    }
    return X;
}

// Mirostat v2: truncates candidates whose surprise (-log2 p) exceeds *mu,
// renormalizes, samples, then nudges *mu toward the target surprise tau.
gptneox_token gptneox_sample_token_mirostat_v2(struct gptneox_context * ctx, gptneox_token_data_array * candidates, float tau, float eta, float * mu) {
    assert(ctx);
    int64_t t_start_sample_us;
    t_start_sample_us = ggml_time_us();

    gptneox_sample_softmax(ctx, candidates);

    // Truncate the words with surprise values greater than mu
    // (softmax sorted by logit, so the kept prefix is contiguous)
    candidates->size = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const gptneox_token_data & candidate) {
        return -log2f(candidate.p) > *mu;
    }));

    // Normalize the probabilities of the remaining words
    gptneox_sample_softmax(ctx, candidates);

    // Sample the next word X from the remaining words
    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
    gptneox_token X = gptneox_sample_token(ctx, candidates);
    t_start_sample_us = ggml_time_us();

    //
// Compute error as the difference between observed surprise and target surprise value
    size_t X_idx = std::distance(candidates->data, std::find_if(candidates->data, candidates->data + candidates->size, [&](const gptneox_token_data & candidate) {
        return candidate.id == X;
    }));
    float observed_surprise = -log2f(candidates->data[X_idx].p);
    float e = observed_surprise - tau;

    // Update mu using the learning rate and error
    *mu = *mu - eta * e;

    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    }
    return X;
}

// Deterministic argmax sampling over logits. ctx may be null.
gptneox_token gptneox_sample_token_greedy(struct gptneox_context * ctx, gptneox_token_data_array * candidates) {
    const int64_t t_start_sample_us = ggml_time_us();

    // Find max element
    auto max_iter = std::max_element(candidates->data, candidates->data + candidates->size, [](const gptneox_token_data & a, const gptneox_token_data & b) {
        return a.logit < b.logit;
    });

    gptneox_token result = max_iter->id;
    if (ctx) {
        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
        ctx->n_sample++;
    }
    return result;
}

// Draw a token id from the candidates' probability distribution using the
// context's RNG. Softmax is (re)applied first so `p` fields are fresh.
gptneox_token gptneox_sample_token(struct gptneox_context * ctx, gptneox_token_data_array * candidates) {
    assert(ctx);
    const int64_t t_start_sample_us = ggml_time_us();
    gptneox_sample_softmax(nullptr, candidates);

    std::vector<float> probs;
    probs.reserve(candidates->size);
    for (size_t i = 0; i < candidates->size; ++i) {
        probs.push_back(candidates->data[i].p);
    }

    std::discrete_distribution<> dist(probs.begin(), probs.end());
    auto & rng = ctx->rng;
    int idx = dist(rng);

    gptneox_token result = candidates->data[idx].id;
    ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
    ctx->n_sample++;
    return result;
}

//
// quantization
//

// temp - load then save model, allows for load and save to be different
// Streams every tensor from fname_inp to fname_out unchanged (no mmap).
static void gptneox_model_copy_internal(const std::string & fname_inp, const std::string & fname_out, enum gptneox_ftype ftype) {
    std::unique_ptr<gptneox_model_loader> model_loader(new gptneox_model_loader(fname_inp.c_str(), /*use_mmap*/ false, /*vocab_only*/ false));
gptneox_file_saver file_saver(fname_out.c_str(), model_loader->file_loaders.at(0).get(), ftype);
    size_t idx = 0;
    for (gptneox_load_tensor & tensor : model_loader->tensors_map.tensors) {
        // read each tensor into a scratch buffer, then write it back out verbatim
        gptneox_buffer read_data;
        read_data.resize(tensor.size);
        tensor.data = read_data.addr;
        model_loader->load_data_for(tensor);
        printf("[%4zu/%4zu] %36s - %16s, type = %6s, ",
               ++idx, model_loader->tensors_map.tensors.size(),
               tensor.name.c_str(),
               gptneox_format_tensor_shape(tensor.ne).c_str(),
               ggml_type_name(tensor.type));
        file_saver.write_tensor(tensor, tensor.type, tensor.data, tensor.size);
    }
}

// Public wrapper for gptneox_model_copy_internal; returns 0 on success.
// (exception handling is currently commented out)
int gptneox_model_copy(
        const char * fname_inp,
        const char * fname_out,
        enum gptneox_ftype ftype) {
    // try {
    gptneox_model_copy_internal(fname_inp, fname_out, ftype);
    return 0;
    // } catch (const std::string & err) {
    //     fprintf(stderr, "%s: failed to copy: %s\n", __func__, err.c_str());
    //     return 1;
    // }
}

// Requantize the model at fname_inp into fname_out using `ftype`'s target
// ggml type, parallelized across `nthread` threads (0 => hardware default).
static void gptneox_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, enum gptneox_ftype ftype, int nthread) {
    ggml_type quantized_type;
    switch (ftype) {
        case GPTNEOX_FTYPE_MOSTLY_F16:  quantized_type = GGML_TYPE_F16;  break;
        case GPTNEOX_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break;
        case GPTNEOX_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break;
        case GPTNEOX_FTYPE_MOSTLY_Q4_2: quantized_type = GGML_TYPE_Q4_2; break;
        case GPTNEOX_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
        case GPTNEOX_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
        case GPTNEOX_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
        default: Die("invalid output file type %d\n", ftype);
    };

    if (nthread <= 0) {
        nthread = std::thread::hardware_concurrency();
    }

    std::unique_ptr<gptneox_model_loader> model_loader(new gptneox_model_loader(fname_inp.c_str(), /*use_mmap*/ false, /*vocab_only*/ false));
    gptneox_file_saver file_saver(fname_out.c_str(), model_loader->file_loaders.at(0).get(), ftype);

    size_t total_size_org = 0;
    size_t total_size_new = 0;
// 16 histogram buckets (one per 4-bit quantization bin)
    std::vector<int64_t> hist_all(1 << 4, 0);

    std::vector<std::thread> workers;
    std::mutex mutex;

    size_t idx = 0;
    for (gptneox_load_tensor & tensor : model_loader->tensors_map.tensors) {
        gptneox_buffer read_data;
        read_data.resize(tensor.size);
        tensor.data = read_data.addr;
        model_loader->load_data_for(tensor);

        printf("[%4zu/%4zu] %50s - %16s, type = %6s, ",
               ++idx, model_loader->tensors_map.tensors.size(),
               tensor.name.c_str(),
               gptneox_format_tensor_shape(tensor.ne).c_str(),
               ggml_type_name(tensor.type));

        // only quantize 2d weights that aren't the output layer
        bool quantize = tensor.ne.size() == 2 &&
                        tensor.type != quantized_type &&
                        _endswith(tensor.name.c_str(), "weight") &&
                        tensor.name != "output.weight";

        enum ggml_type new_type;
        void * new_data;
        size_t new_size;
        gptneox_buffer work;

        if (!quantize) {
            // pass-through: keep original type and bytes
            new_type = tensor.type;
            new_data = tensor.data;
            new_size = tensor.size;
            printf("size = %8.3f MiB\n", tensor.size/1024.0/1024.0);
        } else if (quantized_type == GGML_TYPE_F16) {
            // f32 -> f16 conversion path (2 bytes per element)
            GPTNEOX_ASSERT(tensor.type == GGML_TYPE_F32);
            size_t nelements = tensor.ne.at(0) * tensor.ne.at(1);
            new_type = quantized_type;
            new_size = nelements * 2;
            work.resize(new_size);
            new_data = work.addr;
            ggml_fp32_to_fp16_row((const float *)tensor.data, (ggml_fp16_t *)new_data, nelements);
        } else {
            // integer quantization path: need f32 source data first
            new_type = quantized_type;
            float * f32_data;
            size_t nelements = tensor.ne.at(0) * tensor.ne.at(1);
            gptneox_buffer f32_conv_buf;
            if (tensor.type == GGML_TYPE_F32) {
                f32_data = (float *) tensor.data;
            } else if (tensor.type == GGML_TYPE_F16) {
                f32_conv_buf.resize(nelements * sizeof(float));
                f32_data = (float *) f32_conv_buf.addr;
                auto f16_data = (const ggml_fp16_t *) tensor.data;
                for (size_t i = 0; i < nelements; i++) {
                    f32_data[i] = ggml_fp16_to_fp32(f16_data[i]);
                }
            } else {
                Die("type %s unsupported for integer quantization", ggml_type_name(tensor.type));
            }
            printf("quantizing .. 
"); fflush(stdout);
            work.resize(nelements * 4); // upper bound on size
            new_data = work.addr;
            std::vector<int64_t> hist_cur(1 << 4, 0);

            int chunk_size = 32 * 512;
            const int nchunk = (nelements + chunk_size - 1)/chunk_size;
            const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
            if (nthread_use < 2) {
                // single-threaded: quantize the whole tensor in one call
                new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nelements, hist_cur.data());
            } else {
                // multi-threaded: workers claim chunk_size-element ranges via a
                // mutex-guarded counter and merge their local histograms on exit
                size_t counter = 0;
                new_size = 0;
                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size] () {
                    std::vector<int64_t> local_hist;
                    size_t local_size = 0;
                    while (true) {
                        std::unique_lock<std::mutex> lock(mutex);
                        size_t first = counter;
                        counter += chunk_size;
                        if (first >= nelements) {
                            // no more work: fold local results in under the lock
                            if (!local_hist.empty()) {
                                for (int j=0; j<int(local_hist.size()); ++j) hist_cur[j] += local_hist[j];
                                new_size += local_size;
                            }
                            break;
                        }
                        lock.unlock();
                        size_t last = std::min(nelements, first + chunk_size);
                        if (local_hist.empty()) local_hist.resize(hist_cur.size(), 0);
                        local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first, last - first, local_hist.data());
                    }
                };
                // main thread participates as the nthread_use-th worker
                if (int(workers.size()) < nthread_use - 1) workers.resize(nthread_use - 1);
                for (int it = 0; it < nthread_use - 1; ++it) workers[it] = std::thread(compute);
                compute();
                for (int it = 0; it < nthread_use - 1; ++it) workers[it].join();
            }

            printf("size = %8.2f MiB -> %8.2f MiB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
            for (size_t i = 0; i < hist_cur.size(); i++) {
                hist_all[i] += hist_cur[i];
            }

            for (size_t i = 0; i < hist_cur.size(); i++) {
                printf("%5.3f ", hist_cur[i] / float(nelements));
            }
            printf("\n");
        }
        total_size_org += tensor.size;
        total_size_new += new_size;
        file_saver.write_tensor(tensor, new_type, new_data, new_size);
    }

    printf("%s: model size = %8.2f MiB\n", __func__, total_size_org/1024.0/1024.0);
    printf("%s: quant size = %8.2f MiB\n", __func__, total_size_new/1024.0/1024.0);

    {
        int64_t sum_all = 0;
        for (size_t i = 0; i
< hist_all.size(); i++) {
            sum_all += hist_all[i];
        }

        // print the aggregate quantization histogram across all tensors
        printf("%s: hist: ", __func__);
        for (size_t i = 0; i < hist_all.size(); i++) {
            printf("%5.3f ", hist_all[i] / float(sum_all));
        }
        printf("\n");
    }
}

//
// interface implementation
//

// Load a model file and build a ready-to-eval context (weights, KV cache,
// logits/embedding buffers). Returns nullptr on load failure.
struct gptneox_context * gptneox_init_from_file(
        const char * path_model,
        struct gptneox_context_params params) {
    ggjt_v1();
    ggml_time_init();

    gptneox_context * ctx = new gptneox_context;

    // non-positive seed means "seed from the clock"
    if (params.seed <= 0) {
        params.seed = time(NULL);
    }

    unsigned cur_percentage = 0;
    if (params.progress_callback == NULL) {
        // default progress callback: print one dot per percent to stderr
        params.progress_callback_user_data = &cur_percentage;
        params.progress_callback = [](float progress, void * ctx) {
            unsigned * cur_percentage_p = (unsigned *) ctx;
            unsigned percentage = (unsigned) (100 * progress);
            while (percentage > *cur_percentage_p) {
                ++*cur_percentage_p;
                fprintf(stderr, ".");
                fflush(stderr);
                if (percentage >= 100) {
                    fprintf(stderr, "\n");
                }
            }
        };
    }

    ctx->rng = std::mt19937(params.seed);
    ctx->logits_all = params.logits_all;

    ggml_type memory_type = params.f16_kv ?
GGML_TYPE_F16 : GGML_TYPE_F32;

    if (!gptneox_model_load(path_model, *ctx, params.n_ctx, memory_type,
                            params.use_mmap, params.use_mlock, params.vocab_only,
                            params.progress_callback, params.progress_callback_user_data)) {
        fprintf(stderr, "%s: failed to load model\n", __func__);
        gptneox_free(ctx);
        return nullptr;
    }

    // reserve memory for context buffers
    if (!params.vocab_only) {
        if (!kv_cache_init(ctx->model.hparams, ctx->model.kv_self, memory_type, ctx->model.hparams.n_ctx)) {
            fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
            gptneox_free(ctx);
            return nullptr;
        }

        {
            const size_t memory_size = ggml_nbytes(ctx->model.kv_self.k) + ggml_nbytes(ctx->model.kv_self.v);
            fprintf(stderr, "%s: kv self size = %7.2f MiB\n", __func__, memory_size / 1024.0 / 1024.0);
        }

        const auto & hparams = ctx->model.hparams;

        // resized during inference
        if (params.logits_all) {
            ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab);
        } else {
            ctx->logits.reserve(hparams.n_vocab);
        }

        if (params.embedding){
            ctx->embedding.resize(hparams.n_embd);
        }

        // scratch/compute buffers sized by model type
        ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type));

        ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0().at(ctx->model.type));
        ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type));
    }

    return ctx;
}

// Destroy a context created by gptneox_init_from_file.
void gptneox_free(struct gptneox_context * ctx) {
    delete ctx;
}

// Public wrapper for gptneox_model_quantize_internal; returns 0 on success.
// (exception handling is currently commented out)
int gptneox_model_quantize(
        const char * fname_inp,
        const char * fname_out,
        enum gptneox_ftype ftype,
        int nthread) {
    // try {
    gptneox_model_quantize_internal(fname_inp, fname_out, ftype, nthread);
    return 0;
    // } catch (const std::string & err) {
    //     fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.c_str());
    //     return 1;
    // }
}

// Apply a "ggla" LoRA adapter file to the loaded model, optionally reading
// original weights from path_base_model. Returns 0 on success, 1 on error.
int gptneox_apply_lora_from_file_internal(struct gptneox_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) {
    fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);

    auto & model = ctx->model;

    const int64_t t_start_lora_us = ggml_time_us();

    auto fin =
std::ifstream(path_lora, std::ios::binary); if (!fin) { fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora); return 1; } // verify magic and version { uint32_t magic; fin.read((char *) &magic, sizeof(magic)); if (magic != READ32BE("ggla")) { fprintf(stderr, "%s: bad file magic\n", __func__); return 1; } uint32_t format_version; fin.read((char *) &format_version, sizeof(format_version)); if (format_version != 1) { fprintf(stderr, "%s: unsupported file version\n", __func__ ); return 1; } } int32_t lora_r; int32_t lora_alpha; fin.read((char *) &lora_r, sizeof(lora_r)); fin.read((char *) &lora_alpha, sizeof(lora_alpha)); float scaling = (float)lora_alpha / (float)lora_r; fprintf(stderr, "%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling); // create a temporary ggml context to store the lora tensors // todo: calculate size from biggest possible tensor std::vector<uint8_t> lora_buf(1024ull * 1024ull * 1024ull); struct ggml_init_params params; params.mem_size = lora_buf.size(); params.mem_buffer = lora_buf.data(); params.no_alloc = false; ggml_context * lora_ctx = ggml_init(params); std::unordered_map<std::string, struct ggml_tensor *> lora_tensors; // create a name -> tensor map of the model to accelerate lookups std::unordered_map<std::string, struct ggml_tensor*> model_tensors; for (auto & kv: model.tensors_by_name) { model_tensors.insert(kv); } // load base model std::unique_ptr<gptneox_model_loader> model_loader; ggml_context * base_ctx = NULL; gptneox_buffer base_buf; if (path_base_model) { fprintf(stderr, "%s: loading base model from '%s'\n", __func__, path_base_model); model_loader.reset(new gptneox_model_loader(path_base_model, /*use_mmap*/ true, /*vocab_only*/ false)); size_t ctx_size, mmapped_size; model_loader->calc_sizes(&ctx_size, &mmapped_size); base_buf.resize(ctx_size); ggml_init_params base_params; base_params.mem_size = base_buf.size; base_params.mem_buffer = base_buf.addr; base_params.no_alloc = 
model_loader->use_mmap; base_ctx = ggml_init(base_params); model_loader->ggml_ctx = base_ctx; // maybe this should in gptneox_model_loader if (model_loader->use_mmap) { model_loader->mapping.reset(new gptneox_mmap(&model_loader->file_loaders.at(0)->file, /* prefetch */ false)); } } // read tensors and apply bool warned = false; int n_tensors = 0; while (true) { int32_t n_dims; int32_t length; int32_t ftype; fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims)); fin.read(reinterpret_cast<char *>(&length), sizeof(length)); fin.read(reinterpret_cast<char *>(&ftype), sizeof(ftype)); if (fin.eof()) { break; } int32_t ne[2] = { 1, 1 }; for (int i = 0; i < n_dims; ++i) { fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i])); } std::string name(length, 0); fin.read(&name[0], length); // check for lora suffix and get the type of tensor const std::string lora_suffix = ".lora"; size_t pos = name.rfind(lora_suffix); if (pos == std::string::npos) { fprintf(stderr, "%s: error: '%s' is not a lora tensor\n", __func__, name.c_str()); return 1; } std::string lora_type = name.substr(pos + lora_suffix.length()); std::string base_name = name; base_name.erase(pos); // fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str()); if (model_tensors.find(base_name.data()) == model_tensors.end()) { fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data()); return 1; } // create ggml tensor ggml_type wtype; switch (ftype) { case 0: wtype = GGML_TYPE_F32; break; case 1: wtype = GGML_TYPE_F16; break; default: { fprintf(stderr, "%s: invalid tensor data type '%d'\n", __func__, ftype); return false; } } ggml_tensor* lora_tensor; if (n_dims == 2) { lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]); } else { fprintf(stderr, "%s: unsupported tensor dimension %d\n", __func__, n_dims); return 1; } // load tensor data size_t offset = fin.tellg(); size_t tensor_data_size = ggml_nbytes(lora_tensor); offset = 
(offset + 31) & -32; fin.seekg(offset); fin.read((char*)lora_tensor->data, tensor_data_size); lora_tensors[name] = lora_tensor; // check if we have both A and B tensors and apply if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() && lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) { ggml_tensor * dest_t = model_tensors[base_name]; ggml_tensor * base_t; if (model_loader) { // load from base model if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) { fprintf(stderr, "%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str()); return 1; } size_t idx = model_loader->tensors_map.name_to_idx[base_name]; gptneox_load_tensor & lt = model_loader->tensors_map.tensors[idx]; base_t = model_loader->get_tensor(base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }); lt.data = (uint8_t *) lt.ggml_tensor->data; model_loader->load_data_for(lt); lt.ggml_tensor->data = lt.data; } else { base_t = dest_t; } if (ggml_is_quantized(base_t->type)) { if (!warned) { fprintf(stderr, "%s: warning: using a lora adapter with a quantized model may result in poor quality, " "use a f16 or f32 base model with --lora-base\n", __func__); warned = true; } } ggml_tensor * loraA = lora_tensors[base_name + ".loraA"]; ggml_tensor * loraB = lora_tensors[base_name + ".loraB"]; if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) { fprintf(stderr, "%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");" " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]); return 1; } // w = w + BA*s ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB); if (scaling != 1.0f) { ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, scaling); BA = ggml_scale(lora_ctx, BA, scale_tensor); } ggml_tensor * r; if (base_t == dest_t) { r = ggml_add_inplace(lora_ctx, dest_t, BA); } else { r = ggml_add(lora_ctx, base_t, BA); r = ggml_cpy(lora_ctx, r, dest_t); 
} struct ggml_cgraph gf = ggml_build_forward(r); gf.n_threads = n_threads; ggml_graph_compute(lora_ctx, &gf); // we won't need these tensors again, reset the context to save memory ggml_free(lora_ctx); lora_ctx = ggml_init(params); lora_tensors.clear(); n_tensors++; if (n_tensors % 4 == 0) fprintf(stderr, "."); } } // TODO: this should be in a destructor, it will leak on failure ggml_free(lora_ctx); if (base_ctx) { ggml_free(base_ctx); } const int64_t t_lora_us = ggml_time_us() - t_start_lora_us; fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0); return 0; } int gptneox_apply_lora_from_file(struct gptneox_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) { // try { return gptneox_apply_lora_from_file_internal(ctx, path_lora, path_base_model, n_threads); // } catch (const std::string & err) { // fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.c_str()); // return 1; // } } int gptneox_get_kv_cache_token_count(struct gptneox_context * ctx) { return ctx->model.kv_self.n; } #define GPTNEOX_MAX_RNG_STATE 64*1024 void gptneox_set_rng_seed(struct gptneox_context * ctx, int seed) { if (seed <= 0) { seed = time(NULL); } ctx->rng.seed(seed); } // Returns the size of the state size_t gptneox_get_state_size(struct gptneox_context * ctx) { // we don't know size of rng until we actually serialize it. so reserve more than enough memory for its serialized state. // for reference, std::mt19937(1337) serializes to 6701 bytes. 
const size_t s_rng_size = sizeof(size_t); const size_t s_rng = GPTNEOX_MAX_RNG_STATE; const size_t s_logits_capacity = sizeof(size_t); const size_t s_logits_size = sizeof(size_t); const size_t s_logits = ctx->logits.capacity() * sizeof(float); const size_t s_embedding_size = sizeof(size_t); const size_t s_embedding = ctx->embedding.size() * sizeof(float); const size_t s_kv_size = sizeof(size_t); const size_t s_kv_ntok = sizeof(int); const size_t s_kv = ctx->model.kv_self.buf.size; const size_t s_total = ( + s_rng_size + s_rng + s_logits_capacity + s_logits_size + s_logits + s_embedding_size + s_embedding + s_kv_size + s_kv_ntok + s_kv ); return s_total; } // Copies the state to the specified destination address size_t gptneox_copy_state_data(struct gptneox_context * ctx, uint8_t * dest) { uint8_t * out = dest; // copy rng { std::stringstream rng_ss; rng_ss << ctx->rng; const size_t rng_size = rng_ss.str().size(); char rng_buf[GPTNEOX_MAX_RNG_STATE]; memset(&rng_buf[0], 0, GPTNEOX_MAX_RNG_STATE); memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size()); memcpy(out, &rng_size, sizeof(rng_size)); out += sizeof(rng_size); memcpy(out, &rng_buf[0], GPTNEOX_MAX_RNG_STATE); out += GPTNEOX_MAX_RNG_STATE; } // copy logits { const size_t logits_cap = ctx->logits.capacity(); const size_t logits_size = ctx->logits.size(); memcpy(out, &logits_cap, sizeof(logits_cap)); out += sizeof(logits_cap); memcpy(out, &logits_size, sizeof(logits_size)); out += sizeof(logits_size); if (logits_size) { memcpy(out, ctx->logits.data(), logits_size * sizeof(float)); } out += logits_cap * sizeof(float); } // copy embeddings { const size_t embedding_size = ctx->embedding.size(); memcpy(out, &embedding_size, sizeof(embedding_size)); out += sizeof(embedding_size); if (embedding_size) { memcpy(out, ctx->embedding.data(), embedding_size * sizeof(float)); out += embedding_size * sizeof(float); } } // copy kv cache { const size_t kv_size = ctx->model.kv_self.buf.size; const int kv_ntok = 
gptneox_get_kv_cache_token_count(ctx); memcpy(out, &kv_size, sizeof(kv_size)); out += sizeof(kv_size); memcpy(out, &kv_ntok, sizeof(kv_ntok)); out += sizeof(kv_ntok); if (kv_size) { memcpy(out, ctx->model.kv_self.buf.addr, kv_size); out += kv_size; } } const size_t written = out - dest; const size_t expected = gptneox_get_state_size(ctx); GPTNEOX_ASSERT(written == expected); return written; } // Sets the state reading from the specified source address size_t gptneox_set_state_data(struct gptneox_context * ctx, const uint8_t * src) { const uint8_t * in = src; // set rng { size_t rng_size; char rng_buf[GPTNEOX_MAX_RNG_STATE]; memcpy(&rng_size, in, sizeof(rng_size)); in += sizeof(rng_size); memcpy(&rng_buf[0], in, GPTNEOX_MAX_RNG_STATE); in += GPTNEOX_MAX_RNG_STATE; std::stringstream rng_ss; rng_ss.str(std::string(&rng_buf[0], rng_size)); rng_ss >> ctx->rng; GPTNEOX_ASSERT(rng_ss.fail() == false); } // set logits { size_t logits_cap; size_t logits_size; memcpy(&logits_cap, in, sizeof(logits_cap)); in += sizeof(logits_cap); memcpy(&logits_size, in, sizeof(logits_size)); in += sizeof(logits_size); GPTNEOX_ASSERT(ctx->logits.capacity() == logits_cap); if (logits_size) { ctx->logits.resize(logits_size); memcpy(ctx->logits.data(), in, logits_size * sizeof(float)); } in += logits_cap * sizeof(float); } // set embeddings { size_t embedding_size; memcpy(&embedding_size, in, sizeof(embedding_size)); in += sizeof(embedding_size); GPTNEOX_ASSERT(ctx->embedding.capacity() == embedding_size); if (embedding_size) { memcpy(ctx->embedding.data(), in, embedding_size * sizeof(float)); in += embedding_size * sizeof(float); } } // set kv cache { size_t kv_size; int kv_ntok; memcpy(&kv_size, in, sizeof(kv_size)); in += sizeof(kv_size); memcpy(&kv_ntok, in, sizeof(kv_ntok)); in += sizeof(kv_ntok); if (kv_size) { GPTNEOX_ASSERT(ctx->model.kv_self.buf.size == kv_size); void * k_data = ctx->model.kv_self.k->data; // remember data pointers void * v_data = ctx->model.kv_self.v->data; // because 
their value is stored in buf and overwritten by memcpy memcpy(ctx->model.kv_self.buf.addr, in, kv_size); in += kv_size; ctx->model.kv_self.k->data = k_data; // restore correct data pointers ctx->model.kv_self.v->data = v_data; } ctx->model.kv_self.n = kv_ntok; } const size_t nread = in - src; const size_t expected = gptneox_get_state_size(ctx); GPTNEOX_ASSERT(nread == expected); return nread; } int gptneox_eval( struct gptneox_context * ctx, const gptneox_token * tokens, int n_tokens, int n_past, int n_threads) { if (!gptneox_eval_internal(*ctx, tokens, n_tokens, n_past, n_threads)) { fprintf(stderr, "%s: failed to eval\n", __func__); return 1; } // get a more accurate load time, upon first eval if (!ctx->has_evaluated_once) { ctx->t_load_us = ggml_time_us() - ctx->t_start_us; ctx->has_evaluated_once = true; } return 0; } int gptneox_tokenize( struct gptneox_context * ctx, const char * text, gptneox_token * tokens, int n_max_tokens, bool add_bos) { auto res = gptneox_tokenize(ctx->vocab, text, add_bos); if (n_max_tokens < (int) res.size()) { fprintf(stderr, "%s: too many tokens\n", __func__); return -((int) res.size()); } for (size_t i = 0; i < res.size(); i++) { tokens[i] = res[i]; } return res.size(); } int gptneox_n_vocab(struct gptneox_context * ctx) { return ctx->vocab.id_to_token.size(); } int gptneox_n_ctx(struct gptneox_context * ctx) { return ctx->model.hparams.n_ctx; } int gptneox_n_embd(struct gptneox_context * ctx) { return ctx->model.hparams.n_embd; } float * gptneox_get_logits(struct gptneox_context * ctx) { return ctx->logits.data(); } float * gptneox_get_embeddings(struct gptneox_context * ctx) { return ctx->embedding.data(); } const char * gptneox_token_to_str(struct gptneox_context * ctx, gptneox_token token) { if (token >= gptneox_n_vocab(ctx)) { return nullptr; } return ctx->vocab.id_to_token[token].tok.c_str(); } gptneox_token gptneox_str_to_token(struct gptneox_context * ctx, const char * str) { return ctx->vocab.token_to_id[str]; } 
gptneox_token gptneox_token_bos() { return 0; } gptneox_token gptneox_token_eos() { return 0; } // Varies depending on gptneox model, use gptneox_str_to_token instead gptneox_token gptneox_token_nl() { return 13; } void gptneox_print_timings(struct gptneox_context * ctx) { const int64_t t_end_us = ggml_time_us(); const int32_t n_sample = std::max(1, ctx->n_sample); const int32_t n_eval = std::max(1, ctx->n_eval); const int32_t n_p_eval = std::max(1, ctx->n_p_eval); fprintf(stderr, "\n"); fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0); fprintf(stderr, "%s: sample time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3 * ctx->t_sample_us, n_sample, 1e-3 * ctx->t_sample_us / n_sample); fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token)\n", __func__, 1e-3 * ctx->t_p_eval_us, n_p_eval, 1e-3 * ctx->t_p_eval_us / n_p_eval); fprintf(stderr, "%s: eval time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3 * ctx->t_eval_us, n_eval, 1e-3 * ctx->t_eval_us / n_eval); fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (t_end_us - ctx->t_start_us)/1000.0); } void gptneox_reset_timings(struct gptneox_context * ctx) { ctx->t_start_us = ggml_time_us(); ctx->t_sample_us = ctx->n_sample = 0; ctx->t_eval_us = ctx->n_eval = 0; ctx->t_p_eval_us = ctx->n_p_eval = 0; } const char * gptneox_print_system_info(void) { static std::string s; s = ""; s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | "; s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | "; s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | "; s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | "; s += "AVX512_VNNI = " + std::to_string(ggml_cpu_has_avx512_vnni()) + " | "; s += "FMA = " + std::to_string(ggml_cpu_has_fma()) + " | "; s += "NEON = " + std::to_string(ggml_cpu_has_neon()) + " | "; s += "ARM_FMA = " + std::to_string(ggml_cpu_has_arm_fma()) + " | "; s += "F16C = " + 
std::to_string(ggml_cpu_has_f16c()) + " | "; s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | "; s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | "; s += "BLAS = " + std::to_string(ggml_cpu_has_blas()) + " | "; s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | "; s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | "; return s.c_str(); } // For internal test use std::vector<std::pair<std::string, struct ggml_tensor *>>& gptneox_internal_get_tensor_map(struct gptneox_context * ctx) { return ctx->model.tensors_by_name; } size_t gptneox_load_session_file(struct gptneox_context * ctx, const char * path_session, gptneox_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { // TODO leverage mmap gptneox_file file(path_session, "rb"); const uint32_t magic = file.read_u32(); const uint32_t version = file.read_u32(); if (!(magic == READ32BE("ggsn") && version == 0)) { fprintf(stderr, "%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version); return 0; } gptneox_hparams session_hparams; file.read_raw(&session_hparams, sizeof(gptneox_hparams)); // REVIEW if (session_hparams != ctx->model.hparams) { fprintf(stderr, "%s : model hparams didn't match from session file!\n", __func__); return 0; } const uint32_t n_token_count = file.read_u32(); GPTNEOX_ASSERT(n_token_capacity >= n_token_count); file.read_raw(tokens_out, sizeof(gptneox_token) * n_token_count); *n_token_count_out = n_token_count; const size_t n_state_size = file.size - file.tell(); const size_t n_orig_state_size = gptneox_get_state_size(ctx); if (n_state_size != n_orig_state_size) { fprintf(stderr, "%s : failed to validate state size\n", __func__); } std::unique_ptr<uint8_t[]> state_data(new uint8_t[n_state_size]); file.read_raw(state_data.get(), n_state_size); return gptneox_set_state_data(ctx, state_data.get()); } size_t gptneox_save_session_file(struct gptneox_context * ctx, const char * path_session, const 
gptneox_token * tokens, size_t n_token_count) { // TODO save temp & swap gptneox_file file(path_session, "wb"); const size_t n_state_size = gptneox_get_state_size(ctx); std::unique_ptr<uint8_t[]> state_data(new uint8_t[n_state_size]); gptneox_copy_state_data(ctx, state_data.get()); file.write_u32(READ32BE("ggsn")); // magic file.write_u32(0); // version file.write_raw(&ctx->model.hparams, sizeof(gptneox_hparams)); file.write_u32((uint32_t) n_token_count); // REVIEW file.write_raw(tokens, sizeof(gptneox_token) * n_token_count); file.write_raw(state_data.get(), n_state_size); return n_state_size; // REVIEW }
109,223
2,989
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/copy-gptneox.cc
/*-*-mode:c++;indent-tabs-mode:nil;c-basic-offset:4;tab-width:8;coding:utf-8-*-│ │vi: set net ft=c++ ts=4 sts=4 sw=4 fenc=utf-8 :vi│ ╚──────────────────────────────────────────────────────────────────────────────╝ │ │ │ radpajama.com │ │ Copyright (c) 2023 Ariel Núñez │ │ Copyright (c) 2023 Georgi Gerganov │ │ │ │ Permission is hereby granted, free of charge, to any person obtaining │ │ a copy of this software and associated documentation files (the │ │ "Software"), to deal in the Software without restriction, including │ │ without limitation the rights to use, copy, modify, merge, publish, │ │ distribute, sublicense, and/or sell copies of the Software, and to │ │ permit persons to whom the Software is furnished to do so, subject to │ │ the following conditions: │ │ │ │ The above copyright notice and this permission notice shall be │ │ included in all copies or substantial portions of the Software. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, │ │ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF │ │ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. │ │ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY │ │ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, │ │ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE │ │ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
│ │ │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/log/log.h" #include "third_party/ggml/ggml.h" #include "third_party/libcxx/cstdio" #include "third_party/libcxx/map" #include "third_party/libcxx/string" #include "third_party/radpajama/gptneox.h" // clang-format off static const std::map<std::string, enum gptneox_ftype> GPTNEOX_FTYPE_MAP = { {"q4_0", GPTNEOX_FTYPE_MOSTLY_Q4_0}, {"q4_1", GPTNEOX_FTYPE_MOSTLY_Q4_1}, {"q4_2", GPTNEOX_FTYPE_MOSTLY_Q4_2}, //{"q4_3", GPTNEOX_FTYPE_MOSTLY_Q4_3}, {"q5_0", GPTNEOX_FTYPE_MOSTLY_Q5_0}, {"q5_1", GPTNEOX_FTYPE_MOSTLY_Q5_1}, {"q8_0", GPTNEOX_FTYPE_MOSTLY_Q8_0}, }; // usage: // ./quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type // int main(int argc, char ** argv) { MakeProcessNice(); ShowCrashReports(); ggjt_v1(); ggml_time_init(); if (argc < 4) { fprintf(stderr, "usage: %s model-f32.bin model-quant.bin ftype\n", argv[0]); for (auto it = GPTNEOX_FTYPE_MAP.begin(); it != GPTNEOX_FTYPE_MAP.end(); it++) { fprintf(stderr, " type = \"%s\" or %d\n", it->first.c_str(), it->second); } return 1; } // needed to initialize f16 tables { struct ggml_init_params params = { 0, NULL, false }; struct ggml_context * ctx = ggml_init(params); ggml_free(ctx); } const std::string fname_inp = argv[1]; const std::string fname_out = argv[2]; enum gptneox_ftype ftype; if (argv[3][0] == 'q') { auto it = GPTNEOX_FTYPE_MAP.find(argv[3]); if (it == GPTNEOX_FTYPE_MAP.end()) { fprintf(stderr, "%s: unknown ftype '%s'\n", __func__, argv[3]); return 1; } ftype = it->second; } else { ftype = (enum gptneox_ftype)atoi(argv[3]); } gptneox_model_copy(fname_inp.c_str(), fname_out.c_str(), ftype); return 0; }
4,536
91
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/common-gptneox.cc
/*-*-mode:c++;indent-tabs-mode:nil;c-basic-offset:4;tab-width:8;coding:utf-8-*-│ │vi: set net ft=c++ ts=4 sts=4 sw=4 fenc=utf-8 :vi│ ╚──────────────────────────────────────────────────────────────────────────────╝ │ │ │ radpajama.com │ │ Copyright (c) 2023 Ariel Núñez │ │ Copyright (c) 2023 Georgi Gerganov │ │ │ │ Permission is hereby granted, free of charge, to any person obtaining │ │ a copy of this software and associated documentation files (the │ │ "Software"), to deal in the Software without restriction, including │ │ without limitation the rights to use, copy, modify, merge, publish, │ │ distribute, sublicense, and/or sell copies of the Software, and to │ │ permit persons to whom the Software is furnished to do so, subject to │ │ the following conditions: │ │ │ │ The above copyright notice and this permission notice shall be │ │ included in all copies or substantial portions of the Software. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, │ │ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF │ │ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. │ │ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY │ │ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, │ │ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE │ │ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
│ │ │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "third_party/radpajama/common-gptneox.h" #include "third_party/ggml/llama_util.h" #include "third_party/libcxx/algorithm" #include "third_party/libcxx/cassert" #include "third_party/libcxx/cstring" #include "third_party/libcxx/fstream" #include "third_party/libcxx/iostream" #include "third_party/libcxx/iterator" #include "third_party/libcxx/sstream" #include "third_party/libcxx/string" // clang-format off bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { params.n_threads = std::min(20, std::max(1, (int)(_getcpucount() * 0.75))); bool invalid_param = false; std::string arg; gpt_params default_params; for (int i = 1; i < argc; i++) { arg = argv[i]; if (arg == "-s" || arg == "--seed") { if (++i >= argc) { invalid_param = true; break; } params.seed = std::stoi(argv[i]); } else if (arg == "-t" || arg == "--threads") { if (++i >= argc) { invalid_param = true; break; } params.n_threads = std::stoi(argv[i]); } else if (arg == "-p" || arg == "--prompt") { if (++i >= argc) { invalid_param = true; break; } params.prompt = argv[i]; } else if (arg == "--session") { if (++i >= argc) { invalid_param = true; break; } params.path_session = argv[i]; } else if (arg == "-f" || arg == "--file") { if (++i >= argc) { invalid_param = true; break; } std::ifstream file(argv[i]); if (!file) { fprintf(stderr, "error: failed to open file '%s'\n", argv[i]); invalid_param = true; break; } std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.prompt)); if (params.prompt.back() == '\n') { params.prompt.pop_back(); } } else if (arg == "-n" || arg == "--n_predict") { if (++i >= argc) { invalid_param = true; break; } params.n_predict = std::stoi(argv[i]); } else if (arg == "--top_k") { if (++i >= argc) { invalid_param = true; break; } params.top_k = std::stoi(argv[i]); } else if (arg == "-c" || arg == "--ctx_size") { if (++i >= argc) { 
invalid_param = true; break; } params.n_ctx = std::stoi(argv[i]); } else if (arg == "--memory_f32") { params.memory_f16 = false; } else if (arg == "--top_p") { if (++i >= argc) { invalid_param = true; break; } params.top_p = std::stof(argv[i]); } else if (arg == "--temp") { if (++i >= argc) { invalid_param = true; break; } params.temp = std::stof(argv[i]); } else if (arg == "--tfs") { if (++i >= argc) { invalid_param = true; break; } params.tfs_z = std::stof(argv[i]); } else if (arg == "--typical") { if (++i >= argc) { invalid_param = true; break; } params.typical_p = std::stof(argv[i]); } else if (arg == "--repeat_last_n") { if (++i >= argc) { invalid_param = true; break; } params.repeat_last_n = std::stoi(argv[i]); } else if (arg == "--repeat_penalty") { if (++i >= argc) { invalid_param = true; break; } params.repeat_penalty = std::stof(argv[i]); } else if (arg == "--frequency_penalty") { if (++i >= argc) { invalid_param = true; break; } params.frequency_penalty = std::stof(argv[i]); } else if (arg == "--presence_penalty") { if (++i >= argc) { invalid_param = true; break; } params.presence_penalty = std::stof(argv[i]); } else if (arg == "--mirostat") { if (++i >= argc) { invalid_param = true; break; } params.mirostat = std::stoi(argv[i]); } else if (arg == "--mirostat_lr") { if (++i >= argc) { invalid_param = true; break; } params.mirostat_eta = std::stof(argv[i]); } else if (arg == "--mirostat_ent") { if (++i >= argc) { invalid_param = true; break; } params.mirostat_tau = std::stof(argv[i]); } else if (arg == "-b" || arg == "--batch_size") { if (++i >= argc) { invalid_param = true; break; } params.n_batch = std::stoi(argv[i]); params.n_batch = std::min(512, params.n_batch); } else if (arg == "--keep") { if (++i >= argc) { invalid_param = true; break; } params.n_keep = std::stoi(argv[i]); } else if (arg == "-m" || arg == "--model") { if (++i >= argc) { invalid_param = true; break; } params.model = argv[i]; } else if (arg == "--lora") { if (++i >= argc) { 
invalid_param = true; break; } params.lora_adapter = argv[i]; params.use_mmap = false; } else if (arg == "--lora-base") { if (++i >= argc) { invalid_param = true; break; } params.lora_base = argv[i]; } else if (arg == "-i" || arg == "--interactive") { params.interactive = true; } else if (arg == "--embedding") { params.embedding = true; } else if (arg == "--interactive-first") { params.interactive_first = true; } else if (arg == "-ins" || arg == "--instruct") { params.instruct = true; } else if (arg == "--color") { params.use_color = true; } else if (arg == "--mlock") { params.use_mlock = true; } else if (arg == "--no-mmap") { params.use_mmap = false; } else if (arg == "--mtest") { params.mem_test = true; } else if (arg == "--verbose-prompt") { params.verbose_prompt = true; } else if (arg == "-r" || arg == "--reverse-prompt") { if (++i >= argc) { invalid_param = true; break; } params.antiprompt.push_back(argv[i]); } else if (arg == "--perplexity") { params.perplexity = true; } else if (arg == "--ignore-eos") { params.logit_bias[gptneox_token_eos()] = -INFINITY; } else if (arg == "--no-penalize-nl") { params.penalize_nl = false; } else if (arg == "-l" || arg == "--logit-bias") { if (++i >= argc) { invalid_param = true; break; } std::stringstream ss(argv[i]); gptneox_token key = 0; char sign = 0; std::string value_str; if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) { params.logit_bias[key] = std::stof(value_str) * ((sign == '-') ? 
-1.0f : 1.0f); } else { invalid_param = true; break; } } else if (arg == "--n_parts") { if (++i >= argc) { invalid_param = true; break; } params.n_parts = std::stoi(argv[i]); } else if (arg == "-h" || arg == "--help") { gpt_print_usage(argc, argv, default_params); exit(0); } else if (arg == "--random-prompt") { params.random_prompt = true; } else if (arg == "--in-prefix") { if (++i >= argc) { invalid_param = true; break; } params.input_prefix = argv[i]; } else { fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); gpt_print_usage(argc, argv, default_params); exit(1); } } if (invalid_param) { fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str()); gpt_print_usage(argc, argv, default_params); exit(1); } return true; } void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { fprintf(stderr, "usage: %s [options]\n", argv[0]); fprintf(stderr, "\n"); fprintf(stderr, "options:\n"); fprintf(stderr, " -h, --help show this help message and exit\n"); fprintf(stderr, " -i, --interactive run in interactive mode\n"); fprintf(stderr, " --interactive-first run in interactive mode and wait for input right away\n"); fprintf(stderr, " -ins, --instruct run in instruction mode\n"); fprintf(stderr, " -r PROMPT, --reverse-prompt PROMPT\n"); fprintf(stderr, " run in interactive mode and poll user input upon seeing PROMPT (can be\n"); fprintf(stderr, " specified more than once for multiple prompts).\n"); fprintf(stderr, " --color colorise output to distinguish prompt and user input from generations\n"); fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1, use random seed for <= 0)\n"); fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); fprintf(stderr, " -p PROMPT, --prompt PROMPT\n"); fprintf(stderr, " prompt to start generation with (default: empty)\n"); fprintf(stderr, " --session FNAME file to cache model state in (may be large!) 
(default: none)\n"); fprintf(stderr, " --random-prompt start with a randomized prompt.\n"); fprintf(stderr, " --in-prefix STRING string to prefix user inputs with (default: empty)\n"); fprintf(stderr, " -f FNAME, --file FNAME\n"); fprintf(stderr, " prompt file to start generation.\n"); fprintf(stderr, " -n N, --n_predict N number of tokens to predict (default: %d, -1 = infinity)\n", params.n_predict); fprintf(stderr, " --top_k N top-k sampling (default: %d, 0 = disabled)\n", params.top_k); fprintf(stderr, " --top_p N top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)params.top_p); fprintf(stderr, " --tfs N tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)params.tfs_z); fprintf(stderr, " --typical N locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)\n", (double)params.typical_p); fprintf(stderr, " --repeat_last_n N last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)\n", params.repeat_last_n); fprintf(stderr, " --repeat_penalty N penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)\n", (double)params.repeat_penalty); fprintf(stderr, " --presence_penalty N repeat alpha presence penalty (default: %.1f, 0.0 = disabled)\n", (double)params.presence_penalty); fprintf(stderr, " --frequency_penalty N repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)\n", (double)params.frequency_penalty); fprintf(stderr, " --mirostat N use Mirostat sampling.\n"); fprintf(stderr, " Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n"); fprintf(stderr, " (default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n", params.mirostat); fprintf(stderr, " --mirostat_lr N Mirostat learning rate, parameter eta (default: %.1f)\n", (double)params.mirostat_eta); fprintf(stderr, " --mirostat_ent N Mirostat target entropy, parameter tau (default: %.1f)\n", (double)params.mirostat_tau); fprintf(stderr, " -l TOKEN_ID(+/-)BIAS, --logit-bias TOKEN_ID(+/-)BIAS\n"); 
fprintf(stderr, " modifies the likelihood of token appearing in the completion,\n"); fprintf(stderr, " i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n"); fprintf(stderr, " or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'\n"); fprintf(stderr, " -c N, --ctx_size N size of the prompt context (default: %d)\n", params.n_ctx); fprintf(stderr, " --ignore-eos ignore end of stream token and continue generating (implies --logit-bias 2-inf)\n"); fprintf(stderr, " --no-penalize-nl do not penalize newline token\n"); fprintf(stderr, " --memory_f32 use f32 instead of f16 for memory key+value\n"); fprintf(stderr, " --temp N temperature (default: %.1f)\n", (double)params.temp); fprintf(stderr, " --n_parts N number of model parts (default: -1 = determine from dimensions)\n"); fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch); fprintf(stderr, " --perplexity compute perplexity over the prompt\n"); fprintf(stderr, " --keep number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep); if (gptneox_mlock_supported()) { fprintf(stderr, " --mlock force system to keep model in RAM rather than swapping or compressing\n"); } if (gptneox_mmap_supported()) { fprintf(stderr, " --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n"); } fprintf(stderr, " --mtest compute maximum memory usage\n"); fprintf(stderr, " --verbose-prompt print prompt before generation\n"); fprintf(stderr, " --lora FNAME apply LoRA adapter (implies --no-mmap)\n"); fprintf(stderr, " --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n"); fprintf(stderr, " -m FNAME, --model FNAME\n"); fprintf(stderr, " model path (default: %s)\n", params.model.c_str()); fprintf(stderr, "\n"); } std::string gpt_random_prompt(std::mt19937 & rng) { const int r = rng() % 10; switch (r) { case 0: return "So"; case 1: return "Once upon a 
time"; case 2: return "When"; case 3: return "The"; case 4: return "After"; case 5: return "If"; case 6: return "import"; case 7: return "He"; case 8: return "She"; case 9: return "They"; default: return "To"; } return "The"; } // TODO: not great allocating this every time std::vector<gptneox_token> gptneox_tokenize(struct gptneox_context * ctx, const std::string & text, bool add_bos) { // initialize to prompt numer of chars, since n_tokens <= n_prompt_chars std::vector<gptneox_token> res(text.size() + (int)add_bos); int n = gptneox_tokenize(ctx, text.c_str(), res.data(), res.size(), add_bos); assert(n >= 0); res.resize(n); return res; } /* Keep track of current color of output, and emit ANSI code if it changes. */ void set_console_color(console_state & con_st, console_color_t color) { if (con_st.use_color && con_st.color != color) { switch(color) { case CONSOLE_COLOR_DEFAULT: printf(ANSI_COLOR_RESET); break; case CONSOLE_COLOR_PROMPT: printf(ANSI_COLOR_YELLOW); break; case CONSOLE_COLOR_USER_INPUT: printf(ANSI_BOLD ANSI_COLOR_GREEN); break; } con_st.color = color; } }
19,043
395
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/main-redpajama-chat.cc
/*-*-mode:c++;indent-tabs-mode:nil;c-basic-offset:4;tab-width:8;coding:utf-8-*-│ │vi: set net ft=c++ ts=4 sts=4 sw=4 fenc=utf-8 :vi│ ╚──────────────────────────────────────────────────────────────────────────────╝ │ │ │ radpajama.com │ │ Copyright (c) 2023 Ariel Núñez │ │ Copyright (c) 2023 Georgi Gerganov │ │ │ │ Permission is hereby granted, free of charge, to any person obtaining │ │ a copy of this software and associated documentation files (the │ │ "Software"), to deal in the Software without restriction, including │ │ without limitation the rights to use, copy, modify, merge, publish, │ │ distribute, sublicense, and/or sell copies of the Software, and to │ │ permit persons to whom the Software is furnished to do so, subject to │ │ the following conditions: │ │ │ │ The above copyright notice and this permission notice shall be │ │ included in all copies or substantial portions of the Software. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, │ │ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF │ │ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. │ │ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY │ │ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, │ │ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE │ │ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
│ │ │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/calls/calls.h" #include "libc/calls/sigtimedwait.h" #include "libc/calls/struct/sigaction.h" #include "libc/calls/struct/siginfo.h" #include "libc/calls/weirdtypes.h" #include "libc/log/log.h" #include "libc/runtime/pathconf.h" #include "libc/runtime/runtime.h" #include "libc/runtime/sysconf.h" #include "libc/sysv/consts/f.h" #include "libc/sysv/consts/fileno.h" #include "libc/sysv/consts/o.h" #include "libc/sysv/consts/ok.h" #include "libc/sysv/consts/sa.h" #include "libc/sysv/consts/sicode.h" #include "libc/sysv/consts/ss.h" #include "libc/time/time.h" #include "third_party/getopt/getopt.h" #include "third_party/libcxx/algorithm" #include "third_party/libcxx/cassert" #include "third_party/libcxx/cinttypes" #include "third_party/libcxx/cmath" #include "third_party/libcxx/cstdio" #include "third_party/libcxx/cstring" #include "third_party/libcxx/ctime" #include "third_party/libcxx/fstream" #include "third_party/libcxx/iostream" #include "third_party/libcxx/string" #include "third_party/libcxx/vector" #include "third_party/musl/crypt.h" #include "third_party/musl/lockf.h" #include "third_party/radpajama/common-gptneox.h" #include "third_party/radpajama/gptneox.h" // clang-format off static console_state con_st; static gptneox_context ** g_ctx; static bool is_interacting = false; void sigint_handler(int signo) { set_console_color(con_st, CONSOLE_COLOR_DEFAULT); printf("\n"); // this also force flush stdout. 
if (signo == SIGINT) { if (!is_interacting) { is_interacting=true; } else { gptneox_print_timings(*g_ctx); _exit(130); } } } int main(int argc, char ** argv) { gpt_params params; params.model = "./models/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin"; con_st.use_color = true; params.n_ctx = 2048; params.seed = 1684054676; params.use_mmap = true; params.use_mlock = true; params.memory_f16 = true; params.mem_test = false; params.interactive = true; params.top_k = 30; params.top_p = 0.95; params.temp = 0.8; params.repeat_last_n = 3; params.repeat_penalty = 1.1; params.instruct = true; params.interactive = true; MakeProcessNice(); ShowCrashReports(); if (gpt_params_parse(argc, argv, params) == false) { return 1; } std::mt19937 rng(params.seed); gptneox_context * ctx; g_ctx = &ctx; { auto lparams = gptneox_context_default_params(); lparams.n_ctx = params.n_ctx; lparams.n_parts = params.n_parts; lparams.seed = params.seed; lparams.f16_kv = params.memory_f16; lparams.use_mmap = params.use_mmap; lparams.use_mlock = params.use_mlock; ctx = gptneox_init_from_file(params.model.c_str(), lparams); if (ctx == NULL) { fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str()); return 1; } } if (!params.lora_adapter.empty()) { int err = gptneox_apply_lora_from_file(ctx, params.lora_adapter.c_str(), params.lora_base.empty() ? 
NULL : params.lora_base.c_str(), params.n_threads); if (err != 0) { fprintf(stderr, "%s: error: failed to apply lora adapter\n", __func__); return 1; } } MakeProcessNice(); ShowCrashReports(); // Always interactive for RedPajama chat model params.interactive = true; if (params.interactive) { struct sigaction sigint_action; sigint_action.sa_handler = sigint_handler; sigemptyset (&sigint_action.sa_mask); sigint_action.sa_flags = 0; sigaction(SIGINT, &sigint_action, NULL); } fprintf(stderr, "sampling: temp = %f, top_k = %d, top_p = %f, repeat_last_n = %i, repeat_penalty = %f\n", params.temp, params.top_k, params.top_p, params.repeat_last_n, params.repeat_penalty); fprintf(stderr, "generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", params.n_ctx, params.n_batch, params.n_predict, params.n_keep); fprintf(stderr, "\n\n"); // TODO: replace with ring-buffer std::vector<gptneox_token> last_n_tokens = std::vector<gptneox_token>(); set_console_color(con_st, CONSOLE_COLOR_PROMPT); const int32_t top_k = params.top_k; const float top_p = params.top_p; const float temp = params.temp; const float repeat_penalty = params.repeat_penalty; while (true) { is_interacting = true; int n_past = 0; set_console_color(con_st, CONSOLE_COLOR_USER_INPUT); if (params.instruct) { printf("\n<human>: "); } std::string buffer; if (!params.input_prefix.empty()) { buffer += params.input_prefix; printf("%s", buffer.c_str()); } std::string line; bool another_line = true; do { if (!std::getline(std::cin, line)) { // input stream is bad or EOF received return 0; } if (line.empty() || line.back() != '\\') { another_line = false; } else { line.pop_back(); // Remove the continue character } buffer += line; if (another_line) { buffer += '\n'; } } while (another_line); is_interacting = false; // done taking input, reset color set_console_color(con_st, CONSOLE_COLOR_DEFAULT); // Check for input if (buffer.length() <= 0) { continue; // Restart loop for input } auto prompt_embd = 
::gptneox_tokenize(ctx, buffer, false); auto embd_inp = std::vector<gptneox_token>(); embd_inp.push_back(gptneox_str_to_token(ctx, "<")); embd_inp.push_back(gptneox_str_to_token(ctx, "human")); embd_inp.push_back(gptneox_str_to_token(ctx, ">:")); embd_inp.insert(embd_inp.end(), prompt_embd.begin(), prompt_embd.end()); embd_inp.push_back(gptneox_str_to_token(ctx, "\n")); embd_inp.push_back(gptneox_str_to_token(ctx, "<")); embd_inp.push_back(gptneox_str_to_token(ctx, "bot")); embd_inp.push_back(gptneox_str_to_token(ctx, ">:")); // How many tokens to generate - check if theres space in context for atleast one token (or batch size tokens?) auto inp_size = embd_inp.size(); auto space = params.n_ctx - inp_size; if(space <= 0) { fprintf(stderr, "%s : input too long\n", __func__); continue; } // Send batches to eval while (n_past < inp_size) { auto remaining = inp_size - n_past; int n_eval = params.n_batch < remaining ? params.n_batch : remaining; if (gptneox_eval(ctx, &embd_inp[n_past], n_eval, n_past, params.n_threads)) { fprintf(stderr, "<bot>: %s : failed to eval\n", __func__); return 1; } n_past += n_eval; } const int n_ctx = gptneox_n_ctx(ctx); const int n_vocab = gptneox_n_vocab(ctx); const float temp = params.temp; const int32_t top_k = params.top_k <= 0 ? gptneox_n_vocab(ctx) : params.top_k; const float top_p = params.top_p; const float tfs_z = params.tfs_z; const float typical_p = params.typical_p; const int32_t repeat_last_n = params.repeat_last_n < 0 ? 
n_ctx : params.repeat_last_n; const float repeat_penalty = params.repeat_penalty; const float alpha_presence = params.presence_penalty; const float alpha_frequency = params.frequency_penalty; const int mirostat = params.mirostat; const float mirostat_tau = params.mirostat_tau; const float mirostat_eta = params.mirostat_eta; const bool penalize_nl = params.penalize_nl; // Eval until space runs out auto out_count = 0; printf("<bot>:"); while (space > 0) { // Get token gptneox_token id = 0; { auto logits = gptneox_get_logits(ctx); // Apply params.logit_bias map for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) { logits[it->first] += it->second; } std::vector<gptneox_token_data> candidates; candidates.reserve(n_vocab); for (gptneox_token token_id = 0; token_id < n_vocab; token_id++) { candidates.emplace_back(gptneox_token_data{token_id, logits[token_id], 0.0f}); } gptneox_token_data_array candidates_p = { candidates.data(), candidates.size(), false }; // Apply penalties gptneox_token nl_token = gptneox_str_to_token(ctx, "\n"); float nl_logit = logits[nl_token]; auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx); gptneox_sample_repetition_penalty(ctx, &candidates_p, last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, last_n_repeat, repeat_penalty); gptneox_sample_frequency_and_presence_penalties(ctx, &candidates_p, last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, last_n_repeat, alpha_frequency, alpha_presence); if (!penalize_nl) { logits[nl_token] = nl_logit; } if (temp <= 0) { // Greedy sampling id = gptneox_sample_token_greedy(ctx, &candidates_p); } else { if (mirostat == 1) { static float mirostat_mu = 2.0f * mirostat_tau; const int mirostat_m = 100; gptneox_sample_temperature(ctx, &candidates_p, temp); id = gptneox_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu); } else if (mirostat == 2) { static float mirostat_mu = 2.0f * 
mirostat_tau; gptneox_sample_temperature(ctx, &candidates_p, temp); id = gptneox_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu); } else { // Temperature sampling gptneox_sample_top_k(ctx, &candidates_p, top_k, 1); gptneox_sample_tail_free(ctx, &candidates_p, tfs_z, 1); gptneox_sample_typical(ctx, &candidates_p, typical_p, 1); gptneox_sample_top_p(ctx, &candidates_p, top_p, 1); gptneox_sample_temperature(ctx, &candidates_p, temp); id = gptneox_sample_token(ctx, &candidates_p); } } } // Inc out count and dec space out_count += 1; space -= 1; // Repeat tokens update last_n_tokens.push_back(id); if (last_n_tokens.size() > params.repeat_last_n) { last_n_tokens.erase(last_n_tokens.begin()); } // Redpajama: check if the interactive is done. //std::cout<<" last_n_tokens.size: "<< last_n_tokens[0] <<" "<< last_n_tokens[1] <<" "<< last_n_tokens[2] << std::endl; if (last_n_tokens.size()==3 && last_n_tokens[0]==gptneox_str_to_token(ctx, "<") && last_n_tokens[1]==gptneox_str_to_token(ctx, "human") && last_n_tokens[2]==gptneox_str_to_token(ctx, ">:")){ space = 0; continue; } // Check for eos - end early - check eos before bos in case they are the same if (id == gptneox_token_eos()) { space = 0; continue; } // Check for bos - skip callback if so if (id == gptneox_token_bos()) { continue; } if (last_n_tokens[2]==gptneox_str_to_token(ctx, "<")){ ; } else if (last_n_tokens[2]==gptneox_str_to_token(ctx, "human")){ if (last_n_tokens[1]==gptneox_str_to_token(ctx, "<")){ ; } else{ printf("%s", gptneox_token_to_str(ctx, id)); } } else if (last_n_tokens[1]==gptneox_str_to_token(ctx, "<")){ printf("<"); printf("%s", gptneox_token_to_str(ctx, id)); } else{ printf("%s", gptneox_token_to_str(ctx, id)); } fflush(stdout); // Check if we need to run another eval if (space > 0) { // Send generated token back into model for next generation if (gptneox_eval(ctx, &id, 1, n_past, params.n_threads)) { fprintf(stderr, "%s : failed to eval\n", __func__); return 1; 
} // Increment past count n_past += 1; } // Check for user interrupt if (is_interacting) { space = 0; } } printf("\n"); fflush(stdout); } gptneox_print_timings(ctx); gptneox_free(ctx); set_console_color(con_st, CONSOLE_COLOR_DEFAULT); return 0; }
16,545
390
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/scripts/convert_gptneox_to_ggml.py
// clang-format off # Convert Hugging Face fine-tuned gpt-neox-like models to ggml format import io import os import sys import struct import json import code import torch import numpy as np from transformers import AutoModelForCausalLM, AutoTokenizer # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py def bytes_to_unicode(): """ Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. """ bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8+n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) if len(sys.argv) < 3: print("Usage: python convert-hf-to-ggml.py model_name dir-output [use-f32]") print(" model_name: name of the model to convert. 
Example: 'bigscience/bloomz-560m'") print(" dir-output: directory where the output file will be written") print(" use-f32: if present, use float32 instead of float16") sys.exit(1) model_name = sys.argv[1] dir_out = sys.argv[2] model_cache_dir = dir_out + "-cache" # make sure the output directory exists os.makedirs(dir_out, exist_ok=True) # possible data types # ftype == 0 -> float32 # ftype == 1 -> float16 # # map from ftype to string ftype_str = ["f32", "f16"] ftype = 1 if len(sys.argv) > 3: ftype = 0 tokenizer = AutoTokenizer.from_pretrained(model_name) print("Loading model: ", model_name) model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16 if ftype == 1 else torch.float32, cache_dir=model_cache_dir) model.eval() for p in model.parameters(): p.requires_grad = False hparams = model.config.to_dict() print("Model loaded: ", model_name) fn_bin = f"/ggml-{model_name.split('/')[-1]}-{ftype_str[ftype]}.bin" fn_out = dir_out + fn_bin fout = open(fn_out, "wb") ggml_file_magic = 0x67676d66 # 0x67676d6c is unversioned ggml_file_version = 0x00000001 # v1 hparams["multiple_of"] = 1 fout.write(struct.pack("i", ggml_file_magic)) # magic: ggmf in hex fout.write(struct.pack("i", ggml_file_version)) fout.write(struct.pack("i", hparams["vocab_size"])) fout.write(struct.pack("i", hparams["max_position_embeddings"])) fout.write(struct.pack("i", hparams["hidden_size"])) fout.write(struct.pack("i", hparams["num_attention_heads"])) fout.write(struct.pack("i", hparams["num_hidden_layers"])) fout.write(struct.pack("i", int((hparams["hidden_size"] / hparams["num_attention_heads"] ) * hparams["rotary_pct"]))) # rotary_dim fout.write(struct.pack("i", int(hparams["use_parallel_residual"]))) fout.write(struct.pack("i", ftype)) # Is this correct?? 
dot_token = tokenizer.encode(".")[0] for i in range(hparams["vocab_size"]): text = tokenizer.decode([i]).encode('utf-8') fout.write(struct.pack("i", len(text))) fout.write(text) list_vars = model.state_dict() print(hparams) for name in list_vars.keys(): if name.startswith('gpt_neox.layers.'): if 'attention.masked_bias' in name or \ 'attention.rotary_emb.inv_freq' in name or \ 'attention.bias' in name: continue # No gradients for these list_vars[name].requires_grad = False src = name nn = name print(src, ' -> ', name) data = list_vars[src].squeeze().numpy() data = data.astype(np.float32) n_dims = len(data.shape) print(name, n_dims, data.shape) # default type is fp32 ftype_cur = 0 if ftype == 1 and n_dims > 1: print(" Converting to float16", data.shape, data[:3, :3].tolist()) data = data.astype(np.float16) ftype_cur = 1 else: print(" Converting to float32", data.shape, data[:3, :3].tolist() if n_dims > 1 else data[:3].tolist()) data = data.astype(np.float32) # header str = name.encode('utf-8') fout.write(struct.pack("iii", n_dims, len(str), ftype_cur)) for i in range(n_dims): fout.write(struct.pack("i", data.shape[n_dims - 1 - i])) print(str) fout.write(str) # data data.tofile(fout) fout.close() print("Done. Output file: " + fn_out) print("")
4,778
146
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/scripts/install-RedPajama-INCITE-Instruct-3B-v1.sh
// clang-format off #!/bin/bash # cd to scripts dir cd `dirname $0` # download model to models dir echo "Downloading model" python ./convert_gptneox_to_ggml.py togethercomputer/RedPajama-INCITE-Instruct-3B-v1 ../models/pythia # remove temp cache dir echo "Removing temp cache dir" rm -r ../models/pythia-cache # quantize model echo "Quantizing model (q4_0)" cd ../../.. python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Instruct-3B-v1-f16.bin # done! echo "Done."
532
22
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/scripts/install-RedPajama-INCITE-Base-3B-v1.sh
// clang-format off #!/bin/bash # cd to scripts dir cd `dirname $0` # download model to models dir echo "Downloading model" python ./convert_gptneox_to_ggml.py togethercomputer/RedPajama-INCITE-Base-3B-v1 ../models/pythia # remove temp cache dir echo "Removing temp cache dir" rm -r ../models/pythia-cache # quantize model echo "Quantizing model (q4_0)" cd ../../.. python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Base-3B-v1-f16.bin # done! echo "Done."
524
22
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/scripts/quantize-gptneox.py
// clang-format off #!/usr/bin/env python3 """Script to execute the "quantize" script on a given set of models.""" import subprocess import argparse import glob import sys import os def main(): """Update the quantize binary name depending on the platform and parse the command line arguments and execute the script. """ if "linux" in sys.platform or "darwin" in sys.platform: quantize_script_binary = "quantize-gptneox" elif "win32" in sys.platform or "cygwin" in sys.platform: quantize_script_binary = "quantize-gptneox.exe" else: print("WARNING: Unknown platform. Assuming a UNIX-like OS.\n") quantize_script_binary = "quantize-gptneox" parser = argparse.ArgumentParser( prog='python3 quantize-gptneox.py', description='This script quantizes the given models by applying the ' f'"{quantize_script_binary}" script on them.' ) parser.add_argument('model_path') #parser.add_argument( # 'models', nargs='+', choices=('7B', '13B', '30B', '65B'), # help='The models to quantize.' #) parser.add_argument( '-r', '--remove-16', action='store_true', dest='remove_f16', help='Remove the f16 model after quantizing it.' ) #parser.add_argument( # '-m', '--models-path', dest='models_path', # default=os.path.join(os.getcwd(), "models"), # help='Specify the directory where the models are located.' #) parser.add_argument( '-q', '--quantize-script-path', dest='quantize_script_path', default=os.path.join(os.getcwd(), quantize_script_binary), help='Specify the path to the "quantize" script.' ) parser.add_argument( '--quantize-output-type', dest='quantize_output_type', type=str, default='q4_0', help='Specify the path to the "quantize" script.' ) # TODO: Revise this code # parser.add_argument( # '-t', '--threads', dest='threads', type='int', # default=os.cpu_count(), # help='Specify the number of threads to use to quantize many models at ' # 'once. Defaults to os.cpu_count().' 
# ) args = parser.parse_args() args.model_path = os.path.abspath(args.model_path) #args.models_path = os.path.abspath(args.models_path) if not os.path.isfile(args.quantize_script_path): print( f'The "{quantize_script_binary}" script was not found in the ' "current location.\nIf you want to use it from another location, " "set the --quantize-script-path argument from the command line." ) sys.exit(1) #for model in args.models: # The model is separated in various parts # (ggml-model-f16.bin, ggml-model-f16.bin.0, ggml-model-f16.bin.1...) #f16_model_path_base = os.path.join( # args.models_path, model, "ggml-model-f16.bin" #) f16_model_path_base = args.model_path if not os.path.isfile(f16_model_path_base): print(f'The file %s was not found' % f16_model_path_base) sys.exit(1) f16_model_parts_paths = map( lambda filename: os.path.join(f16_model_path_base, filename), glob.glob(f"{f16_model_path_base}*") ) for f16_model_part_path in f16_model_parts_paths: if not os.path.isfile(f16_model_part_path): print( f"The f16 model {os.path.basename(f16_model_part_path)} " f"was not found in {args.models_path}{os.path.sep}" ". If you want to use it from another location, set the " "--models-path argument from the command line." ) sys.exit(1) __run_quantize_script( args.quantize_script_path, f16_model_part_path, args.quantize_output_type ) if args.remove_f16: os.remove(f16_model_part_path) # This was extracted to a top-level function for parallelization, if # implemented. See https://github.com/ggerganov/llama.cpp/pull/222/commits/f8db3d6cd91bf1a1342db9d29e3092bc12dd783c#r1140496406 def __run_quantize_script(script_path, f16_model_part_path, quantize_output_type): """Run the quantize script specifying the path to it and the path to the f16 model to quantize. 
""" new_quantized_model_path = f16_model_part_path.replace("f16", quantize_output_type) subprocess.run( [script_path, f16_model_part_path, new_quantized_model_path, quantize_output_type], check=True ) if __name__ == "__main__": try: main() except subprocess.CalledProcessError: print("\nAn error ocurred while trying to quantize the models.") sys.exit(1) except KeyboardInterrupt: sys.exit(0) else: print("\nSuccesfully quantized all models.")
4,793
142
jart/cosmopolitan
false
cosmopolitan/third_party/radpajama/scripts/install-RedPajama-INCITE-Chat-3B-v1.sh
// clang-format off #!/bin/bash # cd to scripts dir cd `dirname $0` # download model to models dir echo "Downloading model" python ./convert_gptneox_to_ggml.py togethercomputer/RedPajama-INCITE-Chat-3B-v1 ../models/pythia # remove temp cache dir echo "Removing temp cache dir" rm -r ../models/pythia-cache # quantize model echo "Quantizing model (q4_0)" cd ../../.. python ./examples/redpajama/scripts/quantize-gptneox.py ./examples/redpajama/models/pythia/ggml-RedPajama-INCITE-Chat-3B-v1-f16.bin # done! echo "Done."
524
22
jart/cosmopolitan
false
cosmopolitan/third_party/smallz4/smallz4.mk
#-*-mode:makefile-gmake;indent-tabs-mode:t;tab-width:8;coding:utf-8-*-┐ #───vi: set et ft=make ts=8 tw=8 fenc=utf-8 :vi───────────────────────┘ PKGS += THIRD_PARTY_SMALLZ4 THIRD_PARTY_SMALLZ4_SRCS = $(THIRD_PARTY_SMALLZ4_A_SRCS) THIRD_PARTY_SMALLZ4_HDRS = $(THIRD_PARTY_SMALLZ4_A_HDRS) THIRD_PARTY_SMALLZ4_BINS = $(THIRD_PARTY_SMALLZ4_COMS) $(THIRD_PARTY_SMALLZ4_COMS:%=%.dbg) THIRD_PARTY_SMALLZ4_ARTIFACTS += THIRD_PARTY_SMALLZ4_A THIRD_PARTY_SMALLZ4 = $(THIRD_PARTY_SMALLZ4_A_DEPS) $(THIRD_PARTY_SMALLZ4_A) THIRD_PARTY_SMALLZ4_A = o/$(MODE)/third_party/smallz4/smallz4.a THIRD_PARTY_SMALLZ4_A_FILES := $(wildcard third_party/smallz4/*) THIRD_PARTY_SMALLZ4_A_HDRS = $(filter %.hh,$(THIRD_PARTY_SMALLZ4_A_FILES)) THIRD_PARTY_SMALLZ4_A_SRCS_S = $(filter %.S,$(THIRD_PARTY_SMALLZ4_A_FILES)) THIRD_PARTY_SMALLZ4_A_SRCS_C = $(filter %.c,$(THIRD_PARTY_SMALLZ4_A_FILES)) THIRD_PARTY_SMALLZ4_A_SRCS_CC = $(filter %.cc,$(THIRD_PARTY_SMALLZ4_A_FILES)) THIRD_PARTY_SMALLZ4_A_SRCS = \ $(THIRD_PARTY_SMALLZ4_A_SRCS_S) \ $(THIRD_PARTY_SMALLZ4_A_SRCS_C) \ $(THIRD_PARTY_SMALLZ4_A_SRCS_CC) THIRD_PARTY_SMALLZ4_A_OBJS = \ $(THIRD_PARTY_SMALLZ4_A_SRCS_S:%.S=o/$(MODE)/%.o) \ $(THIRD_PARTY_SMALLZ4_A_SRCS_C:%.c=o/$(MODE)/%.o) \ $(THIRD_PARTY_SMALLZ4_A_SRCS_CC:%.cc=o/$(MODE)/%.o) THIRD_PARTY_SMALLZ4_A_DIRECTDEPS = \ LIBC_FMT \ LIBC_INTRIN \ LIBC_LOG \ LIBC_MEM \ LIBC_NEXGEN32E \ LIBC_RUNTIME \ LIBC_CALLS \ LIBC_STDIO \ LIBC_STR \ LIBC_STUBS \ THIRD_PARTY_LIBCXX THIRD_PARTY_SMALLZ4_A_DEPS := \ $(call uniq,$(foreach x,$(THIRD_PARTY_SMALLZ4_A_DIRECTDEPS),$($(x)))) THIRD_PARTY_SMALLZ4_A_CHECKS = \ $(THIRD_PARTY_SMALLZ4_A).pkg \ $(THIRD_PARTY_SMALLZ4_A_HDRS:%=o/$(MODE)/%.ok) $(THIRD_PARTY_SMALLZ4_A): \ third_party/smallz4/ \ $(THIRD_PARTY_SMALLZ4_A).pkg \ $(THIRD_PARTY_SMALLZ4_A_OBJS) $(THIRD_PARTY_SMALLZ4_A).pkg: \ $(THIRD_PARTY_SMALLZ4_A_OBJS) \ $(foreach x,$(THIRD_PARTY_SMALLZ4_A_DIRECTDEPS),$($(x)_A).pkg) o/$(MODE)/third_party/smallz4/smallz4.com.dbg: \ $(THIRD_PARTY_SMALLZ4) \ 
o/$(MODE)/third_party/smallz4/smallz4.o \ $(CRT) \ $(APE_NO_MODIFY_SELF) @$(APELINK) o/$(MODE)/third_party/smallz4/smallz4cat.com.dbg: \ $(THIRD_PARTY_SMALLZ4) \ o/$(MODE)/third_party/smallz4/smallz4cat.o \ $(CRT) \ $(APE_NO_MODIFY_SELF) @$(APELINK) THIRD_PARTY_SMALLZ4_COMS = \ o/$(MODE)/third_party/smallz4/smallz4.com \ o/$(MODE)/third_party/smallz4/smallz4cat.com THIRD_PARTY_SMALLZ4_LIBS = $(foreach x,$(THIRD_PARTY_SMALLZ4_ARTIFACTS),$($(x))) THIRD_PARTY_SMALLZ4_SRCS = $(foreach x,$(THIRD_PARTY_SMALLZ4_ARTIFACTS),$($(x)_SRCS)) THIRD_PARTY_SMALLZ4_CHECKS = $(foreach x,$(THIRD_PARTY_SMALLZ4_ARTIFACTS),$($(x)_CHECKS)) THIRD_PARTY_SMALLZ4_OBJS = $(foreach x,$(THIRD_PARTY_SMALLZ4_ARTIFACTS),$($(x)_OBJS)) $(THIRD_PARTY_SMALLZ4_OBJS): $(BUILD_FILES) third_party/smallz4/smallz4.mk .PHONY: o/$(MODE)/third_party/smallz4 o/$(MODE)/third_party/smallz4: \ $(THIRD_PARTY_SMALLZ4_BINS) \ $(THIRD_PARTY_SMALLZ4_CHECKS)
3,139
86
jart/cosmopolitan
false
cosmopolitan/third_party/smallz4/smallz4.hh
#ifndef COSMOPOLITAN_THIRD_PARTY_SMALLZ4_SMALLZ4_H_ #define COSMOPOLITAN_THIRD_PARTY_SMALLZ4_SMALLZ4_H_ #include "libc/intrin/bits.h" #include "third_party/libcxx/vector" /** * LZ4 compression with optimal parsing * * See smallz4.cc for a basic I/O interface you can easily replace it by * a in-memory version then all you have to do is: * * smallz4::lz4(GET_BYTES, SEND_BYTES); * * For more advanced stuff, you can call lz4 with up to four parameters * (incl. max chain length and a dictionary) */ class smallz4 { public: // read several bytes, see getBytesFromIn() in smallz4.cpp for a basic // implementation typedef size_t (*GET_BYTES)(void* data, size_t numBytes, void* userPtr); // write several bytes, see sendBytesToOut() in smallz4.cpp for a basic // implementation typedef void (*SEND_BYTES)(const void* data, size_t numBytes, void* userPtr); /// compress everything in input stream (accessed via getByte) and write to /// output stream (via send) static void lz4(GET_BYTES getBytes, SEND_BYTES sendBytes, unsigned short maxChainLength = MaxChainLength, bool useLegacyFormat = false, void* userPtr = NULL) { lz4(getBytes, sendBytes, maxChainLength, std::vector<unsigned char>(), useLegacyFormat, userPtr); } /// compress everything in input stream (accessed via getByte) and write to /// output stream (via send) static void lz4( GET_BYTES getBytes, SEND_BYTES sendBytes, unsigned short maxChainLength, const std::vector<unsigned char>& dictionary, // predefined dictionary bool useLegacyFormat = false, // old format is 7 bytes smaller if input < 8 MB void* userPtr = NULL) { smallz4 obj(maxChainLength); obj.compress(getBytes, sendBytes, dictionary, useLegacyFormat, userPtr); } /// version string static const char* const getVersion() { return "1.5"; } // compression level thresholds, made public because I display them in the // help screen ... 
enum { /// greedy mode for short chains (compression level <= 3) instead of optimal /// parsing / lazy evaluation ShortChainsGreedy = 3, /// lazy evaluation for medium-sized chains (compression level > 3 and <= 6) ShortChainsLazy = 6 }; // ----- END OF PUBLIC INTERFACE ----- private: // ----- constants and types ----- /// a block can be up to 4 MB, so uint32_t would suffice but uint64_t is quite /// a bit faster on my x64 machine typedef uint64_t Length; /// matches must start within the most recent 64k typedef uint16_t Distance; enum { /// each match's length must be >= 4 MinMatch = 4, /// a literal needs one byte JustLiteral = 1, /// last match must not be closer than 12 bytes to the end BlockEndNoMatch = 12, /// last 5 bytes must be literals, no matching allowed BlockEndLiterals = 5, /// match finder's hash table size (2^HashBits entries, must be less than /// 32) HashBits = 20, HashSize = 1 << HashBits, /// input buffer size, can be any number but zero ;-) BufferSize = 1024, /// maximum match distance, must be power of 2 minus 1 MaxDistance = 65535, /// marker for "no match" EndOfChain = 0, /// stop match finding after MaxChainLength steps (default is unlimited => /// optimal parsing) MaxChainLength = MaxDistance, /// significantly speed up parsing if the same byte is repeated a lot, may /// cause sub-optimal compression MaxSameLetter = 19 + 255 * 256, // was: 19 + 255, /// maximum block size as defined in LZ4 spec: { /// 0,0,0,0,64*1024,256*1024,1024*1024,4*1024*1024 } I only work with the /// biggest maximum block size (7) // note: xxhash header checksum is precalculated only for 7, too MaxBlockSizeId = 7, MaxBlockSize = 4 * 1024 * 1024, /// legacy format has a fixed block size of 8 MB MaxBlockSizeLegacy = 8 * 1024 * 1024, /// number of literals and match length is encoded in several bytes, max. /// 255 per byte MaxLengthCode = 255 }; // ----- one and only variable ... 
----- /// how many matches are checked in findLongestMatch, lower values yield /// faster encoding at the cost of worse compression ratio unsigned short maxChainLength; // ----- code ----- /// match struct Match { /// length of match Length length; /// start of match Distance distance; }; /// create new compressor (only invoked by lz4) explicit smallz4(unsigned short newMaxChainLength = MaxChainLength) : maxChainLength(newMaxChainLength) // => no limit, but can be changed by // setMaxChainLength { } /// return true, if the four bytes at *a and *b match inline static bool match4(const void* const a, const void* const b) { return READ32LE(a) == READ32LE(b); } /// simple hash function, input: 32 bits, output: HashBits bits (by default: /// 20) inline static uint32_t getHash32(uint32_t fourBytes) { // taken from https://en.wikipedia.org/wiki/Linear_congruential_generator const uint32_t HashMultiplier = 48271; return ((fourBytes * HashMultiplier) >> (32 - HashBits)) & (HashSize - 1); } /// find longest match of data[pos] between data[begin] and data[end], use /// match chain Match findLongestMatch(const unsigned char* const data, uint64_t pos, uint64_t begin, uint64_t end, const Distance* const chain) const { Match result; result.length = JustLiteral; // assume a literal => one byte // compression level: look only at the first n entries of the match chain unsigned short stepsLeft = maxChainLength; // findLongestMatch() shouldn't be called when maxChainLength = 0 // (uncompressed) // pointer to position that is currently analyzed (which we try to find a // great match for) const unsigned char* const current = data + pos - begin; // don't match beyond this point const unsigned char* const stop = current + end - pos; // get distance to previous match, abort if 0 => not existing Distance distance = chain[pos & MaxDistance]; int64_t totalDistance = 0; while (distance != EndOfChain) { // chain goes too far back ? 
totalDistance += distance; if (totalDistance > MaxDistance) break; // can't match beyond 64k // prepare next position distance = chain[(pos - totalDistance) & MaxDistance]; // let's introduce a new pointer atLeast that points to the first "new" // byte of a potential longer match const unsigned char* const atLeast = current + result.length + 1; // impossible to find a longer match because not enough bytes left ? if (atLeast > stop) break; // the idea is to split the comparison algorithm into 2 phases // (1) scan backward from atLeast to current, abort if mismatch // (2) scan forward until a mismatch is found and store length/distance // of this new best match current atLeast // | | // -<<<<<<<< phase 1 <<<<<<<< // >>> phase 2 >>> // main reason for phase 1: // - both byte sequences start with the same bytes, quite likely they are // very similar // - there is a good chance that if they differ, then their last bytes // differ // => checking the last first increases the probability that a mismatch is // detected as early as possible // compare 4 bytes at once const Length CheckAtOnce = 4; // all bytes between current and atLeast shall be identical const unsigned char* phase1 = atLeast - CheckAtOnce; // minus 4 because match4 checks 4 bytes while (phase1 > current && match4(phase1, phase1 - totalDistance)) phase1 -= CheckAtOnce; // note: - the first four bytes always match // - in the last iteration, phase1 points either at current + 1 or // current + 2 or current + 3 // - therefore we compare a few bytes twice => but a check to skip // these checks is more expensive // mismatch ? 
(the while-loop was aborted) if (phase1 > current) continue; // we have a new best match, now scan forward const unsigned char* phase2 = atLeast; // fast loop: check four bytes at once while (phase2 + CheckAtOnce <= stop && match4(phase2, phase2 - totalDistance)) phase2 += CheckAtOnce; // slow loop: check the last 1/2/3 bytes while (phase2 < stop && *phase2 == *(phase2 - totalDistance)) phase2++; // store new best match result.distance = Distance(totalDistance); result.length = Length(phase2 - current); // stop searching on lower compression levels if (--stepsLeft == 0) break; } return result; } /// create shortest output /** data points to block's begin; we need it to extract literals **/ static std::vector<unsigned char> selectBestMatches( const std::vector<Match>& matches, const unsigned char* const data) { // store encoded data std::vector<unsigned char> result; result.reserve(matches.size()); // indices of current run of literals size_t literalsFrom = 0; size_t numLiterals = 0; bool lastToken = false; // walk through the whole block for (size_t offset = 0; offset < matches.size();) // increment inside of loop { // get best cost-weighted match Match match = matches[offset]; // if no match, then count literals instead if (match.length <= JustLiteral) { // first literal ? need to reset pointers of current sequence of // literals if (numLiterals == 0) literalsFrom = offset; // add one more literal to current sequence numLiterals++; // next match offset++; // continue unless it's the last literal if (offset < matches.size()) continue; lastToken = true; } else { // skip unused matches offset += match.length; } // store match length (4 is implied because it's the minimum match length) int matchLength = int(match.length) - MinMatch; // last token has zero length if (lastToken) matchLength = 0; // token consists of match length and number of literals, let's start with // match length ... unsigned char token = (matchLength < 15) ? 
(unsigned char)matchLength : 15; // >= 15 literals ? (extra bytes to store length) if (numLiterals < 15) { // add number of literals in higher four bits token |= numLiterals << 4; result.push_back(token); } else { // set all higher four bits, the following bytes with determine the // exact number of literals result.push_back(token | 0xF0); // 15 is already encoded in token int encodeNumLiterals = int(numLiterals) - 15; // emit 255 until remainder is below 255 while (encodeNumLiterals >= MaxLengthCode) { result.push_back(MaxLengthCode); encodeNumLiterals -= MaxLengthCode; } // and the last byte (can be zero, too) result.push_back((unsigned char)encodeNumLiterals); } // copy literals if (numLiterals > 0) { result.insert(result.end(), data + literalsFrom, data + literalsFrom + numLiterals); // last token doesn't have a match if (lastToken) break; // reset numLiterals = 0; } // distance stored in 16 bits / little endian result.push_back(match.distance & 0xFF); result.push_back(match.distance >> 8); // >= 15+4 bytes matched if (matchLength >= 15) { // 15 is already encoded in token matchLength -= 15; // emit 255 until remainder is below 255 while (matchLength >= MaxLengthCode) { result.push_back(MaxLengthCode); matchLength -= MaxLengthCode; } // and the last byte (can be zero, too) result.push_back((unsigned char)matchLength); } } return result; } /// walk backwards through all matches and compute number of compressed bytes /// from current position to the end of the block /** note: matches are modified (shortened length) if necessary **/ static void estimateCosts(std::vector<Match>& matches) { const size_t blockEnd = matches.size(); // equals the number of bytes after compression typedef uint32_t Cost; // minimum cost from this position to the end of the current block std::vector<Cost> cost(matches.size(), 0); // "cost" represents the number of bytes needed // the last bytes must always be literals Length numLiterals = BlockEndLiterals; // backwards optimal parsing for 
(int64_t i = (int64_t)blockEnd - (1 + BlockEndLiterals); i >= 0; i--) // ignore the last 5 bytes, they are always literals { // if encoded as a literal numLiterals++; Length bestLength = JustLiteral; // such a literal "costs" 1 byte Cost minCost = cost[i + 1] + JustLiteral; // an extra length byte is required for every 255 literals if (numLiterals >= 15) { // same as: if ((numLiterals - 15) % MaxLengthCode == 0) // but I try hard to avoid the slow modulo function if (numLiterals == 15 || (numLiterals >= 15 + MaxLengthCode && (numLiterals - 15) % MaxLengthCode == 0)) minCost++; } // let's look at the longest match, almost always more efficient that the // plain literals Match match = matches[i]; // very long self-referencing matches can slow down the program A LOT if (match.length >= MaxSameLetter && match.distance == 1) { // assume that longest match is always the best match // NOTE: this assumption might not be optimal ! bestLength = match.length; minCost = cost[i + match.length] + 1 + 2 + 1 + Cost(match.length - 19) / 255; } else { // this is the core optimization loop // overhead of encoding a match: token (1 byte) + offset (2 bytes) + // sometimes extra bytes for long matches Cost extraCost = 1 + 2; Length nextCostIncrease = 18; // need one more byte for 19+ long // matches (next increase: 19+255*x) // try all match lengths (start with short ones) for (Length length = MinMatch; length <= match.length; length++) { // token (1 byte) + offset (2 bytes) + extra bytes for long matches Cost currentCost = cost[i + length] + extraCost; // better choice ? if (currentCost <= minCost) { // regarding the if-condition: // "<" prefers literals and shorter matches // "<=" prefers longer matches // they should produce the same number of bytes (because of the same // cost) // ... but every now and then it doesn't ! 
// that's why: too many consecutive literals require an extra length // byte (which we took into consideration a few lines above) but we // only looked at literals beyond the current position if there are // many literal in front of the current position then it may be // better to emit a match with the same cost as the literals at the // current position // => it "breaks" the long chain of literals and removes the extra // length byte minCost = currentCost; bestLength = length; // performance-wise, a long match is usually faster during decoding // than multiple short matches on the other hand, literals are // faster than short matches as well (assuming same cost) } // very long matches need extra bytes for encoding match length if (length == nextCostIncrease) { extraCost++; nextCostIncrease += MaxLengthCode; } } } // store lowest cost so far cost[i] = minCost; // and adjust best match matches[i].length = bestLength; // reset number of literals if a match was chosen if (bestLength != JustLiteral) numLiterals = 0; // note: if bestLength is smaller than the previous matches[i].length then // there might be a closer match // which could be more cache-friendly (=> faster decoding) } } /// compress everything in input stream (accessed via getByte) and write to /// output stream (via send), improve compression with a predefined dictionary void compress(GET_BYTES getBytes, SEND_BYTES sendBytes, const std::vector<unsigned char>& dictionary, bool useLegacyFormat, void* userPtr) const { // ==================== write header ==================== if (useLegacyFormat) { // magic bytes const unsigned char header[] = {0x02, 0x21, 0x4C, 0x18}; sendBytes(header, sizeof(header), userPtr); } else { // frame header const unsigned char header[] = { 0x04, 0x22, 0x4D, 0x18, // magic bytes 1 << 6, // flags: no checksums, blocks depend on each other and no // dictionary ID MaxBlockSizeId << 4, // max blocksize 0xDF // header checksum (precomputed) }; sendBytes(header, sizeof(header), 
userPtr); } // ==================== declarations ==================== // change read buffer size as you like unsigned char buffer[BufferSize]; // read the file in chunks/blocks, data will contain only bytes which are // relevant for the current block std::vector<unsigned char> data; // file position corresponding to data[0] size_t dataZero = 0; // last already read position size_t numRead = 0; // passthru data ? (but still wrap it in LZ4 format) const bool uncompressed = (maxChainLength == 0); // last time we saw a hash const uint64_t NoLastHash = ~0; // = -1 std::vector<uint64_t> lastHash(HashSize, NoLastHash); // previous position which starts with the same bytes std::vector<Distance> previousHash( MaxDistance + 1, Distance(EndOfChain)); // long chains based on my simple hash std::vector<Distance> previousExact( MaxDistance + 1, Distance(EndOfChain)); // shorter chains based on exact matching of the // first four bytes // these two containers are essential for match finding: // 1. I compute a hash of four byte // 2. in lastHash is the location of the most recent block of four byte with // that same hash // 3. due to hash collisions, several groups of four bytes may yield the // same hash // 4. so for each location I can look up the previous location of the same // hash in previousHash // 5. basically it's a chain of memory locations where potential matches // start // 5. I follow this hash chain until I find exactly the same four bytes I // was looking for // 6. then I switch to a sparser chain: previousExact // 7. it's basically the same idea as previousHash but this time not the // hash but the first four bytes must be identical // 8. previousExact will be used by findLongestMatch: it compare all such // strings a figures out which is the longest match // And why do I have to do it in such a complicated way ? 
// - well, there are 2^32 combinations of four bytes // - so that there are 2^32 potential chains // - most combinations just don't occur and occupy no space but I still have // to keep their "entry point" (which are empty/invalid) // - that would be at least 16 GBytes RAM (2^32 x 4 bytes) // - my hashing algorithm reduces the 2^32 combinations to 2^20 hashes (see // hashBits), that's about 8 MBytes RAM // - thus only 2^20 entry points and at most 2^20 hash chains which is // easily manageable // ... in the end it's all about conserving memory ! // (total memory consumption of smallz4 is about 64 MBytes) // first and last offset of a block (nextBlock is end-of-block plus 1) uint64_t lastBlock = 0; uint64_t nextBlock = 0; bool parseDictionary = !dictionary.empty(); // main loop, processes one block per iteration while (true) { // ==================== start new block ==================== // first byte of the currently processed block (std::vector data may // contain the last 64k of the previous block, too) const unsigned char* dataBlock = NULL; // prepend dictionary if (parseDictionary) { // resize dictionary to 64k (minus 1 because we can only match the last // 65535 bytes of the dictionary => MaxDistance) if (dictionary.size() < MaxDistance) { // dictionary is smaller than 64k, prepend garbage data size_t unused = MaxDistance - dictionary.size(); data.resize(unused, 0); data.insert(data.end(), dictionary.begin(), dictionary.end()); } else // copy only the most recent 64k of the dictionary data.insert(data.end(), dictionary.begin() + dictionary.size() - MaxDistance, dictionary.end()); nextBlock = data.size(); numRead = data.size(); } // read more bytes from input size_t maxBlockSize = useLegacyFormat ? MaxBlockSizeLegacy : MaxBlockSize; while (numRead - nextBlock < maxBlockSize) { // buffer can be significantly smaller than MaxBlockSize, that's the // only reason for this while-block size_t incoming = getBytes(buffer, BufferSize, userPtr); // no more data ? 
if (incoming == 0) break; // add bytes to buffer numRead += incoming; data.insert(data.end(), buffer, buffer + incoming); } // no more data ? => WE'RE DONE ! if (nextBlock == numRead) break; // determine block borders lastBlock = nextBlock; nextBlock += maxBlockSize; // not beyond end-of-file if (nextBlock > numRead) nextBlock = numRead; // pointer to first byte of the currently processed block (the std::vector // container named data may contain the last 64k of the previous block, // too) dataBlock = &data[lastBlock - dataZero]; const uint64_t blockSize = nextBlock - lastBlock; // ==================== full match finder ==================== // greedy mode is much faster but produces larger output const bool isGreedy = (maxChainLength <= ShortChainsGreedy); // lazy evaluation: if there is a match, then try running match finder on // next position, too, but not after that const bool isLazy = !isGreedy && (maxChainLength <= ShortChainsLazy); // skip match finding on the next x bytes in greedy mode Length skipMatches = 0; // allow match finding on the next byte but skip afterwards (in lazy mode) bool lazyEvaluation = false; // the last literals of the previous block skipped matching, so they are // missing from the hash chains int64_t lookback = int64_t(dataZero); if (lookback > BlockEndNoMatch && !parseDictionary) lookback = BlockEndNoMatch; if (parseDictionary) lookback = int64_t(dictionary.size()); // so let's go back a few bytes lookback = -lookback; // ... but not in legacy mode if (useLegacyFormat || uncompressed) lookback = 0; std::vector<Match> matches(uncompressed ? 0 : blockSize); // find longest matches for each position (skip if level=0 which means // "uncompressed") int64_t i; for (i = lookback; i + BlockEndNoMatch <= int64_t(blockSize) && !uncompressed; i++) { // detect self-matching if (i > 0 && dataBlock[i] == dataBlock[i - 1]) { Match prevMatch = matches[i - 1]; // predecessor had the same match ? 
if (prevMatch.distance == 1 && prevMatch.length > MaxSameLetter) // TODO: handle very long // self-referencing matches { // just copy predecessor without further (expensive) optimizations matches[i].distance = 1; matches[i].length = prevMatch.length - 1; continue; } } // read next four bytes const uint32_t four = READ32LE(dataBlock + i); // convert to a shorter hash const uint32_t hash = getHash32(four); // get most recent position of this hash uint64_t lastHashMatch = lastHash[hash]; // and store current position lastHash[hash] = i + lastBlock; // remember: i could be negative, too Distance prevIndex = (i + MaxDistance + 1) & MaxDistance; // actually the same as i & MaxDistance // no predecessor / no hash chain available ? if (lastHashMatch == NoLastHash) { previousHash[prevIndex] = EndOfChain; previousExact[prevIndex] = EndOfChain; continue; } // most recent hash match too far away ? uint64_t distance = lastHash[hash] - lastHashMatch; if (distance > MaxDistance) { previousHash[prevIndex] = EndOfChain; previousExact[prevIndex] = EndOfChain; continue; } // build hash chain, i.e. store distance to last pseudo-match previousHash[prevIndex] = (Distance)distance; // skip pseudo-matches (hash collisions) and build a second chain where // the first four bytes must match exactly uint32_t currentFour; // check the hash chain while (true) { // read four bytes currentFour = READ32LE( &data[lastHashMatch - dataZero]); // match may be found in the // previous block, too // match chain found, first 4 bytes are identical if (currentFour == four) break; // prevent from accidently hopping on an old, wrong hash chain if (hash != getHash32(currentFour)) break; // try next pseudo-match Distance next = previousHash[lastHashMatch & MaxDistance]; // end of the hash chain ? if (next == EndOfChain) break; // too far away ? distance += next; if (distance > MaxDistance) break; // take another step along the hash chain ... lastHashMatch -= next; // closest match is out of range ? 
if (lastHashMatch < dataZero) break; } // search aborted / failed ? if (four != currentFour) { // no matches for the first four bytes previousExact[prevIndex] = EndOfChain; continue; } // store distance to previous match previousExact[prevIndex] = (Distance)distance; // no matching if crossing block boundary, just update hash tables if (i < 0) continue; // skip match finding if in greedy mode if (skipMatches > 0) { skipMatches--; if (!lazyEvaluation) continue; lazyEvaluation = false; } // and after all that preparation ... finally look for the longest match matches[i] = findLongestMatch(data.data(), i + lastBlock, dataZero, nextBlock - BlockEndLiterals, previousExact.data()); // no match finding needed for the next few bytes in greedy/lazy mode if ((isLazy || isGreedy) && matches[i].length != JustLiteral) { lazyEvaluation = (skipMatches == 0); skipMatches = matches[i].length; } } // last bytes are always literals while (i < int(matches.size())) matches[i++].length = JustLiteral; // dictionary is valid only to the first block parseDictionary = false; // ==================== estimate costs (number of compressed bytes) // ==================== // not needed in greedy mode and/or very short blocks if (matches.size() > BlockEndNoMatch && maxChainLength > ShortChainsGreedy) estimateCosts(matches); // ==================== select best matches ==================== std::vector<unsigned char> compressed = selectBestMatches(matches, &data[lastBlock - dataZero]); // ==================== output ==================== // did compression do harm ? bool useCompression = compressed.size() < blockSize && !uncompressed; // legacy format is always compressed useCompression |= useLegacyFormat; // block size uint32_t numBytes = uint32_t(useCompression ? compressed.size() : blockSize); uint32_t numBytesTagged = numBytes | (useCompression ? 
0 : 0x80000000); unsigned char num1 = numBytesTagged & 0xFF; sendBytes(&num1, 1, userPtr); unsigned char num2 = (numBytesTagged >> 8) & 0xFF; sendBytes(&num2, 1, userPtr); unsigned char num3 = (numBytesTagged >> 16) & 0xFF; sendBytes(&num3, 1, userPtr); unsigned char num4 = (numBytesTagged >> 24) & 0xFF; sendBytes(&num4, 1, userPtr); if (useCompression) sendBytes(compressed.data(), numBytes, userPtr); else // uncompressed ? => copy input data sendBytes(&data[lastBlock - dataZero], numBytes, userPtr); // legacy format: no matching across blocks if (useLegacyFormat) { dataZero += data.size(); data.clear(); // clear hash tables for (size_t i = 0; i < previousHash.size(); i++) previousHash[i] = EndOfChain; for (size_t i = 0; i < previousExact.size(); i++) previousExact[i] = EndOfChain; for (size_t i = 0; i < lastHash.size(); i++) lastHash[i] = NoLastHash; } else { // remove already processed data except for the last 64kb which could be // used for intra-block matches if (data.size() > MaxDistance) { size_t remove = data.size() - MaxDistance; dataZero += remove; data.erase(data.begin(), data.begin() + remove); } } } // add an empty block if (!useLegacyFormat) { static const uint32_t zero = 0; sendBytes(&zero, 4, userPtr); } } }; #endif /* COSMOPOLITAN_THIRD_PARTY_SMALLZ4_SMALLZ4_H_ */
30,878
808
jart/cosmopolitan
false
cosmopolitan/third_party/smallz4/README.cosmo
Source: https://create.stephan-brumme.com/smallz4/ Date: 2022-03-22 License: MIT
87
7
jart/cosmopolitan
false
cosmopolitan/third_party/smallz4/smallz4cat.c
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ │vi: set et ft=c ts=8 tw=8 fenc=utf-8 :vi│ ╚──────────────────────────────────────────────────────────────────────────────╝ │ │ │ smallz4cat │ │ Copyright (c) 2016-2019 Stephan Brumme. All rights reserved. │ │ See https://create.stephan-brumme.com/smallz4/ │ │ │ │ Permission is hereby granted, free of charge, to any person obtaining │ │ a copy of this software and associated documentation files (the │ │ "Software"), to deal in the Software without restriction, including │ │ without limitation the rights to use, copy, modify, merge, publish, │ │ distribute, sublicense, and/or sell copies of the Software, and to │ │ permit persons to whom the Software is furnished to do so, subject to │ │ the following conditions: │ │ │ │ The above copyright notice and this permission notice shall be │ │ included in all copies or substantial portions of the Software. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, │ │ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF │ │ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. │ │ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY │ │ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, │ │ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE │ │ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
│ │ │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/calls/calls.h" #include "libc/mem/mem.h" #include "libc/mem/gc.internal.h" #include "libc/runtime/runtime.h" #include "libc/stdio/stdio.h" #include "libc/str/str.h" /** * @fileoverview shorter, more readable, albeit slower re-implementation * of lz4cat ( https://github.com/Cyan4973/xxHash ) * * Limitations: * * - Skippable frames and legacy frames are not implemented (and most * likely never will) * * - Checksums are not verified (see https://create.stephan-brumme.com/xxhash/ * for a simple implementation) * * Replace getByteFromIn() and sendToOut() by your own code if you need * in-memory LZ4 decompression. Corrupted data causes a call to * unlz4error(). */ #define HISTORY_SIZE 65536 // don't change #define READ_BUFFER_SIZE 1024 // change at will static void unlz4error(const char* msg) { fputs("ERROR: ", stderr); fputs(msg, stderr); fputc('\n', stderr); exit(1); } typedef unsigned char (*GET_BYTE)(void*); typedef void (*SEND_BYTES)(const unsigned char*, unsigned int, void*); struct UserPtr { // file handles FILE* in; FILE* out; unsigned char readBuffer[READ_BUFFER_SIZE]; unsigned int pos; unsigned int available; }; /// read a single byte (with simple buffering) static unsigned char getByteFromIn(void* userPtr) { struct UserPtr* user = (struct UserPtr*)userPtr; if (user->pos == user->available) { user->pos = 0; user->available = fread(user->readBuffer, 1, READ_BUFFER_SIZE, user->in); if (user->available == 0) unlz4error("out of data"); } return user->readBuffer[user->pos++]; } /// write a block of bytes static void sendBytesToOut(const unsigned char* data, unsigned int numBytes, void* userPtr) { /// cast user-specific data struct UserPtr* user = (struct UserPtr*)userPtr; if (data != NULL && numBytes > 0) fwrite(data, 1, numBytes, user->out); } /// decompress everything in input stream (accessed via getByte) and write to /// output stream (via sendBytes) void 
unlz4_userPtr(GET_BYTE getByte, SEND_BYTES sendBytes, const char* dictionary, void* userPtr) { // signature unsigned char signature1 = getByte(userPtr); unsigned char signature2 = getByte(userPtr); unsigned char signature3 = getByte(userPtr); unsigned char signature4 = getByte(userPtr); unsigned int signature = (signature4 << 24) | (signature3 << 16) | (signature2 << 8) | signature1; unsigned char isModern = (signature == 0x184D2204); unsigned char isLegacy = (signature == 0x184C2102); if (!isModern && !isLegacy) unlz4error("invalid signature"); unsigned char hasBlockChecksum = false; unsigned char hasContentSize = false; unsigned char hasContentChecksum = false; unsigned char hasDictionaryID = false; if (isModern) { // flags unsigned char flags = getByte(userPtr); hasBlockChecksum = flags & 16; hasContentSize = flags & 8; hasContentChecksum = flags & 4; hasDictionaryID = flags & 1; // only version 1 file format unsigned char version = flags >> 6; if (version != 1) unlz4error("only LZ4 file format version 1 supported"); // ignore blocksize char numIgnore = 1; // ignore, skip 8 bytes if (hasContentSize) numIgnore += 8; // ignore, skip 4 bytes if (hasDictionaryID) numIgnore += 4; // ignore header checksum (xxhash32 of everything up this point & 0xFF) numIgnore++; // skip all those ignored bytes while (numIgnore--) getByte(userPtr); } // contains the latest decoded data unsigned char* history = gc(malloc(HISTORY_SIZE)); // next free position in history[] unsigned int pos = 0; // dictionary compression is a recently introduced feature, just move its // contents to the buffer if (dictionary != NULL) { // open dictionary FILE* dict = fopen(dictionary, "rb"); if (!dict) unlz4error("cannot open dictionary"); // get dictionary's filesize fseek(dict, 0, SEEK_END); long dictSize = ftell(dict); // only the last 64k are relevant long relevant = dictSize < 65536 ? 
0 : dictSize - 65536; fseek(dict, relevant, SEEK_SET); if (dictSize > 65536) dictSize = 65536; // read it and store it at the end of the buffer fread(history + HISTORY_SIZE - dictSize, 1, dictSize, dict); fclose(dict); } // parse all blocks until blockSize == 0 while (1) { // block size unsigned int blockSize = getByte(userPtr); blockSize |= (unsigned int)getByte(userPtr) << 8; blockSize |= (unsigned int)getByte(userPtr) << 16; blockSize |= (unsigned int)getByte(userPtr) << 24; // highest bit set ? unsigned char isCompressed = isLegacy || (blockSize & 0x80000000) == 0; if (isModern) blockSize &= 0x7FFFFFFF; // stop after last block if (blockSize == 0) break; if (isCompressed) { // decompress block unsigned int blockOffset = 0; unsigned int numWritten = 0; while (blockOffset < blockSize) { // get a token unsigned char token = getByte(userPtr); blockOffset++; // determine number of literals unsigned int numLiterals = token >> 4; if (numLiterals == 15) { // number of literals length encoded in more than 1 byte unsigned char current; do { current = getByte(userPtr); numLiterals += current; blockOffset++; } while (current == 255); } blockOffset += numLiterals; // copy all those literals if (pos + numLiterals < HISTORY_SIZE) { // fast loop while (numLiterals-- > 0) history[pos++] = getByte(userPtr); } else { // slow loop while (numLiterals-- > 0) { history[pos++] = getByte(userPtr); // flush output buffer if (pos == HISTORY_SIZE) { sendBytes(history, HISTORY_SIZE, userPtr); numWritten += HISTORY_SIZE; pos = 0; } } } // last token has only literals if (blockOffset == blockSize) break; // match distance is encoded in two bytes (little endian) unsigned int delta = getByte(userPtr); delta |= (unsigned int)getByte(userPtr) << 8; // zero isn't allowed if (delta == 0) unlz4error("invalid offset"); blockOffset += 2; // match length (always >= 4, therefore length is stored minus 4) unsigned int matchLength = 4 + (token & 0x0F); if (matchLength == 4 + 0x0F) { unsigned char 
current; do // match length encoded in more than 1 byte { current = getByte(userPtr); matchLength += current; blockOffset++; } while (current == 255); } // copy match unsigned int referencePos = (pos >= delta) ? (pos - delta) : (HISTORY_SIZE + pos - delta); // start and end within the current 64k block ? if (pos + matchLength < HISTORY_SIZE && referencePos + matchLength < HISTORY_SIZE) { // read/write continuous block (no wrap-around at the end of // history[]) fast copy if (pos >= referencePos + matchLength || referencePos >= pos + matchLength) { // non-overlapping memcpy(history + pos, history + referencePos, matchLength); pos += matchLength; } else { // overlapping, slower byte-wise copy while (matchLength-- > 0) history[pos++] = history[referencePos++]; } } else { // either read or write wraps around at the end of history[] while (matchLength-- > 0) { // copy single byte history[pos++] = history[referencePos++]; // cannot write anymore ? => wrap around if (pos == HISTORY_SIZE) { // flush output buffer sendBytes(history, HISTORY_SIZE, userPtr); numWritten += HISTORY_SIZE; pos = 0; } // wrap-around of read location referencePos %= HISTORY_SIZE; } } } // all legacy blocks must be completely filled - except for the last one if (isLegacy && numWritten + pos < 8 * 1024 * 1024) break; } else { // copy uncompressed data and add to history, too (if next block is // compressed and some matches refer to this block) while (blockSize-- > 0) { // copy a byte ... history[pos++] = getByte(userPtr); // ... 
until buffer is full => send to output if (pos == HISTORY_SIZE) { sendBytes(history, HISTORY_SIZE, userPtr); pos = 0; } } } if (hasBlockChecksum) { // ignore checksum, skip 4 bytes getByte(userPtr); getByte(userPtr); getByte(userPtr); getByte(userPtr); } } if (hasContentChecksum) { // ignore checksum, skip 4 bytes getByte(userPtr); getByte(userPtr); getByte(userPtr); getByte(userPtr); } // flush output buffer sendBytes(history, pos, userPtr); } /// old interface where getByte and sendBytes use global file handles void unlz4(GET_BYTE getByte, SEND_BYTES sendBytes, const char* dictionary) { unlz4_userPtr(getByte, sendBytes, dictionary, NULL); } /// parse command-line int main(int argc, const char* argv[]) { // default input/output streams struct UserPtr user = {.in = stdin, .out = stdout, .pos = 0, // initial input buffer is empty .available = 0}; const char* dictionary = NULL; // first command-line parameter is our input filename / but ignore "-" which // stands for STDIN int parameter; for (parameter = 1; parameter < argc; parameter++) { const char* current = argv[parameter]; // dictionary if (current[0] == '-' && current[1] == 'D') { if (parameter + 1 >= argc) unlz4error("no dictionary filename found"); dictionary = argv[++parameter]; continue; } // filename // read from STDIN, default behavior if (current[0] != '-' && current[1] != '\0') { // already have a filename - at most one filename is allowed (except for // dictionary) ? if (user.in != stdin) unlz4error("can only decompress one file at a time"); // get handle user.in = fopen(argv[1], "rb"); if (!user.in) unlz4error("file not found"); } } // and go ! unlz4_userPtr(getByteFromIn, sendBytesToOut, dictionary, &user); return 0; }
13,336
359
jart/cosmopolitan
false
cosmopolitan/third_party/smallz4/stub.c
/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:2;tab-width:8;coding:utf-8 -*-│ │vi: set net ft=c ts=2 sts=2 sw=2 fenc=utf-8 :vi│ ╞══════════════════════════════════════════════════════════════════════════════╡ │ Copyright 2022 Justine Alexandra Roberts Tunney │ │ │ │ Permission to use, copy, modify, and/or distribute this software for │ │ any purpose with or without fee is hereby granted, provided that the │ │ above copyright notice and this permission notice appear in all copies. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL │ │ WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED │ │ WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE │ │ AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL │ │ DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR │ │ PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER │ │ TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR │ │ PERFORMANCE OF THIS SOFTWARE. │ ╚─────────────────────────────────────────────────────────────────────────────*/ // file intentionally empty
1,865
21
jart/cosmopolitan
false
cosmopolitan/third_party/smallz4/smallz4.cc
/*-*- mode:c++;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│ │vi: set et ft=c ts=8 tw=8 fenc=utf-8 :vi│ ╚──────────────────────────────────────────────────────────────────────────────╝ │ │ │ smallz4 │ │ Copyright (c) 2016-2019 Stephan Brumme. All rights reserved. │ │ See https://create.stephan-brumme.com/smallz4/ │ │ │ │ Permission is hereby granted, free of charge, to any person obtaining │ │ a copy of this software and associated documentation files (the │ │ "Software"), to deal in the Software without restriction, including │ │ without limitation the rights to use, copy, modify, merge, publish, │ │ distribute, sublicense, and/or sell copies of the Software, and to │ │ permit persons to whom the Software is furnished to do so, subject to │ │ the following conditions: │ │ │ │ The above copyright notice and this permission notice shall be │ │ included in all copies or substantial portions of the Software. │ │ │ │ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, │ │ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF │ │ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. │ │ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY │ │ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, │ │ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE │ │ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
│ │ │ ╚─────────────────────────────────────────────────────────────────────────────*/ #include "libc/calls/calls.h" #include "libc/calls/weirdtypes.h" #include "libc/runtime/runtime.h" #include "libc/stdio/stdio.h" #include "libc/time/time.h" #include "third_party/smallz4/smallz4.hh" /// error handler static void error(const char* msg, int code = 1) { fprintf(stderr, "ERROR: %s\n", msg); exit(code); } // ==================== user-specific I/O INTERFACE ==================== struct UserPtr { // file handles FILE* in; FILE* out; // the attributes below are just needed for verbose output bool verbose; uint64_t numBytesIn; uint64_t numBytesOut; uint64_t totalSize; time_t starttime; }; /// read several bytes and store at "data", return number of actually read bytes /// (return only zero if end of data reached) size_t getBytesFromIn(void* data, size_t numBytes, void* userPtr) { /// cast user-specific data UserPtr* user = (UserPtr*)userPtr; if (data && numBytes > 0) { size_t actual = fread(data, 1, numBytes, user->in); user->numBytesIn += actual; return actual; } return 0; } /// show verbose info on STDERR void verbose(const UserPtr& user) { if (!user.verbose) return; if (user.numBytesIn == 0) return; // elapsed and estimated time in seconds int duration = int(time(NULL) - user.starttime); if (duration == 0) return; int estimated = int(duration * user.totalSize / user.numBytesIn); // display on STDERR fprintf(stderr, "\r%lld bytes => %lld bytes (%d%%", user.numBytesIn, user.numBytesOut, 100 * user.numBytesOut / user.numBytesIn); if (estimated > 0) fprintf(stderr, ", %d%% done", 100 * duration / estimated); fprintf(stderr, "), after %d seconds @ %d kByte/s", duration, duration > 0 ? 
(user.numBytesIn / duration) / 1024 : 0); if (estimated > 0) fprintf(stderr, ", about %d seconds left ", estimated - duration); } /// write a block of bytes void sendBytesToOut(const void* data, size_t numBytes, void* userPtr) { /// cast user-specific data UserPtr* user = (UserPtr*)userPtr; if (data && numBytes > 0) { fwrite(data, 1, numBytes, user->out); user->numBytesOut += numBytes; if (user->verbose) verbose(*user); } } // ==================== COMMAND-LINE HANDLING ==================== // show simple help static void showHelp(const char* program) { printf( "smalLZ4 %s%s: compressor with optimal parsing, fully compatible with " "LZ4 by Yann Collet (see https://lz4.org)\n" "\n" "Basic usage:\n" " %s [flags] [input] [output]\n" "\n" "This program writes to STDOUT if output isn't specified\n" "and reads from STDIN if input isn't specified, either.\n" "\n" "Examples:\n" " %s < abc.txt > abc.txt.lz4 # use STDIN and STDOUT\n" " %s abc.txt > abc.txt.lz4 # read from file and write to STDOUT\n" " %s abc.txt abc.txt.lz4 # read from and write to file\n" " cat abc.txt | %s - abc.txt.lz4 # read from STDIN and write to file\n" " %s -6 abc.txt abc.txt.lz4 # compression level 6 (instead of " "default 9)\n" " %s -f abc.txt abc.txt.lz4 # overwrite an existing file\n" " %s -f7 abc.txt abc.txt.lz4 # compression level 7 and overwrite " "an existing file\n" "\n" "Flags:\n" " -0, -1 ... -9 Set compression level, default: 9 (see below)\n" " -h Display this help message\n" " -f Overwrite an existing file\n" " -l Use LZ4 legacy file format\n" " -D [FILE] Load dictionary\n" " -v Verbose\n" "\n" "Compression levels:\n" " -0 No compression\n" " -1 ... -%d Greedy search, check 1 to %d matches\n" " -%d ... 
-8 Lazy matching with optimal parsing, check %d to 8 " "matches\n" " -9 Optimal parsing, check all possible matches " "(default)\n" "\n" "Written in 2016-2020 by Stephan Brumme " "https://create.stephan-brumme.com/smallz4/\n", smallz4::getVersion(), "", program, program, program, program, program, program, program, program, smallz4::ShortChainsGreedy, smallz4::ShortChainsGreedy, smallz4::ShortChainsGreedy + 1, smallz4::ShortChainsGreedy + 1); } /// parse command-line int main(int argc, const char* argv[]) { // show help if no parameters and stdin isn't a pipe if (argc == 1 && isatty(fileno(stdin)) != 0) { showHelp(argv[0]); return 0; } unsigned short maxChainLength = 65535; // "unlimited" because search window contains only 2^16 bytes // overwrite output ? bool overwrite = false; // legacy format ? (not recommended, but smaller files if input < 8 MB) bool useLegacy = false; // preload dictionary from disk const char* dictionary = NULL; // default input/output streams UserPtr user; user.in = stdin; user.out = stdout; user.verbose = false; user.numBytesIn = 0; user.numBytesOut = 0; user.totalSize = 0; // parse flags int nextArgument = 1; bool skipArgument = false; while (argc > nextArgument && argv[nextArgument][0] == '-') { int argPos = 1; while (argv[nextArgument][argPos] != '\0') { switch (argv[nextArgument][argPos++]) { // show help case 'h': showHelp(argv[0]); return 0; // force overwrite case 'f': overwrite = true; break; // old LZ4 format case 'l': useLegacy = true; break; // use dictionary case 'D': if (nextArgument + 1 >= argc) error("no dictionary filename found"); dictionary = argv[nextArgument + 1]; // TODO: any flag immediately after -D causes an error skipArgument = true; break; // display some info on STDERR while compressing case 'v': user.verbose = true; break; // set compression level case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': maxChainLength = argv[nextArgument][1] - '0'; // "0" => 0, "1" => 1, ..., "8" 
=> 8 break; // unlimited hash chain length case '9': // default maxChainLength is already "unlimited" break; default: error("unknown flag"); } } nextArgument++; if (skipArgument) nextArgument++; } // input file is given as first parameter or stdin if no parameter is given // (or "-") if (argc > nextArgument && argv[nextArgument][0] != '-') { user.in = fopen(argv[nextArgument], "rb"); if (!user.in) error("file not found"); nextArgument++; } // output file is given as second parameter or stdout if no parameter is given // (or "-") if (argc == nextArgument + 1 && argv[nextArgument][0] != '-') { // check if file already exists if (!overwrite && fopen(argv[nextArgument], "rb")) error("output file already exists"); user.out = fopen(argv[nextArgument], "wb"); if (!user.out) error("cannot create file"); } // basic check of legacy format's restrictions if (useLegacy) { if (dictionary != 0) error("legacy format doesn't support dictionaries"); if (maxChainLength == 0) error("legacy format doesn't support uncompressed files"); } // load dictionary std::vector<unsigned char> preload; if (dictionary != NULL) { // open dictionary FILE* dict = fopen(dictionary, "rb"); if (!dict) error("cannot open dictionary"); // get dictionary's filesize fseek(dict, 0, SEEK_END); size_t dictSize = ftell(dict); // only the last 64k are relevant const size_t Last64k = 65536; size_t relevant = dictSize < Last64k ? 0 : dictSize - Last64k; fseek(dict, (long)relevant, SEEK_SET); if (dictSize > Last64k) dictSize = Last64k; // read those bytes preload.resize(dictSize); fread(&preload[0], 1, dictSize, dict); fclose(dict); } if (user.verbose) { if (user.in != stdin) { fseek(user.in, 0, SEEK_END); user.totalSize = ftell(user.in); fseek(user.in, 0, SEEK_SET); } user.starttime = time(NULL); } // and go ! 
smallz4::lz4(getBytesFromIn, sendBytesToOut, maxChainLength, preload, useLegacy, &user); if (user.verbose && user.numBytesIn > 0) fprintf(stderr, "\r%lld bytes => %lld bytes (%d%%) after %d seconds " " \n", user.numBytesIn, user.numBytesOut, 100 * user.numBytesOut / user.numBytesIn, int(time(NULL) - user.starttime)); return 0; }
11,529
315
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/ffsdi2.c
/* clang-format off */ /* ===-- ffsdi2.c - Implement __ffsdi2 -------------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __ffsdi2 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: the index of the least significant 1-bit in a, or * the value zero if a is zero. The least significant bit is index one. */ COMPILER_RT_ABI si_int __ffsdi2(di_int a) { dwords x; x.all = a; if (x.s.low == 0) { if (x.s.high == 0) return 0; return __builtin_ctz(x.s.high) + (1 + sizeof(si_int) * CHAR_BIT); } return __builtin_ctz(x.s.low) + 1; }
1,029
37
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fp_trunc_common.inc
/* clang-format off */ //=== lib/fp_trunc.h - high precision -> low precision conversion *- C -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Set source and destination precision setting // //===----------------------------------------------------------------------===// #ifndef FP_TRUNC_HEADER #define FP_TRUNC_HEADER #include "third_party/compiler_rt/int_lib.h" #if defined SRC_SINGLE typedef float src_t; typedef uint32_t src_rep_t; #define SRC_REP_C UINT32_C static const int srcSigBits = 23; #elif defined SRC_DOUBLE typedef double src_t; typedef uint64_t src_rep_t; #define SRC_REP_C UINT64_C static const int srcSigBits = 52; #elif defined SRC_QUAD typedef long double src_t; typedef __uint128_t src_rep_t; #define SRC_REP_C (__uint128_t) static const int srcSigBits = 112; #else #error Source should be double precision or quad precision! #endif //end source precision #if defined DST_DOUBLE typedef double dst_t; typedef uint64_t dst_rep_t; #define DST_REP_C UINT64_C static const int dstSigBits = 52; #elif defined DST_SINGLE typedef float dst_t; typedef uint32_t dst_rep_t; #define DST_REP_C UINT32_C static const int dstSigBits = 23; #elif defined DST_HALF typedef uint16_t dst_t; typedef uint16_t dst_rep_t; #define DST_REP_C UINT16_C static const int dstSigBits = 10; #else #error Destination should be single precision or double precision! #endif //end destination precision // End of specialization parameters. Two helper routines for conversion to and // from the representation of floating-point data as integer values follow. 
static __inline src_rep_t srcToRep(src_t x) { const union { src_t f; src_rep_t i; } rep = {.f = x}; return rep.i; } static __inline dst_t dstFromRep(dst_rep_t x) { const union { dst_t f; dst_rep_t i; } rep = {.i = x}; return rep.f; } #endif // FP_TRUNC_HEADER
2,078
78
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fp_fixuint_impl.inc
/* clang-format off */ //===-- lib/fixdfsi.c - Double-precision -> integer conversion ----*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements float to unsigned integer conversion for the // compiler-rt library. // //===----------------------------------------------------------------------===// #include "third_party/compiler_rt/fp_lib.inc" static __inline fixuint_t __fixuint(fp_t a) { // Break a into sign, exponent, significand const rep_t aRep = toRep(a); const rep_t aAbs = aRep & absMask; const int sign = aRep & signBit ? -1 : 1; const int exponent = (aAbs >> significandBits) - exponentBias; const rep_t significand = (aAbs & significandMask) | implicitBit; // If either the value or the exponent is negative, the result is zero. if (sign == -1 || exponent < 0) return 0; // If the value is too large for the integer type, saturate. if ((unsigned)exponent >= sizeof(fixuint_t) * CHAR_BIT) return ~(fixuint_t)0; // If 0 <= exponent < significandBits, right shift to get the result. // Otherwise, shift left. if (exponent < significandBits) return significand >> (significandBits - exponent); else return (fixuint_t)significand << (exponent - significandBits); }
1,526
41
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/divmoddi4.c
/* clang-format off */ /*===-- divmoddi4.c - Implement __divmoddi4 --------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __divmoddi4 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: a / b, *rem = a % b */ COMPILER_RT_ABI di_int __divmoddi4(di_int a, di_int b, di_int* rem) { di_int d = __divdi3(a,b); *rem = a - (d*b); return d; }
790
29
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/truncsfhf2.c
/* clang-format off */ //===-- lib/truncsfhf2.c - single -> half conversion --------------*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// STATIC_YOINK("huge_compiler_rt_license"); #define SRC_SINGLE #define DST_HALF #include "third_party/compiler_rt/fp_trunc_impl.inc" // Use a forwarding definition and noinline to implement a poor man's alias, // as there isn't a good cross-platform way of defining one. COMPILER_RT_ABI __attribute__((__noinline__)) uint16_t __truncsfhf2(float a) { return __truncXfYf2__(a); } COMPILER_RT_ABI uint16_t __gnu_f2h_ieee(float a) { return __truncsfhf2(a); } #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) AEABI_RTABI uint16_t __aeabi_f2h(float a) { return __truncsfhf2(a); } #else AEABI_RTABI uint16_t __aeabi_f2h(float a) COMPILER_RT_ALIAS(__truncsfhf2); #endif #endif
1,076
36
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fp_extend_common.inc
/* clang-format off */ //===-lib/fp_extend.h - low precision -> high precision conversion -*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Set source and destination setting // //===----------------------------------------------------------------------===// #ifndef FP_EXTEND_HEADER #define FP_EXTEND_HEADER #include "libc/literal.h" #include "third_party/compiler_rt/int_lib.h" #if defined SRC_SINGLE typedef float src_t; typedef uint32_t src_rep_t; #define SRC_REP_C UINT32_C static const int srcSigBits = 23; #define src_rep_t_clz __builtin_clz #elif defined SRC_DOUBLE typedef double src_t; typedef uint64_t src_rep_t; #define SRC_REP_C UINT64_C static const int srcSigBits = 52; static __inline int src_rep_t_clz(src_rep_t a) { #if defined __LP64__ return __builtin_clzl(a); #else if (a & REP_C(0xffffffff00000000)) return __builtin_clz(a >> 32); else return 32 + __builtin_clz(a & REP_C(0xffffffff)); #endif } #elif defined SRC_HALF typedef uint16_t src_t; typedef uint16_t src_rep_t; #define SRC_REP_C UINT16_C static const int srcSigBits = 10; #define src_rep_t_clz __builtin_clz #else #error Source should be half, single, or double precision! #endif //end source precision #undef DST_REP_C #if defined DST_SINGLE typedef float dst_t; typedef uint32_t dst_rep_t; #define DST_REP_C UINT32_C static const int dstSigBits = 23; #elif defined DST_DOUBLE typedef double dst_t; typedef uint64_t dst_rep_t; #define DST_REP_C UINT64_C static const int dstSigBits = 52; #elif defined DST_QUAD typedef long double dst_t; typedef __uint128_t dst_rep_t; #define DST_REP_C (__uint128_t) static const int dstSigBits = 112; #else #error Destination should be single, double, or quad precision! #endif //end destination precision // End of specialization parameters. 
Two helper routines for conversion to and // from the representation of floating-point data as integer values follow. static __inline src_rep_t srcToRep(src_t x) { const union { src_t f; src_rep_t i; } rep = {.f = x}; return rep.i; } static __inline dst_t dstFromRep(dst_rep_t x) { const union { dst_t f; dst_rep_t i; } rep = {.i = x}; return rep.f; } // End helper routines. Conversion implementation follows. #endif //FP_EXTEND_HEADER
2,504
94
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/subsf3.c
/* clang-format off */ //===-- lib/subsf3.c - Single-precision subtraction ---------------*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements single-precision soft-float subtraction with the // IEEE-754 default rounding (to nearest, ties to even). // //===----------------------------------------------------------------------===// STATIC_YOINK("huge_compiler_rt_license"); #define SINGLE_PRECISION #include "third_party/compiler_rt/fp_lib.inc" // Subtraction; flip the sign bit of b and add. COMPILER_RT_ABI fp_t __subsf3(fp_t a, fp_t b) { return __addsf3(a, fromRep(toRep(b) ^ signBit)); } #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) AEABI_RTABI fp_t __aeabi_fsub(fp_t a, fp_t b) { return __subsf3(a, b); } #else AEABI_RTABI fp_t __aeabi_fsub(fp_t a, fp_t b) COMPILER_RT_ALIAS(__subsf3); #endif #endif
1,098
36
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/udivmoddi4.c
/* clang-format off */ /* ===-- udivmoddi4.c - Implement __udivmoddi4 -----------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __udivmoddi4 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Effects: if rem != 0, *rem = a % b * Returns: a / b */ /* Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide */ COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int* rem) { const unsigned n_uword_bits = sizeof(su_int) * CHAR_BIT; const unsigned n_udword_bits = sizeof(du_int) * CHAR_BIT; udwords n; n.all = a; udwords d; d.all = b; udwords q; udwords r; unsigned sr; /* special cases, X is unknown, K != 0 */ if (n.s.high == 0) { if (d.s.high == 0) { /* 0 X * --- * 0 X */ if (rem) *rem = n.s.low % d.s.low; return n.s.low / d.s.low; } /* 0 X * --- * K X */ if (rem) *rem = n.s.low; return 0; } /* n.s.high != 0 */ if (d.s.low == 0) { if (d.s.high == 0) { /* K X * --- * 0 0 */ if (rem) *rem = n.s.high % d.s.low; return n.s.high / d.s.low; } /* d.s.high != 0 */ if (n.s.low == 0) { /* K 0 * --- * K 0 */ if (rem) { r.s.high = n.s.high % d.s.high; r.s.low = 0; *rem = r.all; } return n.s.high / d.s.high; } /* K K * --- * K 0 */ if ((d.s.high & (d.s.high - 1)) == 0) /* if d is a power of 2 */ { if (rem) { r.s.low = n.s.low; r.s.high = n.s.high & (d.s.high - 1); *rem = r.all; } return n.s.high >> __builtin_ctz(d.s.high); } /* K K * --- * K 0 */ sr = __builtin_clz(d.s.high) - __builtin_clz(n.s.high); /* 0 <= sr <= n_uword_bits - 2 or sr large */ if (sr > n_uword_bits - 2) { if (rem) *rem = n.all; return 0; } ++sr; /* 1 <= sr <= n_uword_bits - 1 */ /* q.all = n.all << 
(n_udword_bits - sr); */ q.s.low = 0; q.s.high = n.s.low << (n_uword_bits - sr); /* r.all = n.all >> sr; */ r.s.high = n.s.high >> sr; r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr); } else /* d.s.low != 0 */ { if (d.s.high == 0) { /* K X * --- * 0 K */ if ((d.s.low & (d.s.low - 1)) == 0) /* if d is a power of 2 */ { if (rem) *rem = n.s.low & (d.s.low - 1); if (d.s.low == 1) return n.all; sr = __builtin_ctz(d.s.low); q.s.high = n.s.high >> sr; q.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr); return q.all; } /* K X * --- * 0 K */ sr = 1 + n_uword_bits + __builtin_clz(d.s.low) - __builtin_clz(n.s.high); /* 2 <= sr <= n_udword_bits - 1 * q.all = n.all << (n_udword_bits - sr); * r.all = n.all >> sr; */ if (sr == n_uword_bits) { q.s.low = 0; q.s.high = n.s.low; r.s.high = 0; r.s.low = n.s.high; } else if (sr < n_uword_bits) // 2 <= sr <= n_uword_bits - 1 { q.s.low = 0; q.s.high = n.s.low << (n_uword_bits - sr); r.s.high = n.s.high >> sr; r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr); } else // n_uword_bits + 1 <= sr <= n_udword_bits - 1 { q.s.low = n.s.low << (n_udword_bits - sr); q.s.high = (n.s.high << (n_udword_bits - sr)) | (n.s.low >> (sr - n_uword_bits)); r.s.high = 0; r.s.low = n.s.high >> (sr - n_uword_bits); } } else { /* K X * --- * K K */ sr = __builtin_clz(d.s.high) - __builtin_clz(n.s.high); /* 0 <= sr <= n_uword_bits - 1 or sr large */ if (sr > n_uword_bits - 1) { if (rem) *rem = n.all; return 0; } ++sr; /* 1 <= sr <= n_uword_bits */ /* q.all = n.all << (n_udword_bits - sr); */ q.s.low = 0; if (sr == n_uword_bits) { q.s.high = n.s.low; r.s.high = 0; r.s.low = n.s.high; } else { q.s.high = n.s.low << (n_uword_bits - sr); r.s.high = n.s.high >> sr; r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr); } } } /* Not a special case * q and r are initialized with: * q.all = n.all << (n_udword_bits - sr); * r.all = n.all >> sr; * 1 <= sr <= n_udword_bits - 1 */ su_int carry = 0; for (; sr > 0; --sr) { /* r:q = 
((r:q) << 1) | carry */ r.s.high = (r.s.high << 1) | (r.s.low >> (n_uword_bits - 1)); r.s.low = (r.s.low << 1) | (q.s.high >> (n_uword_bits - 1)); q.s.high = (q.s.high << 1) | (q.s.low >> (n_uword_bits - 1)); q.s.low = (q.s.low << 1) | carry; /* carry = 0; * if (r.all >= d.all) * { * r.all -= d.all; * carry = 1; * } */ const di_int s = (di_int)(d.all - r.all - 1) >> (n_udword_bits - 1); carry = s & 1; r.all -= d.all & s; } q.all = (q.all << 1) | carry; if (rem) *rem = r.all; return q.all; }
6,735
235
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/compiler_rt.mk
#-*-mode:makefile-gmake;indent-tabs-mode:t;tab-width:8;coding:utf-8-*-┐ #───vi: set et ft=make ts=8 tw=8 fenc=utf-8 :vi───────────────────────┘ PKGS += THIRD_PARTY_COMPILER_RT THIRD_PARTY_COMPILER_RT_ARTIFACTS += THIRD_PARTY_COMPILER_RT_A THIRD_PARTY_COMPILER_RT = $(THIRD_PARTY_COMPILER_RT_A_DEPS) $(THIRD_PARTY_COMPILER_RT_A) THIRD_PARTY_COMPILER_RT_A = o/$(MODE)/third_party/compiler_rt/compiler_rt.a THIRD_PARTY_COMPILER_RT_A_FILES := \ $(wildcard third_party/compiler_rt/*) \ $(wildcard third_party/compiler_rt/nexgen32e/*) THIRD_PARTY_COMPILER_RT_A_HDRS = $(filter %.h,$(THIRD_PARTY_COMPILER_RT_A_FILES)) THIRD_PARTY_COMPILER_RT_A_INCS = $(filter %.inc,$(THIRD_PARTY_COMPILER_RT_A_FILES)) THIRD_PARTY_COMPILER_RT_A_SRCS_S = $(filter %.S,$(THIRD_PARTY_COMPILER_RT_A_FILES)) THIRD_PARTY_COMPILER_RT_A_SRCS_C = $(filter %.c,$(THIRD_PARTY_COMPILER_RT_A_FILES)) THIRD_PARTY_COMPILER_RT_A_SRCS = \ $(THIRD_PARTY_COMPILER_RT_A_SRCS_S) \ $(THIRD_PARTY_COMPILER_RT_A_SRCS_C) THIRD_PARTY_COMPILER_RT_A_OBJS = \ $(THIRD_PARTY_COMPILER_RT_A_SRCS_S:%.S=o/$(MODE)/%.o) \ $(THIRD_PARTY_COMPILER_RT_A_SRCS_C:%.c=o/$(MODE)/%.o) THIRD_PARTY_COMPILER_RT_A_CHECKS = \ $(THIRD_PARTY_COMPILER_RT_A).pkg \ $(THIRD_PARTY_COMPILER_RT_A_HDRS:%=o/$(MODE)/%.ok) THIRD_PARTY_COMPILER_RT_A_DIRECTDEPS = \ LIBC_INTRIN \ LIBC_NEXGEN32E \ LIBC_STUBS THIRD_PARTY_COMPILER_RT_A_DEPS := \ $(call uniq,$(foreach x,$(THIRD_PARTY_COMPILER_RT_A_DIRECTDEPS),$($(x)))) $(THIRD_PARTY_COMPILER_RT_A): \ third_party/compiler_rt/ \ $(THIRD_PARTY_COMPILER_RT_A).pkg \ $(THIRD_PARTY_COMPILER_RT_A_OBJS) $(THIRD_PARTY_COMPILER_RT_A).pkg: \ $(THIRD_PARTY_COMPILER_RT_A_OBJS) \ $(foreach x,$(THIRD_PARTY_COMPILER_RT_A_DIRECTDEPS),$($(x)_A).pkg) $(THIRD_PARTY_COMPILER_RT_A_OBJS): private \ DEFAULT_CFLAGS += \ $(OLD_CODE) \ -DCRT_HAS_128BIT # these assembly files are safe to build on aarch64 o/$(MODE)/third_party/compiler_rt/comprt.o: third_party/compiler_rt/comprt.S @$(COMPILE) -AOBJECTIFY.S $(OBJECTIFY.S) $(OUTPUT_OPTION) -c $< 
THIRD_PARTY_COMPILER_RT_LIBS = $(foreach x,$(THIRD_PARTY_COMPILER_RT_ARTIFACTS),$($(x))) THIRD_PARTY_COMPILER_RT_SRCS = $(foreach x,$(THIRD_PARTY_COMPILER_RT_ARTIFACTS),$($(x)_SRCS)) THIRD_PARTY_COMPILER_RT_HDRS = $(foreach x,$(THIRD_PARTY_COMPILER_RT_ARTIFACTS),$($(x)_HDRS)) THIRD_PARTY_COMPILER_RT_INCS = $(foreach x,$(THIRD_PARTY_COMPILER_RT_ARTIFACTS),$($(x)_INCS)) THIRD_PARTY_COMPILER_RT_CHECKS = $(foreach x,$(THIRD_PARTY_COMPILER_RT_ARTIFACTS),$($(x)_CHECKS)) THIRD_PARTY_COMPILER_RT_OBJS = $(foreach x,$(THIRD_PARTY_COMPILER_RT_ARTIFACTS),$($(x)_OBJS)) .PHONY: o/$(MODE)/third_party/compiler_rt o/$(MODE)/third_party/compiler_rt: $(THIRD_PARTY_COMPILER_RT_CHECKS)
2,816
64
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/absvti2.c
/* clang-format off */ /* ===-- absvti2.c - Implement __absvdi2 -----------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __absvti2 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" #ifdef CRT_HAS_128BIT /* Returns: absolute value */ /* Effects: aborts if abs(x) < 0 */ COMPILER_RT_ABI ti_int __absvti2(ti_int a) { const int N = (int)(sizeof(ti_int) * CHAR_BIT); if (a == ((ti_int)1 << (N-1))) compilerrt_abort(); const ti_int s = a >> (N - 1); return (a ^ s) - s; } #endif /* CRT_HAS_128BIT */
962
38
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/negdf2.c
/* clang-format off */ //===-- lib/negdf2.c - double-precision negation ------------------*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements double-precision soft-float negation. // //===----------------------------------------------------------------------===// STATIC_YOINK("huge_compiler_rt_license"); #define DOUBLE_PRECISION #include "third_party/compiler_rt/fp_lib.inc" COMPILER_RT_ABI fp_t __negdf2(fp_t a) { return fromRep(toRep(a) ^ signBit); } #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) AEABI_RTABI fp_t __aeabi_dneg(fp_t a) { return __negdf2(a); } #else AEABI_RTABI fp_t __aeabi_dneg(fp_t a) COMPILER_RT_ALIAS(__negdf2); #endif #endif
942
34
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/extendhfsf2.c
/* clang-format off */ //===-- lib/extendhfsf2.c - half -> single conversion -------------*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // STATIC_YOINK("huge_compiler_rt_license"); #define SRC_HALF #define DST_SINGLE #include "third_party/compiler_rt/fp_extend_impl.inc" // Use a forwarding definition and noinline to implement a poor man's alias, // as there isn't a good cross-platform way of defining one. COMPILER_RT_ABI __attribute__((__noinline__)) float __extendhfsf2(uint16_t a) { return __extendXfYf2__(a); } COMPILER_RT_ABI float __gnu_h2f_ieee(uint16_t a) { return __extendhfsf2(a); } #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) AEABI_RTABI float __aeabi_h2f(uint16_t a) { return __extendhfsf2(a); } #else AEABI_RTABI float __aeabi_h2f(uint16_t a) COMPILER_RT_ALIAS(__extendhfsf2); #endif #endif
1,085
37
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/mulxc3.c
/* clang-format off */ /* ===-- mulxc3.c - Implement __mulxc3 -------------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __mulxc3 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #if !_ARCH_PPC #include "third_party/compiler_rt/int_lib.h" #include "third_party/compiler_rt/int_math.h" /* Returns: the product of a + ib and c + id */ COMPILER_RT_ABI Lcomplex __mulxc3(long double __a, long double __b, long double __c, long double __d) { long double __ac = __a * __c; long double __bd = __b * __d; long double __ad = __a * __d; long double __bc = __b * __c; Lcomplex z; COMPLEX_REAL(z) = __ac - __bd; COMPLEX_IMAGINARY(z) = __ad + __bc; if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) { int __recalc = 0; if (crt_isinf(__a) || crt_isinf(__b)) { __a = crt_copysignl(crt_isinf(__a) ? 1 : 0, __a); __b = crt_copysignl(crt_isinf(__b) ? 1 : 0, __b); if (crt_isnan(__c)) __c = crt_copysignl(0, __c); if (crt_isnan(__d)) __d = crt_copysignl(0, __d); __recalc = 1; } if (crt_isinf(__c) || crt_isinf(__d)) { __c = crt_copysignl(crt_isinf(__c) ? 1 : 0, __c); __d = crt_copysignl(crt_isinf(__d) ? 
1 : 0, __d); if (crt_isnan(__a)) __a = crt_copysignl(0, __a); if (crt_isnan(__b)) __b = crt_copysignl(0, __b); __recalc = 1; } if (!__recalc && (crt_isinf(__ac) || crt_isinf(__bd) || crt_isinf(__ad) || crt_isinf(__bc))) { if (crt_isnan(__a)) __a = crt_copysignl(0, __a); if (crt_isnan(__b)) __b = crt_copysignl(0, __b); if (crt_isnan(__c)) __c = crt_copysignl(0, __c); if (crt_isnan(__d)) __d = crt_copysignl(0, __d); __recalc = 1; } if (__recalc) { COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c - __b * __d); COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__a * __d + __b * __c); } } return z; } #endif
2,602
81
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/int_endianness.h
/* clang-format off */ /* ===-- int_endianness.h - configuration header for compiler-rt ------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file is a configuration header for compiler-rt. * This file is not part of the interface of this library. * * ===----------------------------------------------------------------------=== */ #ifndef INT_ENDIANNESS_H #define INT_ENDIANNESS_H #define _YUGA_LITTLE_ENDIAN 1 #define _YUGA_BIG_ENDIAN 0 #endif /* INT_ENDIANNESS_H */
714
24
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/clzdi2.c
/* clang-format off */ /* ===-- clzdi2.c - Implement __clzdi2 -------------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __clzdi2 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: the number of leading 0-bits */ #if !defined(__clang__) && \ ((defined(__sparc__) && defined(__arch64__)) || \ defined(__mips64) || \ (defined(__riscv) && __SIZEOF_POINTER__ >= 8)) /* On 64-bit architectures with neither a native clz instruction nor a native * ctz instruction, gcc resolves __builtin_clz to __clzdi2 rather than * __clzsi2, leading to infinite recursion. */ #define __builtin_clz(a) __clzsi2(a) extern si_int __clzsi2(si_int); #endif /* Precondition: a != 0 */ COMPILER_RT_ABI si_int __clzdi2(di_int a) { dwords x; x.all = a; const si_int f = -(x.s.high == 0); return __builtin_clz((x.s.high & ~f) | (x.s.low & f)) + (f & ((si_int)(sizeof(si_int) * CHAR_BIT))); }
1,484
44
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/floatunsidf.c
/* clang-format off */ //===-- lib/floatunsidf.c - uint -> double-precision conversion ---*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements unsigned integer to double-precision conversion for the // compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even // mode. // //===----------------------------------------------------------------------===// STATIC_YOINK("huge_compiler_rt_license"); #define DOUBLE_PRECISION #include "third_party/compiler_rt/fp_lib.inc" #include "third_party/compiler_rt/int_lib.h" COMPILER_RT_ABI fp_t __floatunsidf(unsigned int a) { const int aWidth = sizeof a * CHAR_BIT; // Handle zero as a special case to protect clz if (a == 0) return fromRep(0); // Exponent of (fp_t)a is the width of abs(a). const int exponent = (aWidth - 1) - __builtin_clz(a); rep_t result; // Shift a into the significand field and clear the implicit bit. const int shift = significandBits - exponent; result = (rep_t)a << shift ^ implicitBit; // Insert the exponent result += (rep_t)(exponent + exponentBias) << significandBits; return fromRep(result); } #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) AEABI_RTABI fp_t __aeabi_ui2d(unsigned int a) { return __floatunsidf(a); } #else AEABI_RTABI fp_t __aeabi_ui2d(unsigned int a) COMPILER_RT_ALIAS(__floatunsidf); #endif #endif
1,663
54
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/umodsi3.c
/* clang-format off */ /* ===-- umodsi3.c - Implement __umodsi3 -----------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __umodsi3 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: a % b */ COMPILER_RT_ABI su_int __umodsi3(su_int a, su_int b) { return a - __udivsi3(a, b) * b; }
735
27
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/divdf3.c
/* clang-format off */ //===-- lib/divdf3.c - Double-precision division ------------------*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements double-precision soft-float division // with the IEEE-754 default rounding (to nearest, ties to even). // // For simplicity, this implementation currently flushes denormals to zero. // It should be a fairly straightforward exercise to implement gradual // underflow with correct rounding. // //===----------------------------------------------------------------------===// STATIC_YOINK("huge_compiler_rt_license"); #define DOUBLE_PRECISION #include "libc/literal.h" #include "third_party/compiler_rt/fp_lib.inc" COMPILER_RT_ABI fp_t __divdf3(fp_t a, fp_t b) { const unsigned int aExponent = toRep(a) >> significandBits & maxExponent; const unsigned int bExponent = toRep(b) >> significandBits & maxExponent; const rep_t quotientSign = (toRep(a) ^ toRep(b)) & signBit; rep_t aSignificand = toRep(a) & significandMask; rep_t bSignificand = toRep(b) & significandMask; int scale = 0; // Detect if a or b is zero, denormal, infinity, or NaN. 
if (aExponent-1U >= maxExponent-1U || bExponent-1U >= maxExponent-1U) { const rep_t aAbs = toRep(a) & absMask; const rep_t bAbs = toRep(b) & absMask; // NaN / anything = qNaN if (aAbs > infRep) return fromRep(toRep(a) | quietBit); // anything / NaN = qNaN if (bAbs > infRep) return fromRep(toRep(b) | quietBit); if (aAbs == infRep) { // infinity / infinity = NaN if (bAbs == infRep) return fromRep(qnanRep); // infinity / anything else = +/- infinity else return fromRep(aAbs | quotientSign); } // anything else / infinity = +/- 0 if (bAbs == infRep) return fromRep(quotientSign); if (!aAbs) { // zero / zero = NaN if (!bAbs) return fromRep(qnanRep); // zero / anything else = +/- zero else return fromRep(quotientSign); } // anything else / zero = +/- infinity if (!bAbs) return fromRep(infRep | quotientSign); // one or both of a or b is denormal, the other (if applicable) is a // normal number. Renormalize one or both of a and b, and set scale to // include the necessary exponent adjustment. if (aAbs < implicitBit) scale += normalize(&aSignificand); if (bAbs < implicitBit) scale -= normalize(&bSignificand); } // Or in the implicit significand bit. (If we fell through from the // denormal path it was already set by normalize( ), but setting it twice // won't hurt anything.) aSignificand |= implicitBit; bSignificand |= implicitBit; int quotientExponent = aExponent - bExponent + scale; // Align the significand of b as a Q31 fixed-point number in the range // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This // is accurate to about 3.5 binary digits. 
const uint32_t q31b = bSignificand >> 21; uint32_t recip32 = UINT32_C(0x7504f333) - q31b; // Now refine the reciprocal estimate using a Newton-Raphson iteration: // // x1 = x0 * (2 - x0 * b) // // This doubles the number of correct binary digits in the approximation // with each iteration, so after three iterations, we have about 28 binary // digits of accuracy. uint32_t correction32; correction32 = -((uint64_t)recip32 * q31b >> 32); recip32 = (uint64_t)recip32 * correction32 >> 31; correction32 = -((uint64_t)recip32 * q31b >> 32); recip32 = (uint64_t)recip32 * correction32 >> 31; correction32 = -((uint64_t)recip32 * q31b >> 32); recip32 = (uint64_t)recip32 * correction32 >> 31; // recip32 might have overflowed to exactly zero in the preceding // computation if the high word of b is exactly 1.0. This would sabotage // the full-width final stage of the computation that follows, so we adjust // recip32 downward by one bit. recip32--; // We need to perform one more iteration to get us to 56 binary digits; // The last iteration needs to happen with extra precision. const uint32_t q63blo = bSignificand << 11; uint64_t correction, reciprocal; correction = -((uint64_t)recip32*q31b + ((uint64_t)recip32*q63blo >> 32)); uint32_t cHi = correction >> 32; uint32_t cLo = correction; reciprocal = (uint64_t)recip32*cHi + ((uint64_t)recip32*cLo >> 32); // We already adjusted the 32-bit estimate, now we need to adjust the final // 64-bit reciprocal estimate downward to ensure that it is strictly smaller // than the infinitely precise exact reciprocal. Because the computation // of the Newton-Raphson step is truncating at every step, this adjustment // is small; most of the work is already done. reciprocal -= 2; // The numerical reciprocal is accurate to within 2^-56, lies in the // interval [0.5, 1.0), and is strictly smaller than the true reciprocal // of b. Multiplying a by this reciprocal thus gives a numerical q = a/b // in Q53 with the following properties: // // 1. 
q < a/b // 2. q is in the interval [0.5, 2.0) // 3. the error in q is bounded away from 2^-53 (actually, we have a // couple of bits to spare, but this is all we need). // We need a 64 x 64 multiply high to compute q, which isn't a basic // operation in C, so we need to be a little bit fussy. rep_t quotient, quotientLo; wideMultiply(aSignificand << 2, reciprocal, &quotient, &quotientLo); // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0). // In either case, we are going to compute a residual of the form // // r = a - q*b // // We know from the construction of q that r satisfies: // // 0 <= r < ulp(q)*b // // if r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we // already have the correct result. The exact halfway case cannot occur. // We also take this time to right shift quotient if it falls in the [1,2) // range and adjust the exponent accordingly. rep_t residual; if (quotient < (implicitBit << 1)) { residual = (aSignificand << 53) - quotient * bSignificand; quotientExponent--; } else { quotient >>= 1; residual = (aSignificand << 52) - quotient * bSignificand; } const int writtenExponent = quotientExponent + exponentBias; if (writtenExponent >= maxExponent) { // If we have overflowed the exponent, return infinity. return fromRep(infRep | quotientSign); } else if (writtenExponent < 1) { // Flush denormals to zero. In the future, it would be nice to add // code to round them correctly. 
return fromRep(quotientSign); } else { const bool round = (residual << 1) > bSignificand; // Clear the implicit bit rep_t absResult = quotient & significandMask; // Insert the exponent absResult |= (rep_t)writtenExponent << significandBits; // Round absResult += round; // Insert the sign and return const double result = fromRep(absResult | quotientSign); return result; } } #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) AEABI_RTABI fp_t __aeabi_ddiv(fp_t a, fp_t b) { return __divdf3(a, b); } #else AEABI_RTABI fp_t __aeabi_ddiv(fp_t a, fp_t b) COMPILER_RT_ALIAS(__divdf3); #endif #endif
7,974
198
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/os_version_check.c
/* clang-format off */ /* ===-- os_version_check.c - OS version checking -------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements the function __isOSVersionAtLeast, used by * Objective-C's @available * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #ifdef __APPLE__ #include <CoreFoundation/CoreFoundation.h> #include <TargetConditionals.h> #include <dispatch/dispatch.h> #include <dlfcn.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> /* These three variables hold the host's OS version. */ static int32_t GlobalMajor, GlobalMinor, GlobalSubminor; static dispatch_once_t DispatchOnceCounter; typedef CFDataRef (*CFDataCreateWithBytesNoCopyFuncTy)(CFAllocatorRef, const UInt8 *, CFIndex, CFAllocatorRef); typedef CFPropertyListRef (*CFPropertyListCreateWithDataFuncTy)( CFAllocatorRef, CFDataRef, CFOptionFlags, CFPropertyListFormat *, CFErrorRef *); typedef CFPropertyListRef (*CFPropertyListCreateFromXMLDataFuncTy)( CFAllocatorRef, CFDataRef, CFOptionFlags, CFStringRef *); typedef CFStringRef (*CFStringCreateWithCStringNoCopyFuncTy)(CFAllocatorRef, const char *, CFStringEncoding, CFAllocatorRef); typedef const void *(*CFDictionaryGetValueFuncTy)(CFDictionaryRef, const void *); typedef CFTypeID (*CFGetTypeIDFuncTy)(CFTypeRef); typedef CFTypeID (*CFStringGetTypeIDFuncTy)(void); typedef Boolean (*CFStringGetCStringFuncTy)(CFStringRef, char *, CFIndex, CFStringEncoding); typedef void (*CFReleaseFuncTy)(CFTypeRef); /* Find and parse the SystemVersion.plist file. 
*/ static void parseSystemVersionPList(void *Unused) { (void)Unused; /* Load CoreFoundation dynamically */ const void *NullAllocator = dlsym(RTLD_DEFAULT, "kCFAllocatorNull"); if (!NullAllocator) return; const CFAllocatorRef kCFAllocatorNull = *(const CFAllocatorRef *)NullAllocator; CFDataCreateWithBytesNoCopyFuncTy CFDataCreateWithBytesNoCopyFunc = (CFDataCreateWithBytesNoCopyFuncTy)dlsym(RTLD_DEFAULT, "CFDataCreateWithBytesNoCopy"); if (!CFDataCreateWithBytesNoCopyFunc) return; CFPropertyListCreateWithDataFuncTy CFPropertyListCreateWithDataFunc = (CFPropertyListCreateWithDataFuncTy)dlsym( RTLD_DEFAULT, "CFPropertyListCreateWithData"); /* CFPropertyListCreateWithData was introduced only in macOS 10.6+, so it * will be NULL on earlier OS versions. */ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdeprecated-declarations" CFPropertyListCreateFromXMLDataFuncTy CFPropertyListCreateFromXMLDataFunc = (CFPropertyListCreateFromXMLDataFuncTy)dlsym( RTLD_DEFAULT, "CFPropertyListCreateFromXMLData"); #pragma clang diagnostic pop /* CFPropertyListCreateFromXMLDataFunc is deprecated in macOS 10.10, so it * might be NULL in future OS versions. 
*/ if (!CFPropertyListCreateWithDataFunc && !CFPropertyListCreateFromXMLDataFunc) return; CFStringCreateWithCStringNoCopyFuncTy CFStringCreateWithCStringNoCopyFunc = (CFStringCreateWithCStringNoCopyFuncTy)dlsym( RTLD_DEFAULT, "CFStringCreateWithCStringNoCopy"); if (!CFStringCreateWithCStringNoCopyFunc) return; CFDictionaryGetValueFuncTy CFDictionaryGetValueFunc = (CFDictionaryGetValueFuncTy)dlsym(RTLD_DEFAULT, "CFDictionaryGetValue"); if (!CFDictionaryGetValueFunc) return; CFGetTypeIDFuncTy CFGetTypeIDFunc = (CFGetTypeIDFuncTy)dlsym(RTLD_DEFAULT, "CFGetTypeID"); if (!CFGetTypeIDFunc) return; CFStringGetTypeIDFuncTy CFStringGetTypeIDFunc = (CFStringGetTypeIDFuncTy)dlsym(RTLD_DEFAULT, "CFStringGetTypeID"); if (!CFStringGetTypeIDFunc) return; CFStringGetCStringFuncTy CFStringGetCStringFunc = (CFStringGetCStringFuncTy)dlsym(RTLD_DEFAULT, "CFStringGetCString"); if (!CFStringGetCStringFunc) return; CFReleaseFuncTy CFReleaseFunc = (CFReleaseFuncTy)dlsym(RTLD_DEFAULT, "CFRelease"); if (!CFReleaseFunc) return; char *PListPath = "/System/Library/CoreServices/SystemVersion.plist"; #if TARGET_OS_SIMULATOR char *PListPathPrefix = getenv("IPHONE_SIMULATOR_ROOT"); if (!PListPathPrefix) return; char FullPath[strlen(PListPathPrefix) + strlen(PListPath) + 1]; strcpy(FullPath, PListPathPrefix); strcat(FullPath, PListPath); PListPath = FullPath; #endif FILE *PropertyList = fopen(PListPath, "r"); if (!PropertyList) return; /* Dynamically allocated stuff. */ CFDictionaryRef PListRef = NULL; CFDataRef FileContentsRef = NULL; UInt8 *PListBuf = NULL; fseek(PropertyList, 0, SEEK_END); long PListFileSize = ftell(PropertyList); if (PListFileSize < 0) goto Fail; rewind(PropertyList); PListBuf = malloc((size_t)PListFileSize); if (!PListBuf) goto Fail; size_t NumRead = fread(PListBuf, 1, (size_t)PListFileSize, PropertyList); if (NumRead != (size_t)PListFileSize) goto Fail; /* Get the file buffer into CF's format. 
We pass in a null allocator here * * because we free PListBuf ourselves */ FileContentsRef = (*CFDataCreateWithBytesNoCopyFunc)( NULL, PListBuf, (CFIndex)NumRead, kCFAllocatorNull); if (!FileContentsRef) goto Fail; if (CFPropertyListCreateWithDataFunc) PListRef = (*CFPropertyListCreateWithDataFunc)( NULL, FileContentsRef, kCFPropertyListImmutable, NULL, NULL); else PListRef = (*CFPropertyListCreateFromXMLDataFunc)( NULL, FileContentsRef, kCFPropertyListImmutable, NULL); if (!PListRef) goto Fail; CFStringRef ProductVersion = (*CFStringCreateWithCStringNoCopyFunc)( NULL, "ProductVersion", kCFStringEncodingASCII, kCFAllocatorNull); if (!ProductVersion) goto Fail; CFTypeRef OpaqueValue = (*CFDictionaryGetValueFunc)(PListRef, ProductVersion); (*CFReleaseFunc)(ProductVersion); if (!OpaqueValue || (*CFGetTypeIDFunc)(OpaqueValue) != (*CFStringGetTypeIDFunc)()) goto Fail; char VersionStr[32]; if (!(*CFStringGetCStringFunc)((CFStringRef)OpaqueValue, VersionStr, sizeof(VersionStr), kCFStringEncodingUTF8)) goto Fail; sscanf(VersionStr, "%d.%d.%d", &GlobalMajor, &GlobalMinor, &GlobalSubminor); Fail: if (PListRef) (*CFReleaseFunc)(PListRef); if (FileContentsRef) (*CFReleaseFunc)(FileContentsRef); free(PListBuf); fclose(PropertyList); } int32_t __isOSVersionAtLeast(int32_t Major, int32_t Minor, int32_t Subminor) { /* Populate the global version variables, if they haven't already. */ dispatch_once_f(&DispatchOnceCounter, NULL, parseSystemVersionPList); if (Major < GlobalMajor) return 1; if (Major > GlobalMajor) return 0; if (Minor < GlobalMinor) return 1; if (Minor > GlobalMinor) return 0; return Subminor <= GlobalSubminor; } #else /* Silence an empty translation unit warning. */ typedef int unused; #endif
7,541
205
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fixunssfdi.c
/* clang-format off */ /* ===-- fixunssfdi.c - Implement __fixunssfdi -----------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #define SINGLE_PRECISION #include "third_party/compiler_rt/fp_lib.inc" #ifndef __SOFT_FP__ /* Support for systems that have hardware floating-point; can set the invalid * flag as a side-effect of computation. */ COMPILER_RT_ABI du_int __fixunssfdi(float a) { if (a <= 0.0f) return 0; double da = a; su_int high = da / 4294967296.f; /* da / 0x1p32f; */ su_int low = da - (double)high * 4294967296.f; /* high * 0x1p32f; */ return ((du_int)high << 32) | low; } #else /* Support for systems that don't have hardware floating-point; there are no * flags to set, and we don't want to code-gen to an unknown soft-float * implementation. */ typedef du_int fixuint_t; #include "third_party/compiler_rt/fp_fixuint_impl.inc" COMPILER_RT_ABI du_int __fixunssfdi(fp_t a) { return __fixuint(a); } #endif #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) AEABI_RTABI du_int __aeabi_f2ulz(fp_t a) { return __fixunssfdi(a); } #else AEABI_RTABI du_int __aeabi_f2ulz(fp_t a) COMPILER_RT_ALIAS(__fixunssfdi); #endif #endif
1,494
57
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/multc3.c
/* clang-third_party/compiler_rt/format off */ /* ===-- multc3.c - Implement __multc3 -------------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __multc3 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" #include "third_party/compiler_rt/int_math.h" /* Returns: the product of a + ib and c + id */ COMPILER_RT_ABI long double _Complex __multc3(long double a, long double b, long double c, long double d) { long double ac = a * c; long double bd = b * d; long double ad = a * d; long double bc = b * c; long double _Complex z; __real__ z = ac - bd; __imag__ z = ad + bc; if (crt_isnan(__real__ z) && crt_isnan(__imag__ z)) { int recalc = 0; if (crt_isinf(a) || crt_isinf(b)) { a = crt_copysignl(crt_isinf(a) ? 1 : 0, a); b = crt_copysignl(crt_isinf(b) ? 1 : 0, b); if (crt_isnan(c)) c = crt_copysignl(0, c); if (crt_isnan(d)) d = crt_copysignl(0, d); recalc = 1; } if (crt_isinf(c) || crt_isinf(d)) { c = crt_copysignl(crt_isinf(c) ? 1 : 0, c); d = crt_copysignl(crt_isinf(d) ? 1 : 0, d); if (crt_isnan(a)) a = crt_copysignl(0, a); if (crt_isnan(b)) b = crt_copysignl(0, b); recalc = 1; } if (!recalc && (crt_isinf(ac) || crt_isinf(bd) || crt_isinf(ad) || crt_isinf(bc))) { if (crt_isnan(a)) a = crt_copysignl(0, a); if (crt_isnan(b)) b = crt_copysignl(0, b); if (crt_isnan(c)) c = crt_copysignl(0, c); if (crt_isnan(d)) d = crt_copysignl(0, d); recalc = 1; } if (recalc) { __real__ z = CRT_INFINITY * (a * c - b * d); __imag__ z = CRT_INFINITY * (a * d + b * c); } } return z; }
2,131
63
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fixunsdfsi.c
/* clang-format off */ /* ===-- fixunsdfsi.c - Implement __fixunsdfsi -----------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #define DOUBLE_PRECISION #include "third_party/compiler_rt/fp_lib.inc" typedef su_int fixuint_t; #include "third_party/compiler_rt/fp_fixuint_impl.inc" COMPILER_RT_ABI su_int __fixunsdfsi(fp_t a) { return __fixuint(a); } #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) AEABI_RTABI su_int __aeabi_d2uiz(fp_t a) { return __fixunsdfsi(a); } #else AEABI_RTABI su_int __aeabi_d2uiz(fp_t a) COMPILER_RT_ALIAS(__fixunsdfsi); #endif #endif
881
33
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/paritysi2.c
/* clang-format off */ /* ===-- paritysi2.c - Implement __paritysi2 -------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __paritysi2 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: 1 if number of bits is odd else returns 0 */ COMPILER_RT_ABI si_int __paritysi2(si_int a) { su_int x = (su_int)a; x ^= x >> 16; x ^= x >> 8; x ^= x >> 4; return (0x6996 >> (x & 0xF)) & 1; }
845
31
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/divdi3.c
/* clang-format off */ /* ===-- divdi3.c - Implement __divdi3 -------------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __divdi3 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: a / b */ COMPILER_RT_ABI di_int __divdi3(di_int a, di_int b) { const int bits_in_dword_m1 = (int)(sizeof(di_int) * CHAR_BIT) - 1; di_int s_a = a >> bits_in_dword_m1; /* s_a = a < 0 ? -1 : 0 */ di_int s_b = b >> bits_in_dword_m1; /* s_b = b < 0 ? -1 : 0 */ a = (a ^ s_a) - s_a; /* negate if s_a == -1 */ b = (b ^ s_b) - s_b; /* negate if s_b == -1 */ s_a ^= s_b; /*sign of quotient */ return (__udivmoddi4(a, b, (du_int*)0) ^ s_a) - s_a; /* negate if s_a == -1 */ }
1,227
33
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/cmpti2.c
/* clang-format off */ /* ===-- cmpti2.c - Implement __cmpti2 -------------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __cmpti2 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" #ifdef CRT_HAS_128BIT /* Returns: if (a < b) returns 0 * if (a == b) returns 1 * if (a > b) returns 2 */ COMPILER_RT_ABI si_int __cmpti2(ti_int a, ti_int b) { twords x; x.all = a; twords y; y.all = b; if (x.s.high < y.s.high) return 0; if (x.s.high > y.s.high) return 2; if (x.s.low < y.s.low) return 0; if (x.s.low > y.s.low) return 2; return 1; } #endif /* CRT_HAS_128BIT */
1,093
46
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fixdfsi.c
/* clang-format off */ /* ===-- fixdfsi.c - Implement __fixdfsi -----------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #define DOUBLE_PRECISION #include "third_party/compiler_rt/fp_lib.inc" typedef si_int fixint_t; typedef su_int fixuint_t; #include "third_party/compiler_rt/fp_fixint_impl.inc" COMPILER_RT_ABI si_int __fixdfsi(fp_t a) { return __fixint(a); } #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) AEABI_RTABI si_int __aeabi_d2iz(fp_t a) { return __fixdfsi(a); } #else AEABI_RTABI si_int __aeabi_d2iz(fp_t a) COMPILER_RT_ALIAS(__fixdfsi); #endif #endif
893
34
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/muldi3.c
/* clang-format off */ /* ===-- muldi3.c - Implement __muldi3 -------------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __muldi3 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: a * b */ static di_int __muldsi3(su_int a, su_int b) { dwords r; const int bits_in_word_2 = (int)(sizeof(si_int) * CHAR_BIT) / 2; const su_int lower_mask = (su_int)~0 >> bits_in_word_2; r.s.low = (a & lower_mask) * (b & lower_mask); su_int t = r.s.low >> bits_in_word_2; r.s.low &= lower_mask; t += (a >> bits_in_word_2) * (b & lower_mask); r.s.low += (t & lower_mask) << bits_in_word_2; r.s.high = t >> bits_in_word_2; t = r.s.low >> bits_in_word_2; r.s.low &= lower_mask; t += (b >> bits_in_word_2) * (a & lower_mask); r.s.low += (t & lower_mask) << bits_in_word_2; r.s.high += t >> bits_in_word_2; r.s.high += (a >> bits_in_word_2) * (b >> bits_in_word_2); return r.all; } /* Returns: a * b */ COMPILER_RT_ABI di_int __muldi3(di_int a, di_int b) { dwords x; x.all = a; dwords y; y.all = b; dwords r; r.all = __muldsi3(x.s.low, y.s.low); r.s.high += x.s.high * y.s.low + x.s.low * y.s.high; return r.all; } #if defined(__ARM_EABI__) AEABI_RTABI di_int __aeabi_lmul(di_int a, di_int b) COMPILER_RT_ALIAS(__muldi3); #endif
1,754
62
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/ucmpdi2.c
/* clang-format off */ /* ===-- ucmpdi2.c - Implement __ucmpdi2 -----------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __ucmpdi2 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: if (a < b) returns 0 * if (a == b) returns 1 * if (a > b) returns 2 */ COMPILER_RT_ABI si_int __ucmpdi2(du_int a, du_int b) { udwords x; x.all = a; udwords y; y.all = b; if (x.s.high < y.s.high) return 0; if (x.s.high > y.s.high) return 2; if (x.s.low < y.s.low) return 0; if (x.s.low > y.s.low) return 2; return 1; } #ifdef __ARM_EABI__ /* Returns: if (a < b) returns -1 * if (a == b) returns 0 * if (a > b) returns 1 */ COMPILER_RT_ABI si_int __aeabi_ulcmp(di_int a, di_int b) { return __ucmpdi2(a, b) - 1; } #endif
1,272
55
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/negdi2.c
/* clang-format off */ /* ===-- negdi2.c - Implement __negdi2 -------------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __negdi2 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: -a */ COMPILER_RT_ABI di_int __negdi2(di_int a) { /* Note: this routine is here for API compatibility; any sane compiler * should expand it inline. */ return -a; }
814
30
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fixxfti.c
/* clang-format off */
/* ===-- fixxfti.c - Implement __fixxfti -----------------------------------===
 *
 * The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 *
 * This file implements __fixxfti for the compiler_rt library.
 *
 * ===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#include "third_party/compiler_rt/int_lib.h"

#ifdef CRT_HAS_128BIT

/* Returns: convert a to a signed long long, rounding toward zero. */

/* Assumption: long double is an intel 80 bit floating point type padded with 6 bytes
 *             ti_int is a 128 bit integral type
 *             value in long double is representable in ti_int
 */

/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee |
 * 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
 */

COMPILER_RT_ABI ti_int
__fixxfti(long double a)
{
    const ti_int ti_max = (ti_int)((~(tu_int)0) / 2);
    const ti_int ti_min = -ti_max - 1;
    long_double_bits fb;
    fb.f = a;
    /* Unbiased exponent; 16383 is the x87 extended-precision bias. */
    int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
    if (e < 0)
        /* |a| < 1 truncates to zero. */
        return 0;
    /* All-ones or zero mask from the sign bit, for branchless negation below. */
    ti_int s = -(si_int)((fb.u.high.s.low & 0x00008000) >> 15);
    /* The x87 format stores the integer bit explicitly, so the full 64-bit
     * significand (including the leading 1) is simply the low word. */
    ti_int r = fb.u.low.all;
    if ((unsigned)e >= sizeof(ti_int) * CHAR_BIT)
        /* Out-of-range magnitude: saturate (value not representable). */
        return a > 0 ? ti_max : ti_min;
    /* Align the 64-bit significand (binary point after bit 63) with e. */
    if (e > 63)
        r <<= (e - 63);
    else
        r >>= (63 - e);
    /* Conditionally negate: (r ^ s) - s is -r when s == -1, r when s == 0. */
    return (r ^ s) - s;
}

#endif /* CRT_HAS_128BIT */
1,665
55
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/lshrdi3.c
/* clang-format off */ /* ===-- lshrdi3.c - Implement __lshrdi3 -----------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __lshrdi3 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: logical a >> b */ /* Precondition: 0 <= b < bits_in_dword */ COMPILER_RT_ABI di_int __lshrdi3(di_int a, si_int b) { const int bits_in_word = (int)(sizeof(si_int) * CHAR_BIT); udwords input; udwords result; input.all = a; if (b & bits_in_word) /* bits_in_word <= b < bits_in_dword */ { result.s.high = 0; result.s.low = input.s.high >> (b - bits_in_word); } else /* 0 <= b < bits_in_word */ { if (b == 0) return a; result.s.high = input.s.high >> b; result.s.low = (input.s.high << (bits_in_word - b)) | (input.s.low >> b); } return result.all; } #if defined(__ARM_EABI__) AEABI_RTABI di_int __aeabi_llsr(di_int a, si_int b) COMPILER_RT_ALIAS(__lshrdi3); #endif
1,396
49
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fp_trunc_impl.inc
/* clang-format off */
//= lib/fp_trunc_impl.inc - high precision -> low precision conversion *-*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a fairly generic conversion from a wider to a narrower
// IEEE-754 floating-point type in the default (round to nearest, ties to even)
// rounding mode. The constants and types defined following the includes below
// parameterize the conversion.
//
// This routine can be trivially adapted to support conversions to
// half-precision or from quad-precision. It does not support types that don't
// use the usual IEEE-754 interchange formats; specifically, some work would be
// needed to adapt it to (for example) the Intel 80-bit format or PowerPC
// double-double format.
//
// Note please, however, that this implementation is only intended to support
// *narrowing* operations; if you need to convert to a *wider* floating-point
// type (e.g. float -> double), then this routine will not do what you want it
// to.
//
// It also requires that integer types at least as large as both formats
// are available on the target platform; this may pose a problem when trying
// to add support for quad on some 32-bit systems, for example.
//
// Finally, the following assumptions are made:
//
// 1. floating-point types and integer types have the same endianness on the
//    target platform
//
// 2. quiet NaNs, if supported, are indicated by the leading bit of the
//    significand field being set
//
//===----------------------------------------------------------------------===//

#include "libc/literal.h"
#include "third_party/compiler_rt/fp_trunc_common.inc"

static __inline dst_t __truncXfYf2__(src_t a) {
    // Various constants whose values follow from the type parameters.
    // Any reasonable optimizer will fold and propagate all of these.
    const int srcBits = sizeof(src_t)*CHAR_BIT;
    const int srcExpBits = srcBits - srcSigBits - 1;
    const int srcInfExp = (1u << srcExpBits) - 1;
    const int srcExpBias = srcInfExp >> 1;

    const src_rep_t srcMinNormal = SRC_REP_C(1) << srcSigBits;
    const src_rep_t srcSignificandMask = srcMinNormal - 1;
    const src_rep_t srcInfinity = (src_rep_t)srcInfExp << srcSigBits;
    const src_rep_t srcSignMask = SRC_REP_C(1) << (srcSigBits + srcExpBits);
    const src_rep_t srcAbsMask = srcSignMask - 1;
    const src_rep_t roundMask = (SRC_REP_C(1) << (srcSigBits - dstSigBits)) - 1;
    const src_rep_t halfway = SRC_REP_C(1) << (srcSigBits - dstSigBits - 1);
    const src_rep_t srcQNaN = SRC_REP_C(1) << (srcSigBits - 1);
    const src_rep_t srcNaNCode = srcQNaN - 1;

    const int dstBits = sizeof(dst_t)*CHAR_BIT;
    const int dstExpBits = dstBits - dstSigBits - 1;
    const int dstInfExp = (1u << dstExpBits) - 1;
    const int dstExpBias = dstInfExp >> 1;

    const int underflowExponent = srcExpBias + 1 - dstExpBias;
    const int overflowExponent = srcExpBias + dstInfExp - dstExpBias;
    const src_rep_t underflow = (src_rep_t)underflowExponent << srcSigBits;
    const src_rep_t overflow = (src_rep_t)overflowExponent << srcSigBits;

    const dst_rep_t dstQNaN = DST_REP_C(1) << (dstSigBits - 1);
    const dst_rep_t dstNaNCode = dstQNaN - 1;

    // Break a into a sign and representation of the absolute value
    const src_rep_t aRep = srcToRep(a);
    const src_rep_t aAbs = aRep & srcAbsMask;
    const src_rep_t sign = aRep & srcSignMask;
    dst_rep_t absResult;

    // aAbs - underflow / aAbs - overflow deliberately wrap; the comparison
    // holds exactly when underflow <= aAbs < overflow (normal-range test).
    if (aAbs - underflow < aAbs - overflow) {
        // The exponent of a is within the range of normal numbers in the
        // destination format.  We can convert by simply right-shifting with
        // rounding and adjusting the exponent.
        absResult = aAbs >> (srcSigBits - dstSigBits);
        absResult -= (dst_rep_t)(srcExpBias - dstExpBias) << dstSigBits;

        const src_rep_t roundBits = aAbs & roundMask;
        // Round to nearest
        if (roundBits > halfway)
            absResult++;
        // Ties to even
        else if (roundBits == halfway)
            absResult += absResult & 1;
    }
    else if (aAbs > srcInfinity) {
        // a is NaN.
        // Conjure the result by beginning with infinity, setting the qNaN
        // bit and inserting the (truncated) trailing NaN field.
        absResult = (dst_rep_t)dstInfExp << dstSigBits;
        absResult |= dstQNaN;
        absResult |= ((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode;
    }
    else if (aAbs >= overflow) {
        // a overflows to infinity.
        absResult = (dst_rep_t)dstInfExp << dstSigBits;
    }
    else {
        // a underflows on conversion to the destination type or is an exact
        // zero.  The result may be a denormal or zero.  Extract the exponent
        // to get the shift amount for the denormalization.
        const int aExp = aAbs >> srcSigBits;
        const int shift = srcExpBias - dstExpBias - aExp + 1;

        const src_rep_t significand = (aRep & srcSignificandMask) | srcMinNormal;

        // Right shift by the denormalization amount with sticky.
        if (shift > srcSigBits) {
            absResult = 0;
        } else {
            // sticky is nonzero iff any bit shifted out below the round bits
            // was set; it keeps halfway cases from looking exact.
            const bool sticky = significand << (srcBits - shift);
            src_rep_t denormalizedSignificand = significand >> shift | sticky;
            absResult = denormalizedSignificand >> (srcSigBits - dstSigBits);
            const src_rep_t roundBits = denormalizedSignificand & roundMask;
            // Round to nearest
            if (roundBits > halfway)
                absResult++;
            // Ties to even
            else if (roundBits == halfway)
                absResult += absResult & 1;
        }
    }

    // Apply the signbit to (dst_t)abs(a).
    const dst_rep_t result = absResult | sign >> (srcBits - dstBits);
    return dstFromRep(result);
}
6,026
138
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/divmodsi4.c
/* clang-format off */ /*===-- divmodsi4.c - Implement __divmodsi4 --------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __divmodsi4 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: a / b, *rem = a % b */ COMPILER_RT_ABI si_int __divmodsi4(si_int a, si_int b, si_int* rem) { si_int d = __divsi3(a,b); *rem = a - (d*b); return d; }
793
31
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/cmpdi2.c
/* clang-format off */ /* ===-- cmpdi2.c - Implement __cmpdi2 -------------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __cmpdi2 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: if (a < b) returns 0 * if (a == b) returns 1 * if (a > b) returns 2 */ COMPILER_RT_ABI si_int __cmpdi2(di_int a, di_int b) { dwords x; x.all = a; dwords y; y.all = b; if (x.s.high < y.s.high) return 0; if (x.s.high > y.s.high) return 2; if (x.s.low < y.s.low) return 0; if (x.s.low > y.s.low) return 2; return 1; } #ifdef __ARM_EABI__ /* Returns: if (a < b) returns -1 * if (a == b) returns 0 * if (a > b) returns 1 */ COMPILER_RT_ABI si_int __aeabi_lcmp(di_int a, di_int b) { return __cmpdi2(a, b) - 1; } #endif
1,262
55
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/floattitf.c
/* clang-format off */
//===-- lib/floattitf.c - int128 -> quad-precision conversion -----*- C -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements ti_int to quad-precision conversion for the
// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
// mode.
//
//===----------------------------------------------------------------------===//

STATIC_YOINK("huge_compiler_rt_license");

#define QUAD_PRECISION
#include "third_party/compiler_rt/fp_lib.inc"
#include "third_party/compiler_rt/int_lib.h"

/* Returns: convert a ti_int to a fp_t, rounding toward even. */

/* Assumption: fp_t is a IEEE 128 bit floating point type
 *             ti_int is a 128 bit integral type
 */

/* seee eeee eeee eeee mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm |
 * mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
 */

#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
COMPILER_RT_ABI fp_t
__floattitf(ti_int a) {
    if (a == 0)
        return 0.0;
    const unsigned N = sizeof(ti_int) * CHAR_BIT;
    /* s is all-ones if a < 0, else zero: used to take |a| and restore sign. */
    const ti_int s = a >> (N-1);
    a = (a ^ s) - s;
    int sd = N - __clzti2(a);  /* number of significant digits */
    int e = sd - 1;            /* exponent */
    if (sd > LDBL_MANT_DIG) {
        /* start:  0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
         * finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
         *                                                12345678901234567890123456
         * 1 = msb 1 bit
         * P = bit LDBL_MANT_DIG-1 bits to the right of 1
         * Q = bit LDBL_MANT_DIG bits to the right of 1
         * R = "or" of all bits to the right of Q
         */
        switch (sd) {
        case LDBL_MANT_DIG + 1:
            a <<= 1;
            break;
        case LDBL_MANT_DIG + 2:
            break;
        default:
            /* Collapse everything below Q into the sticky bit R. */
            a = ((tu_int)a >> (sd - (LDBL_MANT_DIG+2))) |
                ((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG+2) - sd))) != 0);
        };
        /* finish: */
        a |= (a & 4) != 0;  /* Or P into R */
        ++a;  /* round - this step may add a significant bit */
        a >>= 2;  /* dump Q and R */
        /* a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits */
        if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
            a >>= 1;
            ++e;
        }
        /* a is now rounded to LDBL_MANT_DIG bits */
    }
    else {
        a <<= (LDBL_MANT_DIG - sd);
        /* a is now rounded to LDBL_MANT_DIG bits */
    }
    /* Assemble the binary128 representation: sign | biased exponent |
     * top 48 explicit significand bits in the high word, rest in the low. */
    long_double_bits fb;
    fb.u.high.all = (s & 0x8000000000000000LL)        /* sign */
                  | (du_int)(e + 16383) << 48          /* exponent */
                  | ((a >> 64) & 0x0000ffffffffffffLL); /* significand */
    fb.u.low.all = (du_int)(a);
    return fb.f;
}
#endif
3,084
86
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fp_mul_impl.inc
/* clang-format off */
//===---- lib/fp_mul_impl.inc - floating point multiplication -----*- C -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements soft-float multiplication with the IEEE-754 default
// rounding (to nearest, ties to even).
//
//===----------------------------------------------------------------------===//

#include "libc/literal.h"
#include "third_party/compiler_rt/fp_lib.inc"

static __inline fp_t __mulXf3__(fp_t a, fp_t b) {
    const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
    const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
    const rep_t productSign = (toRep(a) ^ toRep(b)) & signBit;

    rep_t aSignificand = toRep(a) & significandMask;
    rep_t bSignificand = toRep(b) & significandMask;
    int scale = 0;

    // Detect if a or b is zero, denormal, infinity, or NaN.
    // exp-1U >= maxExponent-1U wraps for exp == 0, so this catches both
    // extremes (0/denormal and inf/NaN) with a single unsigned comparison.
    if (aExponent-1U >= maxExponent-1U || bExponent-1U >= maxExponent-1U) {
        const rep_t aAbs = toRep(a) & absMask;
        const rep_t bAbs = toRep(b) & absMask;

        // NaN * anything = qNaN
        if (aAbs > infRep) return fromRep(toRep(a) | quietBit);
        // anything * NaN = qNaN
        if (bAbs > infRep) return fromRep(toRep(b) | quietBit);

        if (aAbs == infRep) {
            // infinity * non-zero = +/- infinity
            if (bAbs) return fromRep(aAbs | productSign);
            // infinity * zero = NaN
            else return fromRep(qnanRep);
        }

        if (bAbs == infRep) {
            // non-zero * infinity = +/- infinity
            if (aAbs) return fromRep(bAbs | productSign);
            // zero * infinity = NaN
            else return fromRep(qnanRep);
        }

        // zero * anything = +/- zero
        if (!aAbs) return fromRep(productSign);
        // anything * zero = +/- zero
        if (!bAbs) return fromRep(productSign);

        // one or both of a or b is denormal, the other (if applicable) is a
        // normal number.  Renormalize one or both of a and b, and set scale to
        // include the necessary exponent adjustment.
        if (aAbs < implicitBit) scale += normalize(&aSignificand);
        if (bAbs < implicitBit) scale += normalize(&bSignificand);
    }

    // Or in the implicit significand bit.  (If we fell through from the
    // denormal path it was already set by normalize( ), but setting it twice
    // won't hurt anything.)
    aSignificand |= implicitBit;
    bSignificand |= implicitBit;

    // Get the significand of a*b.  Before multiplying the significands, shift
    // one of them left to left-align it in the field.  Thus, the product will
    // have (exponentBits + 2) integral digits, all but two of which must be
    // zero.  Normalizing this result is just a conditional left-shift by one
    // and bumping the exponent accordingly.
    rep_t productHi, productLo;
    wideMultiply(aSignificand, bSignificand << exponentBits,
                 &productHi, &productLo);

    int productExponent = aExponent + bExponent - exponentBias + scale;

    // Normalize the significand, adjust exponent if needed.
    if (productHi & implicitBit) productExponent++;
    else wideLeftShift(&productHi, &productLo, 1);

    // If we have overflowed the type, return +/- infinity.
    if (productExponent >= maxExponent) return fromRep(infRep | productSign);

    if (productExponent <= 0) {
        // Result is denormal before rounding
        //
        // If the result is so small that it just underflows to zero, return
        // a zero of the appropriate sign.  Mathematically there is no need to
        // handle this case separately, but we make it a special case to
        // simplify the shift logic.
        const unsigned int shift = REP_C(1) - (unsigned int)productExponent;
        if (shift >= typeWidth) return fromRep(productSign);

        // Otherwise, shift the significand of the result so that the round
        // bit is the high bit of productLo.
        wideRightShiftWithSticky(&productHi, &productLo, shift);
    }
    else {
        // Result is normal before rounding; insert the exponent.
        productHi &= significandMask;
        productHi |= (rep_t)productExponent << significandBits;
    }

    // Insert the sign of the result:
    productHi |= productSign;

    // Final rounding.  The final result may overflow to infinity, or underflow
    // to zero, but those are the correct results in those cases.  We use the
    // default IEEE-754 round-to-nearest, ties-to-even rounding mode.
    if (productLo > signBit) productHi++;
    if (productLo == signBit) productHi += productHi & 1;
    return fromRep(productHi);
}
4,882
119
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fixsfdi.c
/* clang-format off */ /* ===-- fixsfdi.c - Implement __fixsfdi -----------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #define SINGLE_PRECISION #include "third_party/compiler_rt/fp_lib.inc" #ifndef __SOFT_FP__ /* Support for systems that have hardware floating-point; can set the invalid * flag as a side-effect of computation. */ COMPILER_RT_ABI du_int __fixunssfdi(float a); COMPILER_RT_ABI di_int __fixsfdi(float a) { if (a < 0.0f) { return -__fixunssfdi(-a); } return __fixunssfdi(a); } #else /* Support for systems that don't have hardware floating-point; there are no * flags to set, and we don't want to code-gen to an unknown soft-float * implementation. */ typedef di_int fixint_t; typedef du_int fixuint_t; #include "third_party/compiler_rt/fp_fixint_impl.inc" COMPILER_RT_ABI di_int __fixsfdi(fp_t a) { return __fixint(a); } #endif #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) AEABI_RTABI di_int __aeabi_f2lz(fp_t a) { return __fixsfdi(a); } #else AEABI_RTABI di_int __aeabi_f2lz(fp_t a) COMPILER_RT_ALIAS(__fixsfdi); #endif #endif
1,407
59
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/udivmodsi4.c
/* clang-format off */ /*===-- udivmodsi4.c - Implement __udivmodsi4 ------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __udivmodsi4 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: a / b, *rem = a % b */ COMPILER_RT_ABI su_int __udivmodsi4(su_int a, su_int b, su_int* rem) { si_int d = __udivsi3(a,b); *rem = a - (d*b); return d; }
795
31
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/ffsti2.c
/* clang-format off */ /* ===-- ffsti2.c - Implement __ffsti2 -------------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __ffsti2 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" #ifdef CRT_HAS_128BIT /* Returns: the index of the least significant 1-bit in a, or * the value zero if a is zero. The least significant bit is index one. */ COMPILER_RT_ABI si_int __ffsti2(ti_int a) { twords x; x.all = a; if (x.s.low == 0) { if (x.s.high == 0) return 0; return __builtin_ctzll(x.s.high) + (1 + sizeof(di_int) * CHAR_BIT); } return __builtin_ctzll(x.s.low) + 1; } #endif /* CRT_HAS_128BIT */
1,085
41
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/ffssi2.c
/* clang-format off */ /* ===-- ffssi2.c - Implement __ffssi2 -------------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __ffssi2 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: the index of the least significant 1-bit in a, or * the value zero if a is zero. The least significant bit is index one. */ COMPILER_RT_ABI si_int __ffssi2(si_int a) { if (a == 0) { return 0; } return __builtin_ctz(a) + 1; }
883
33
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/ilogbl.c
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│
│vi: set et ft=c ts=8 tw=8 fenc=utf-8                                       :vi│
╚──────────────────────────────────────────────────────────────────────────────╝
│                                                                              │
│  Musl Libc                                                                   │
│  Copyright © 2005-2014 Rich Felker, et al.                                   │
│                                                                              │
│  Permission is hereby granted, free of charge, to any person obtaining       │
│  a copy of this software and associated documentation files (the             │
│  "Software"), to deal in the Software without restriction, including         │
│  without limitation the rights to use, copy, modify, merge, publish,         │
│  distribute, sublicense, and/or sell copies of the Software, and to          │
│  permit persons to whom the Software is furnished to do so, subject to       │
│  the following conditions:                                                   │
│                                                                              │
│  The above copyright notice and this permission notice shall be              │
│  included in all copies or substantial portions of the Software.             │
│                                                                              │
│  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,             │
│  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF          │
│  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.      │
│  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY        │
│  CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,        │
│  TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE           │
│  SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                      │
│                                                                              │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/limits.h"
#include "libc/math.h"
#include "libc/tinymath/internal.h"
#include "libc/tinymath/ldshape.internal.h"

asm(".ident\t\"\\n\\n\
Musl libc (MIT License)\\n\
Copyright 2005-2014 Rich Felker, et. al.\"");
asm(".include \"libc/disclaimer.inc\"");
/* clang-format off */

/* ilogbl: extract the unbiased binary exponent of x as an int (C99 7.12.6.5).
 * Raises FE_INVALID (via FORCE_EVAL(0/0.0f)) for zero, infinity and NaN,
 * returning FP_ILOGB0, INT_MAX, and FP_ILOGBNAN respectively. */

#if LDBL_MANT_DIG == 53 && LDBL_MAX_EXP == 1024
/* long double is just IEEE double on this target: defer to ilogb. */
int ilogbl(long double x)
{
	return ilogb(x);
}
#elif LDBL_MANT_DIG == 64 && LDBL_MAX_EXP == 16384
/* x87 80-bit extended precision: explicit integer bit, 15-bit exponent. */
int ilogbl(long double x)
{
	//#pragma STDC FENV_ACCESS ON
	union ldshape u = {x};
	uint64_t m = u.i.m;
	int e = u.i.se & 0x7fff;

	if (!e) {
		if (m == 0) {
			FORCE_EVAL(0/0.0f);
			return FP_ILOGB0;
		}
		/* subnormal x: count leading zeros of the significand by hand */
		for (e = -0x3fff+1; m>>63 == 0; e--, m<<=1);
		return e;
	}
	if (e == 0x7fff) {
		FORCE_EVAL(0/0.0f);
		/* m<<1 drops the integer bit: nonzero fraction means NaN */
		return m<<1 ? FP_ILOGBNAN : INT_MAX;
	}
	return e - 0x3fff;
}
#elif LDBL_MANT_DIG == 113 && LDBL_MAX_EXP == 16384
/* IEEE binary128 quad precision. */
int ilogbl(long double x)
{
	//#pragma STDC FENV_ACCESS ON
	union ldshape u = {x};
	int e = u.i.se & 0x7fff;

	if (!e) {
		if (x == 0) {
			FORCE_EVAL(0/0.0f);
			return FP_ILOGB0;
		}
		/* subnormal x: scale into the normal range and recurse */
		x *= 0x1p120;
		return ilogbl(x) - 120;
	}
	if (e == 0x7fff) {
		FORCE_EVAL(0/0.0f);
		/* clear sign+exponent; any remaining bits mean NaN */
		u.i.se = 0;
		return u.f ? FP_ILOGBNAN : INT_MAX;
	}
	return e - 0x3fff;
}
#endif
91
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/moddi3.c
/* clang-format off */ /*===-- moddi3.c - Implement __moddi3 -------------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __moddi3 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: a % b */ COMPILER_RT_ABI di_int __moddi3(di_int a, di_int b) { const int bits_in_dword_m1 = (int)(sizeof(di_int) * CHAR_BIT) - 1; di_int s = b >> bits_in_dword_m1; /* s = b < 0 ? -1 : 0 */ b = (b ^ s) - s; /* negate if s == -1 */ s = a >> bits_in_dword_m1; /* s = a < 0 ? -1 : 0 */ a = (a ^ s) - s; /* negate if s == -1 */ du_int r; __udivmoddi4(a, b, &r); return ((di_int)r ^ s) - s; /* negate if s == -1 */ }
1,133
34
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/addsf3.c
/* clang-format off */ //===-- lib/addsf3.c - Single-precision addition ------------------*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements single-precision soft-float addition with the IEEE-754 // default rounding (to nearest, ties to even). // //===----------------------------------------------------------------------===// STATIC_YOINK("huge_compiler_rt_license"); #define SINGLE_PRECISION #include "third_party/compiler_rt/fp_add_impl.inc" COMPILER_RT_ABI float __addsf3(float a, float b) { return __addXf3__(a, b); } #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) AEABI_RTABI float __aeabi_fadd(float a, float b) { return __addsf3(a, b); } #else AEABI_RTABI float __aeabi_fadd(float a, float b) COMPILER_RT_ALIAS(__addsf3); #endif #endif
1,037
34
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fixxfdi.c
/* clang-format off */
/* ===-- fixxfdi.c - Implement __fixxfdi -----------------------------------===
 *
 * The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 *
 * This file implements __fixxfdi for the compiler_rt library.
 *
 * ===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#if !_ARCH_PPC

#include "third_party/compiler_rt/int_lib.h"

/* Returns: convert a to a signed long long, rounding toward zero. */

/* Assumption: long double is an intel 80 bit floating point type padded with 6 bytes
 *             di_int is a 64 bit integral type
 *             value in long double is representable in di_int (no range checking performed)
 */

/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee |
 * 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
 */

COMPILER_RT_ABI di_int
__fixxfdi(long double a)
{
    const di_int di_max = (di_int)((~(du_int)0) / 2);
    const di_int di_min = -di_max - 1;
    long_double_bits fb;
    fb.f = a;
    /* Unbiased exponent; 16383 is the x87 extended-precision bias. */
    int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
    if (e < 0)
        /* |a| < 1 truncates to zero. */
        return 0;
    if ((unsigned)e >= sizeof(di_int) * CHAR_BIT)
        /* Out-of-range magnitude: saturate. */
        return a > 0 ? di_max : di_min;
    /* All-ones or zero mask from the sign bit, for branchless negation. */
    di_int s = -(si_int)((fb.u.high.s.low & 0x00008000) >> 15);
    /* The x87 format stores the integer bit explicitly: the low word is the
     * full 64-bit significand with the binary point after bit 63. */
    di_int r = fb.u.low.all;
    r = (du_int)r >> (63 - e);
    /* Conditionally negate: (r ^ s) - s is -r when s == -1, r when s == 0. */
    return (r ^ s) - s;
}

#endif /* !_ARCH_PPC */
1,641
52
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fp_lib.inc
/* clang-format off */ //===-- lib/fp_lib.h - Floating-point utilities -------------------*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file is a configuration header for soft-float routines in compiler-rt. // This file does not provide any part of the compiler-rt interface, but defines // many useful constants and utility routines that are used in the // implementation of the soft-float routines in compiler-rt. // // Assumes that float, double and long double correspond to the IEEE-754 // binary32, binary64 and binary 128 types, respectively, and that integer // endianness matches floating point endianness on the target platform. // //===----------------------------------------------------------------------===// #ifndef FP_LIB_HEADER #define FP_LIB_HEADER #include "libc/literal.h" #include "third_party/compiler_rt/int_lib.h" #include "third_party/compiler_rt/int_math.h" #if defined SINGLE_PRECISION typedef uint32_t rep_t; typedef int32_t srep_t; typedef float fp_t; #define REP_C UINT32_C #define significandBits 23 static __inline int rep_clz(rep_t a) { return __builtin_clz(a); } // 32x32 --> 64 bit multiply static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) { const uint64_t product = (uint64_t)a*b; *hi = product >> 32; *lo = product; } COMPILER_RT_ABI fp_t __addsf3(fp_t a, fp_t b); #elif defined DOUBLE_PRECISION typedef uint64_t rep_t; typedef int64_t srep_t; typedef double fp_t; #define REP_C UINT64_C #define significandBits 52 static __inline int rep_clz(rep_t a) { #if defined __LP64__ return __builtin_clzl(a); #else if (a & REP_C(0xffffffff00000000)) return __builtin_clz(a >> 32); else return 32 + __builtin_clz(a & REP_C(0xffffffff)); #endif } #define loWord(a) (a & 0xffffffffU) #define hiWord(a) (a >> 32) // 
64x64 -> 128 wide multiply for platforms that don't have such an operation; // many 64-bit platforms have this operation, but they tend to have hardware // floating-point, so we don't bother with a special case for them here. static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) { // Each of the component 32x32 -> 64 products const uint64_t plolo = loWord(a) * loWord(b); const uint64_t plohi = loWord(a) * hiWord(b); const uint64_t philo = hiWord(a) * loWord(b); const uint64_t phihi = hiWord(a) * hiWord(b); // Sum terms that contribute to lo in a way that allows us to get the carry const uint64_t r0 = loWord(plolo); const uint64_t r1 = hiWord(plolo) + loWord(plohi) + loWord(philo); *lo = r0 + (r1 << 32); // Sum terms contributing to hi with the carry from lo *hi = hiWord(plohi) + hiWord(philo) + hiWord(r1) + phihi; } #undef loWord #undef hiWord COMPILER_RT_ABI fp_t __adddf3(fp_t a, fp_t b); #elif defined QUAD_PRECISION #if __LDBL_MANT_DIG__ == 113 #define CRT_LDBL_128BIT typedef __uint128_t rep_t; typedef __int128_t srep_t; typedef long double fp_t; #define REP_C (__uint128_t) // Note: Since there is no explicit way to tell compiler the constant is a // 128-bit integer, we let the constant be casted to 128-bit integer #define significandBits 112 static __inline int rep_clz(rep_t a) { const union { __uint128_t ll; #if _YUGA_BIG_ENDIAN struct { uint64_t high, low; } s; #else struct { uint64_t low, high; } s; #endif } uu = { .ll = a }; uint64_t word; uint64_t add; if (uu.s.high){ word = uu.s.high; add = 0; } else{ word = uu.s.low; add = 64; } return __builtin_clzll(word) + add; } #define Word_LoMask UINT64_C(0x00000000ffffffff) #define Word_HiMask UINT64_C(0xffffffff00000000) #define Word_FullMask UINT64_C(0xffffffffffffffff) #define Word_1(a) (uint64_t)((a >> 96) & Word_LoMask) #define Word_2(a) (uint64_t)((a >> 64) & Word_LoMask) #define Word_3(a) (uint64_t)((a >> 32) & Word_LoMask) #define Word_4(a) (uint64_t)(a & Word_LoMask) // 128x128 -> 256 
wide multiply for platforms that don't have such an operation; // many 64-bit platforms have this operation, but they tend to have hardware // floating-point, so we don't bother with a special case for them here. static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) { const uint64_t product11 = Word_1(a) * Word_1(b); const uint64_t product12 = Word_1(a) * Word_2(b); const uint64_t product13 = Word_1(a) * Word_3(b); const uint64_t product14 = Word_1(a) * Word_4(b); const uint64_t product21 = Word_2(a) * Word_1(b); const uint64_t product22 = Word_2(a) * Word_2(b); const uint64_t product23 = Word_2(a) * Word_3(b); const uint64_t product24 = Word_2(a) * Word_4(b); const uint64_t product31 = Word_3(a) * Word_1(b); const uint64_t product32 = Word_3(a) * Word_2(b); const uint64_t product33 = Word_3(a) * Word_3(b); const uint64_t product34 = Word_3(a) * Word_4(b); const uint64_t product41 = Word_4(a) * Word_1(b); const uint64_t product42 = Word_4(a) * Word_2(b); const uint64_t product43 = Word_4(a) * Word_3(b); const uint64_t product44 = Word_4(a) * Word_4(b); const __uint128_t sum0 = (__uint128_t)product44; const __uint128_t sum1 = (__uint128_t)product34 + (__uint128_t)product43; const __uint128_t sum2 = (__uint128_t)product24 + (__uint128_t)product33 + (__uint128_t)product42; const __uint128_t sum3 = (__uint128_t)product14 + (__uint128_t)product23 + (__uint128_t)product32 + (__uint128_t)product41; const __uint128_t sum4 = (__uint128_t)product13 + (__uint128_t)product22 + (__uint128_t)product31; const __uint128_t sum5 = (__uint128_t)product12 + (__uint128_t)product21; const __uint128_t sum6 = (__uint128_t)product11; const __uint128_t r0 = (sum0 & Word_FullMask) + ((sum1 & Word_LoMask) << 32); const __uint128_t r1 = (sum0 >> 64) + ((sum1 >> 32) & Word_FullMask) + (sum2 & Word_FullMask) + ((sum3 << 32) & Word_HiMask); *lo = r0 + (r1 << 64); *hi = (r1 >> 64) + (sum1 >> 96) + (sum2 >> 64) + (sum3 >> 32) + sum4 + (sum5 << 32) + (sum6 << 64); } #undef Word_1 
#undef Word_2 #undef Word_3 #undef Word_4 #undef Word_HiMask #undef Word_LoMask #undef Word_FullMask #endif // __LDBL_MANT_DIG__ == 113 #else #error SINGLE_PRECISION, DOUBLE_PRECISION or QUAD_PRECISION must be defined. #endif #if defined(SINGLE_PRECISION) || defined(DOUBLE_PRECISION) || defined(CRT_LDBL_128BIT) #define typeWidth (sizeof(rep_t)*CHAR_BIT) #define exponentBits (typeWidth - significandBits - 1) #define maxExponent ((1u << exponentBits) - 1) #define exponentBias (maxExponent >> 1) #define implicitBit (REP_C(1) << significandBits) #define significandMask (implicitBit - 1U) #define signBit (REP_C(1) << (significandBits + exponentBits)) #define absMask (signBit - 1U) #define exponentMask (absMask ^ significandMask) #define oneRep ((rep_t)exponentBias << significandBits) #define infRep exponentMask #define quietBit (implicitBit >> 1) #define qnanRep (exponentMask | quietBit) static __inline rep_t toRep(fp_t x) { const union { fp_t f; rep_t i; } rep = {.f = x}; return rep.i; } static __inline fp_t fromRep(rep_t x) { const union { fp_t f; rep_t i; } rep = {.i = x}; return rep.f; } static __inline int normalize(rep_t *significand) { const int shift = rep_clz(*significand) - rep_clz(implicitBit); *significand <<= shift; return 1 - shift; } static __inline void wideLeftShift(rep_t *hi, rep_t *lo, int count) { *hi = *hi << count | *lo >> (typeWidth - count); *lo = *lo << count; } static __inline void wideRightShiftWithSticky(rep_t *hi, rep_t *lo, unsigned int count) { if (count < typeWidth) { const bool sticky = *lo << (typeWidth - count); *lo = *hi << (typeWidth - count) | *lo >> count | sticky; *hi = *hi >> count; } else if (count < 2*typeWidth) { const bool sticky = *hi << (2*typeWidth - count) | *lo; *lo = *hi >> (count - typeWidth) | sticky; *hi = 0; } else { const bool sticky = *hi | *lo; *lo = sticky; *hi = 0; } } // Implements logb methods (logb, logbf, logbl) for IEEE-754. 
// This avoids
// pulling in a libm dependency from compiler-rt, but is not meant to replace
// it (i.e. code calling logb() should get the one from libm, not this), hence
// the __compiler_rt prefix.
//
// Returns the unbiased binary exponent of x as a floating value, working
// purely on the bit representation (no libm calls).
static __inline fp_t __compiler_rt_logbX(fp_t x) {
  rep_t rep = toRep(x);
  int exp = (rep & exponentMask) >> significandBits;

  // Abnormal cases:
  // 1) +/- inf returns +inf; NaN returns NaN
  // 2) 0.0 returns -inf
  if (exp == maxExponent) {
    if (((rep & signBit) == 0) || (x != x)) {
      return x; // NaN or +inf: return x
    } else {
      return -x; // -inf: return -x
    }
  } else if (x == 0.0) {
    // 0.0: return -inf
    return fromRep(infRep | signBit);
  }

  if (exp != 0) {
    // Normal number
    return exp - exponentBias; // Unbias exponent
  } else {
    // Subnormal number; normalize and repeat
    rep &= absMask;
    const int shift = 1 - normalize(&rep);
    exp = (rep & exponentMask) >> significandBits;
    return exp - exponentBias - shift; // Unbias exponent
  }
}
#endif

// Per-precision entry points; exactly one is defined per translation unit,
// matching the precision macro selected before including this header.
#if defined(SINGLE_PRECISION)
static __inline fp_t __compiler_rt_logbf(fp_t x) {
  return __compiler_rt_logbX(x);
}
#elif defined(DOUBLE_PRECISION)
static __inline fp_t __compiler_rt_logb(fp_t x) {
  return __compiler_rt_logbX(x);
}
#elif defined(QUAD_PRECISION)
#if defined(CRT_LDBL_128BIT)
static __inline fp_t __compiler_rt_logbl(fp_t x) {
  return __compiler_rt_logbX(x);
}
#else
// The generic implementation only works for ieee754 floating point. For other
// floating point types, continue to rely on the libm implementation for now.
static __inline long double __compiler_rt_logbl(long double x) {
  return crt_logbl(x);
}
#endif
#endif

#endif // FP_LIB_HEADER
10,606
315
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fixunstfdi.c
/* clang-format off */
/* ===-- fixunstfdi.c - Implement __fixunstfdi -----------------------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#define QUAD_PRECISION
#include "third_party/compiler_rt/fp_lib.inc"

#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
/* Destination type consumed by the shared conversion template below. */
typedef du_int fixuint_t;
#include "third_party/compiler_rt/fp_fixuint_impl.inc"

/* Convert an IEEE binary128 value to uint64_t.  The actual truncation and
 * out-of-range policy live in __fixuint (fp_fixuint_impl.inc) — presumably
 * truncate-toward-zero with clamping; confirm in that template. */
COMPILER_RT_ABI du_int
__fixunstfdi(fp_t a) {
    return __fixuint(a);
}
#endif
713
26
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/int_types.h
/* clang-format off */
/* ===-- int_lib.h - configuration header for compiler-rt  -----------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 *
 * This file is not part of the interface of this library.
 *
 * This file defines various standard types, most importantly a number of unions
 * used to access parts of larger types.
 *
 * ===----------------------------------------------------------------------===
 */

#ifndef INT_TYPES_H
#define INT_TYPES_H

#include "third_party/compiler_rt/int_endianness.h"

/* si_int is defined in Linux sysroot's asm-generic/siginfo.h */
#ifdef si_int
#undef si_int
#endif
/* Fixed-width aliases used throughout compiler-rt:
 * si/su = signed/unsigned 32-bit, di/du = 64-bit. */
typedef      int si_int;
typedef unsigned su_int;

typedef          long long di_int;
typedef unsigned long long du_int;

/* 64-bit value viewed either whole (.all) or as endian-ordered 32-bit
 * halves (.s.low / .s.high). */
typedef union
{
    di_int all;
    struct
    {
#if _YUGA_LITTLE_ENDIAN
        su_int low;
        si_int high;
#else
        si_int high;
        su_int low;
#endif /* _YUGA_LITTLE_ENDIAN */
    }s;
} dwords;

typedef union
{
    du_int all;
    struct
    {
#if _YUGA_LITTLE_ENDIAN
        su_int low;
        su_int high;
#else
        su_int high;
        su_int low;
#endif /* _YUGA_LITTLE_ENDIAN */
    }s;
} udwords;

#ifdef CRT_HAS_128BIT
typedef          __int128 ti_int;
typedef unsigned __int128 tu_int;

/* 128-bit value split into endian-ordered 64-bit halves. */
typedef union
{
    ti_int all;
    struct
    {
#if _YUGA_LITTLE_ENDIAN
        du_int low;
        di_int high;
#else
        di_int high;
        du_int low;
#endif /* _YUGA_LITTLE_ENDIAN */
    }s;
} twords;

typedef union
{
    tu_int all;
    struct
    {
#if _YUGA_LITTLE_ENDIAN
        du_int low;
        du_int high;
#else
        du_int high;
        du_int low;
#endif /* _YUGA_LITTLE_ENDIAN */
    }s;
} utwords;

/* Assemble a signed 128-bit integer from high/low 64-bit halves. */
static __inline ti_int make_ti(di_int h, di_int l) {
    twords r;
    r.s.high = h;
    r.s.low = l;
    return r.all;
}

/* Assemble an unsigned 128-bit integer from high/low 64-bit halves. */
static __inline tu_int make_tu(du_int h, du_int l) {
    utwords r;
    r.s.high = h;
    r.s.low = l;
    return r.all;
}

#endif /* CRT_HAS_128BIT */

/* Bit-level views of float/double for representation-based arithmetic. */
typedef union
{
    su_int u;
    float f;
} float_bits;

typedef union
{
    udwords u;
    double  f;
} double_bits;
/* 128-bit container for long double viewed as two endian-ordered udwords;
 * used by the long_double_bits union below. */
typedef struct
{
#if _YUGA_LITTLE_ENDIAN
    udwords low;
    udwords high;
#else
    udwords high;
    udwords low;
#endif /* _YUGA_LITTLE_ENDIAN */
} uqwords;

typedef union
{
    uqwords u;
    long double f;
} long_double_bits;

/* Complex types: use the C99 built-ins when available, otherwise fall back
 * to plain two-member structs with accessor macros so the same client code
 * compiles either way. */
#if __STDC_VERSION__ >= 199901L && !defined(__STDC_NO_COMPLEX__)
typedef float _Complex Fcomplex;
typedef double _Complex Dcomplex;
typedef long double _Complex Lcomplex;

#define COMPLEX_REAL(x) __real__(x)
#define COMPLEX_IMAGINARY(x) __imag__(x)
#else
typedef struct { float real, imaginary; } Fcomplex;

typedef struct { double real, imaginary; } Dcomplex;

typedef struct { long double real, imaginary; } Lcomplex;

#define COMPLEX_REAL(x) (x).real
#define COMPLEX_IMAGINARY(x) (x).imaginary
#endif
#endif /* INT_TYPES_H */
3,076
162
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/floatdidf.c
/* clang-format off */
/*===-- floatdidf.c - Implement __floatdidf -------------------------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 *===----------------------------------------------------------------------===
 *
 * This file implements __floatdidf for the compiler_rt library.
 *
 *===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#include "libc/literal.h"
#include "third_party/compiler_rt/int_lib.h"

/* Returns: convert a to a double, rounding toward even. */

/* Assumption: double is a IEEE 64 bit floating point type
 *             di_int is a 64 bit integral type
 */

/* seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */

#ifndef __SOFT_FP__
/* Support for systems that have hardware floating-point; we'll set the inexact flag
 * as a side-effect of this computation.
 */
COMPILER_RT_ABI double
__floatdidf(di_int a)
{
    /* Split a into two halves and let the FPU do the rounding:
     * low.d is 2^52 with the low 32 bits of a OR'd into its mantissa, so
     * (low.d - twop52) == (double)(a & 0xffffffff) exactly; the high half
     * is exact as (int32)*2^32.  Their sum is rounded to nearest-even by
     * the final hardware addition. */
    static const double twop52 = 4503599627370496.0; // 0x1.0p52
    static const double twop32 = 4294967296.0;       // 0x1.0p32

    union { int64_t x; double d; } low = { .d = twop52 };

    const double high = (int32_t)(a >> 32) * twop32;
    low.x |= a & INT64_C(0x00000000ffffffff);

    const double result = (high - twop52) + low.d;
    return result;
}

#else
/* Support for systems that don't have hardware floating-point; there are no flags to
 * set, and we don't want to code-gen to an unknown soft-float implementation.
 */
COMPILER_RT_ABI double
__floatdidf(di_int a)
{
    if (a == 0)
        return 0.0;
    const unsigned N = sizeof(di_int) * CHAR_BIT;
    const di_int s = a >> (N-1);          /* all-ones if a < 0, else zero */
    a = (a ^ s) - s;                      /* absolute value, branch-free */
    int sd = N - __builtin_clzll(a);      /* number of significant digits */
    int e = sd - 1;                       /* exponent */
    if (sd > DBL_MANT_DIG)
    {
        /*  start:  0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
         *  finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
         *                                                12345678901234567890123456
         *  1 = msb 1 bit
         *  P = bit DBL_MANT_DIG-1 bits to the right of 1
         *  Q = bit DBL_MANT_DIG bits to the right of 1
         *  R = "or" of all bits to the right of Q
         */
        switch (sd)
        {
        case DBL_MANT_DIG + 1:
            a <<= 1;
            break;
        case DBL_MANT_DIG + 2:
            break;
        default:
            a = ((du_int)a >> (sd - (DBL_MANT_DIG+2))) |
                ((a & ((du_int)(-1) >> ((N + DBL_MANT_DIG+2) - sd))) != 0);
        };
        /* finish: */
        a |= (a & 4) != 0;  /* Or P into R */
        ++a;  /* round - this step may add a significant bit */
        a >>= 2;  /* dump Q and R */
        /* a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits */
        if (a & ((du_int)1 << DBL_MANT_DIG))
        {
            a >>= 1;
            ++e;
        }
        /* a is now rounded to DBL_MANT_DIG bits */
    }
    else
    {
        a <<= (DBL_MANT_DIG - sd);
        /* a is now rounded to DBL_MANT_DIG bits */
    }
    /* Assemble the IEEE-754 double bit pattern by hand. */
    double_bits fb;
    fb.u.s.high = ((su_int)s & 0x80000000) |  /* sign */
                  ((e + 1023) << 20)       |  /* exponent */
                  ((su_int)(a >> 32) & 0x000FFFFF); /* mantissa-high */
    fb.u.s.low = (su_int)a;                   /* mantissa-low */
    return fb.f;
}
#endif

#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_l2d(di_int a) { return __floatdidf(a); }
#else
AEABI_RTABI double __aeabi_l2d(di_int a) COMPILER_RT_ALIAS(__floatdidf);
#endif
#endif
3,801
120
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/logbl.c
/*-*- mode:c;indent-tabs-mode:t;c-basic-offset:8;tab-width:8;coding:utf-8 -*-│
│vi: set et ft=c ts=8 tw=8 fenc=utf-8                                     :vi│
╚──────────────────────────────────────────────────────────────────────────────╝
│                                                                              │
│  Musl Libc                                                                   │
│  Copyright © 2005-2014 Rich Felker, et al.                                   │
│                                                                              │
│  Permission is hereby granted, free of charge, to any person obtaining       │
│  a copy of this software and associated documentation files (the             │
│  "Software"), to deal in the Software without restriction, including         │
│  without limitation the rights to use, copy, modify, merge, publish,         │
│  distribute, sublicense, and/or sell copies of the Software, and to          │
│  permit persons to whom the Software is furnished to do so, subject to       │
│  the following conditions:                                                   │
│                                                                              │
│  The above copyright notice and this permission notice shall be              │
│  included in all copies or substantial portions of the Software.             │
│                                                                              │
│  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,             │
│  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF          │
│  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.      │
│  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY        │
│  CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,        │
│  TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE           │
│  SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                      │
│                                                                              │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/math.h"

asm(".ident\t\"\\n\\n\
Musl libc (MIT License)\\n\
Copyright 2005-2014 Rich Felker, et. al.\"");
asm(".include \"libc/disclaimer.inc\"");
/* clang-format off */

#if LDBL_MANT_DIG == 53 && LDBL_MAX_EXP == 1024
/* long double is just IEEE double here, so defer to logb(). */
long double logbl(long double x)
{
	return logb(x);
}
#else
/* Returns the unbiased exponent of x as a long double.
 * Special cases: ±0 → -inf (with divide-by-zero); ±inf → +inf; NaN → NaN.
 * The zero and non-finite cases are mutually exclusive, so the guard order
 * differs from upstream without changing behavior. */
long double logbl(long double x)
{
	if (x == 0)
		return -1/(x*x);
	if (!isfinite(x))
		return x * x;
	return ilogbl(x);
}
#endif
3,026
51
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/udivsi3.c
/* clang-format off */
/* ===-- udivsi3.c - Implement __udivsi3 -----------------------------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 *
 * This file implements __udivsi3 for the compiler_rt library.
 *
 * ===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#include "third_party/compiler_rt/int_lib.h"

/* Returns: a / b */

/* Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide */

/* This function should not call __divsi3! */
COMPILER_RT_ABI su_int
__udivsi3(su_int n, su_int d)
{
    const unsigned n_uword_bits = sizeof(su_int) * CHAR_BIT;
    su_int q;
    su_int r;
    unsigned sr;
    /* special cases */
    if (d == 0)
        return 0; /* ?! division by zero yields 0 here rather than trapping */
    if (n == 0)
        return 0;
    /* sr = how many leading positions n extends past d; negative wraps huge */
    sr = __builtin_clz(d) - __builtin_clz(n);
    /* 0 <= sr <= n_uword_bits - 1 or sr large */
    if (sr > n_uword_bits - 1)  /* d > r */
        return 0;
    if (sr == n_uword_bits - 1)  /* d == 1 */
        return n;
    ++sr;
    /* 1 <= sr <= n_uword_bits - 1 */
    /* Not a special case */
    /* Shift-subtract restoring division: r:q is a double-wide register that
     * we rotate left one bit per iteration, subtracting d whenever r >= d
     * and recording the resulting quotient bit in carry. */
    q = n << (n_uword_bits - sr);
    r = n >> sr;
    su_int carry = 0;
    for (; sr > 0; --sr)
    {
        /* r:q = ((r:q) << 1) | carry */
        r = (r << 1) | (q >> (n_uword_bits - 1));
        q = (q << 1) | carry;
        /* carry = 0;
         * if (r.all >= d.all)
         * {
         *      r.all -= d.all;
         *      carry = 1;
         * }
         */
        /* s is all-ones when r >= d (branch-free compare via sign bit). */
        const si_int s = (si_int)(d - r - 1) >> (n_uword_bits - 1);
        carry = s & 1;
        r -= d & s;
    }
    q = (q << 1) | carry;
    return q;
}

#if defined(__ARM_EABI__)
AEABI_RTABI su_int __aeabi_uidiv(su_int n, su_int d) COMPILER_RT_ALIAS(__udivsi3);
#endif
1,975
72
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/ashrdi3.c
/* clang-format off */ /*===-- ashrdi3.c - Implement __ashrdi3 -----------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __ashrdi3 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: arithmetic a >> b */ /* Precondition: 0 <= b < bits_in_dword */ COMPILER_RT_ABI di_int __ashrdi3(di_int a, si_int b) { const int bits_in_word = (int)(sizeof(si_int) * CHAR_BIT); dwords input; dwords result; input.all = a; if (b & bits_in_word) /* bits_in_word <= b < bits_in_dword */ { /* result.s.high = input.s.high < 0 ? -1 : 0 */ result.s.high = input.s.high >> (bits_in_word - 1); result.s.low = input.s.high >> (b - bits_in_word); } else /* 0 <= b < bits_in_word */ { if (b == 0) return a; result.s.high = input.s.high >> b; result.s.low = (input.s.high << (bits_in_word - b)) | (input.s.low >> b); } return result.all; } #if defined(__ARM_EABI__) AEABI_RTABI di_int __aeabi_lasr(di_int a, si_int b) COMPILER_RT_ALIAS(__ashrdi3); #endif
1,485
50
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/comprt.S
#include "libc/macros.internal.h" // Nop ref this to force pull the license into linkage. .section .yoink huge_compiler_rt_license: ret .endobj huge_compiler_rt_license,globl,hidden .previous .ident "\n compiler_rt (Licensed MIT) Copyright (c) 2009-2015 by the contributors listed in: github.com/llvm-mirror/compiler-rt/blob/master/CREDITS.TXT" .ident "\n compiler_rt (Licensed \"University of Illinois/NCSA Open Source License\") Copyright (c) 2009-2018 by the contributors listed in: github.com/llvm-mirror/compiler-rt/blob/master/CREDITS.TXT All rights reserved. Developed by: LLVM Team University of Illinois at Urbana-Champaign http://llvm.org Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution. * Neither the names of the LLVM Team, University of Illinois at Urbana-Champaign, nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE."
2,193
51
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/comparesf2.c
/* clang-format off */
//===-- lib/comparesf2.c - Single-precision comparisons -----------*- C -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the following soft-fp_t comparison routines:
//
//   __eqsf2   __gesf2   __unordsf2
//   __lesf2   __gtsf2
//   __ltsf2
//   __nesf2
//
// The semantics of the routines grouped in each column are identical, so there
// is a single implementation for each, and wrappers to provide the other names.
//
// The main routines behave as follows:
//
//   __lesf2(a,b) returns -1 if a < b
//                         0 if a == b
//                         1 if a > b
//                         1 if either a or b is NaN
//
//   __gesf2(a,b) returns -1 if a < b
//                         0 if a == b
//                         1 if a > b
//                        -1 if either a or b is NaN
//
//   __unordsf2(a,b) returns 0 if both a and b are numbers
//                           1 if either a or b is NaN
//
// Note that __lesf2( ) and __gesf2( ) are identical except in their handling of
// NaN values.
//
//===----------------------------------------------------------------------===//

STATIC_YOINK("huge_compiler_rt_license");

#define SINGLE_PRECISION
#include "third_party/compiler_rt/fp_lib.inc"

enum LE_RESULT {
    LE_LESS      = -1,
    LE_EQUAL     =  0,
    LE_GREATER   =  1,
    LE_UNORDERED =  1
};

COMPILER_RT_ABI enum LE_RESULT
__lesf2(fp_t a, fp_t b) {

    const srep_t aInt = toRep(a);
    const srep_t bInt = toRep(b);
    const rep_t aAbs = aInt & absMask;
    const rep_t bAbs = bInt & absMask;

    // If either a or b is NaN, they are unordered.
    // (NaN has exponent all-ones and non-zero significand, i.e. aAbs > infRep.)
    if (aAbs > infRep || bAbs > infRep) return LE_UNORDERED;

    // If a and b are both zeros, they are equal.
    // (+0 and -0 differ only in the sign bit, which absMask strips.)
    if ((aAbs | bAbs) == 0) return LE_EQUAL;

    // If at least one of a and b is positive, we get the same result comparing
    // a and b as signed integers as we would with a fp_ting-point compare.
    // (aInt & bInt) >= 0 holds exactly when at least one sign bit is clear,
    // since srep_t is signed and the sign bit of the AND is the AND of signs.
    if ((aInt & bInt) >= 0) {
        if (aInt < bInt) return LE_LESS;
        else if (aInt == bInt) return LE_EQUAL;
        else return LE_GREATER;
    }

    // Otherwise, both are negative, so we need to flip the sense of the
    // comparison to get the correct result.  (This assumes a twos- or ones-
    // complement integer representation; if integers are represented in a
    // sign-magnitude representation, then this flip is incorrect).
    else {
        if (aInt > bInt) return LE_LESS;
        else if (aInt == bInt) return LE_EQUAL;
        else return LE_GREATER;
    }
}

// Alias for libgcc compatibility
COMPILER_RT_ABI enum LE_RESULT
__cmpsf2(fp_t a, fp_t b) {
    return __lesf2(a, b);
}

enum GE_RESULT {
    GE_LESS      = -1,
    GE_EQUAL     =  0,
    GE_GREATER   =  1,
    GE_UNORDERED = -1   // Note: different from LE_UNORDERED
};

// Same algorithm as __lesf2, differing only in the NaN result (see header).
COMPILER_RT_ABI enum GE_RESULT
__gesf2(fp_t a, fp_t b) {

    const srep_t aInt = toRep(a);
    const srep_t bInt = toRep(b);
    const rep_t aAbs = aInt & absMask;
    const rep_t bAbs = bInt & absMask;

    if (aAbs > infRep || bAbs > infRep) return GE_UNORDERED;
    if ((aAbs | bAbs) == 0) return GE_EQUAL;
    if ((aInt & bInt) >= 0) {
        if (aInt < bInt) return GE_LESS;
        else if (aInt == bInt) return GE_EQUAL;
        else return GE_GREATER;
    } else {
        if (aInt > bInt) return GE_LESS;
        else if (aInt == bInt) return GE_EQUAL;
        else return GE_GREATER;
    }
}

// Returns nonzero iff either argument is NaN.
COMPILER_RT_ABI int
__unordsf2(fp_t a, fp_t b) {
    const rep_t aAbs = toRep(a) & absMask;
    const rep_t bAbs = toRep(b) & absMask;
    return aAbs > infRep || bAbs > infRep;
}

// The following are alternative names for the preceding routines.

COMPILER_RT_ABI enum LE_RESULT
__eqsf2(fp_t a, fp_t b) {
    return __lesf2(a, b);
}

COMPILER_RT_ABI enum LE_RESULT
__ltsf2(fp_t a, fp_t b) {
    return __lesf2(a, b);
}

COMPILER_RT_ABI enum LE_RESULT
__nesf2(fp_t a, fp_t b) {
    return __lesf2(a, b);
}

COMPILER_RT_ABI enum GE_RESULT
__gtsf2(fp_t a, fp_t b) {
    return __gesf2(a, b);
}

#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI int __aeabi_fcmpun(fp_t a, fp_t b) {
  return __unordsf2(a, b);
}
#else
AEABI_RTABI int __aeabi_fcmpun(fp_t a, fp_t b) COMPILER_RT_ALIAS(__unordsf2);
#endif
#endif
4,515
158
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/ctzsi2.c
/* clang-format off */
/* ===-- ctzsi2.c - Implement __ctzsi2 -------------------------------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 *
 * This file implements __ctzsi2 for the compiler_rt library.
 *
 * ===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#include "third_party/compiler_rt/int_lib.h"

/* Returns: the number of trailing 0-bits */

/* Precondition: a != 0 */

/* Branch-free binary search: each step tests whether the low half of the
 * remaining window is all-zero, and if so shifts it away while adding the
 * window size to the result. */
COMPILER_RT_ABI si_int
__ctzsi2(si_int a)
{
    su_int x = (su_int)a;
    si_int t = ((x & 0x0000FFFF) == 0) << 4;  /* if (x has no small bits) t = 16 else 0 */
    x >>= t;           /* x = [0 - 0xFFFF] + higher garbage bits */
    su_int r = t;       /* r = [0, 16]  */
    /* return r + ctz(x) */
    t = ((x & 0x00FF) == 0) << 3;
    x >>= t;           /* x = [0 - 0xFF] + higher garbage bits */
    r += t;            /* r = [0, 8, 16, 24] */
    /* return r + ctz(x) */
    t = ((x & 0x0F) == 0) << 2;
    x >>= t;           /* x = [0 - 0xF] + higher garbage bits */
    r += t;            /* r = [0, 4, 8, 12, 16, 20, 24, 28] */
    /* return r + ctz(x) */
    t = ((x & 0x3) == 0) << 1;
    x >>= t;
    x &= 3;            /* x = [0 - 3] */
    r += t;            /* r = [0 - 30] and is even */
    /* return r + ctz(x) */

/*  The branch-less return statement below is equivalent
 *  to the following switch statement:
 *  switch (x)
 *  {
 *  case 0:
 *      return r + 2;
 *  case 2:
 *      return r + 1;
 *  case 1:
 *  case 3:
 *      return r;
 *  }
 */
    return r + ((2 - (x >> 1)) & -((x & 1) == 0));
}
1,844
61
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/truncdfhf2.c
/* clang-format off */ //===-- lib/truncdfhf2.c - double -> half conversion --------------*- C -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// STATIC_YOINK("huge_compiler_rt_license"); #define SRC_DOUBLE #define DST_HALF #include "third_party/compiler_rt/fp_trunc_impl.inc" COMPILER_RT_ABI uint16_t __truncdfhf2(double a) { return __truncXfYf2__(a); } #if defined(__ARM_EABI__) #if defined(COMPILER_RT_ARMHF_TARGET) AEABI_RTABI uint16_t __aeabi_d2h(double a) { return __truncdfhf2(a); } #else AEABI_RTABI uint16_t __aeabi_d2h(double a) COMPILER_RT_ALIAS(__truncdfhf2); #endif #endif
829
30
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fixtfsi.c
/* clang-format off */
/* ===-- fixtfsi.c - Implement __fixtfsi -----------------------------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#define QUAD_PRECISION
#include "third_party/compiler_rt/fp_lib.inc"

#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
/* Destination types consumed by the shared conversion template below. */
typedef si_int fixint_t;
typedef su_int fixuint_t;
#include "third_party/compiler_rt/fp_fixint_impl.inc"

/* Convert an IEEE binary128 value to int32_t; truncation and out-of-range
 * policy are implemented by __fixint (fp_fixint_impl.inc). */
COMPILER_RT_ABI si_int
__fixtfsi(fp_t a) {
    return __fixint(a);
}
#endif
733
27
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fixunsdfti.c
/* clang-format off */
/* ===-- fixunsdfti.c - Implement __fixunsdfti -----------------------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#include "third_party/compiler_rt/int_lib.h"

#ifdef CRT_HAS_128BIT
#define DOUBLE_PRECISION
#include "third_party/compiler_rt/fp_lib.inc"
/* Destination type consumed by the shared conversion template below. */
typedef tu_int fixuint_t;
#include "third_party/compiler_rt/fp_fixuint_impl.inc"

/* Convert an IEEE double to unsigned __int128; truncation and out-of-range
 * policy are implemented by __fixuint (fp_fixuint_impl.inc). */
COMPILER_RT_ABI tu_int
__fixunsdfti(fp_t a) {
    return __fixuint(a);
}
#endif /* CRT_HAS_128BIT */
747
27
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/ctzdi2.c
/* clang-format off */
/* ===-- ctzdi2.c - Implement __ctzdi2 -------------------------------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 *
 * This file implements __ctzdi2 for the compiler_rt library.
 *
 * ===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#include "third_party/compiler_rt/int_lib.h"

/* Returns: the number of trailing 0-bits  */

#if !defined(__clang__) && \
    ((defined(__sparc__) && defined(__arch64__)) || \
     defined(__mips64) || \
     (defined(__riscv) && __SIZEOF_POINTER__ >= 8))
/* On 64-bit architectures with neither a native clz instruction nor a native
 * ctz instruction, gcc resolves __builtin_ctz to __ctzdi2 rather than
 * __ctzsi2, leading to infinite recursion. */
#define __builtin_ctz(a) __ctzsi2(a)
extern si_int __ctzsi2(si_int);
#endif

/* Precondition: a != 0 */

/* Branch-free: f is all-ones when the low word is zero, selecting the high
 * word for the 32-bit ctz and adding 32 to the result in that case. */
COMPILER_RT_ABI si_int
__ctzdi2(di_int a)
{
    dwords x;
    x.all = a;
    const si_int f = -(x.s.low == 0);
    return __builtin_ctz((x.s.high & f) | (x.s.low & ~f)) +
           (f & ((si_int)(sizeof(si_int) * CHAR_BIT)));
}
1,494
44
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/fixsfsi.c
/* clang-format off */
/* ===-- fixsfsi.c - Implement __fixsfsi -----------------------------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#define SINGLE_PRECISION
#include "third_party/compiler_rt/fp_lib.inc"
/* Destination types consumed by the shared conversion template below. */
typedef si_int fixint_t;
typedef su_int fixuint_t;
#include "third_party/compiler_rt/fp_fixint_impl.inc"

/* Convert an IEEE float to int32_t; truncation and out-of-range policy are
 * implemented by __fixint (fp_fixint_impl.inc). */
COMPILER_RT_ABI si_int
__fixsfsi(fp_t a) {
    return __fixint(a);
}

#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI si_int __aeabi_f2iz(fp_t a) { return __fixsfsi(a); }
#else
AEABI_RTABI si_int __aeabi_f2iz(fp_t a) COMPILER_RT_ALIAS(__fixsfsi);
#endif
#endif
893
34
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/ashrti3.c
/* clang-format off */
/* ===-- ashrti3.c - Implement __ashrti3 -----------------------------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 *
 * This file implements __ashrti3 for the compiler_rt library.
 *
 * ===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#include "third_party/compiler_rt/int_lib.h"

#ifdef CRT_HAS_128BIT

/* Returns: arithmetic a >> b */

/* Precondition:  0 <= b < bits_in_tword */

/* 128-bit analogue of __ashrdi3: operate on 64-bit halves via twords. */
COMPILER_RT_ABI ti_int
__ashrti3(ti_int a, si_int b)
{
    const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
    twords input;
    twords result;
    input.all = a;
    if (b & bits_in_dword)  /* bits_in_dword <= b < bits_in_tword */
    {
        /* result.s.high = input.s.high < 0 ? -1 : 0 */
        result.s.high = input.s.high >> (bits_in_dword - 1);
        result.s.low = input.s.high >> (b - bits_in_dword);
    }
    else  /* 0 <= b < bits_in_dword */
    {
        if (b == 0)
            return a;
        result.s.high  = input.s.high >> b;
        result.s.low = (input.s.high << (bits_in_dword - b)) | (input.s.low >> b);
    }
    return result.all;
}

#endif /* CRT_HAS_128BIT */
1,429
50
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/ucmpti2.c
/* clang-format off */ /* ===-- ucmpti2.c - Implement __ucmpti2 -----------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __ucmpti2 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" #ifdef CRT_HAS_128BIT /* Returns: if (a < b) returns 0 * if (a == b) returns 1 * if (a > b) returns 2 */ COMPILER_RT_ABI si_int __ucmpti2(tu_int a, tu_int b) { utwords x; x.all = a; utwords y; y.all = b; if (x.s.high < y.s.high) return 0; if (x.s.high > y.s.high) return 2; if (x.s.low < y.s.low) return 0; if (x.s.low > y.s.low) return 2; return 1; } #endif /* CRT_HAS_128BIT */
1,097
46
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/powidf2.c
/* clang-format off */ /* ===-- powidf2.cpp - Implement __powidf2 ---------------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __powidf2 for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ STATIC_YOINK("huge_compiler_rt_license"); #include "third_party/compiler_rt/int_lib.h" /* Returns: a ^ b */ COMPILER_RT_ABI double __powidf2(double a, si_int b) { const int recip = b < 0; double r = 1; while (1) { if (b & 1) r *= a; b /= 2; if (b == 0) break; a *= a; } return recip ? 1/r : r; }
910
38
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/floatundixf.c
/* clang-format off */
/* ===-- floatundixf.c - Implement __floatundixf ---------------------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 *
 * This file implements __floatundixf for the compiler_rt library.
 *
 * ===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#if !_ARCH_PPC

#include "third_party/compiler_rt/int_lib.h"

/* Returns: convert a to a long double, rounding toward even. */

/* Assumption: long double is a IEEE 80 bit floating point type padded to 128 bits
 *             du_int is a 64 bit integral type
 */

/* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee |
 * 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
 */
COMPILER_RT_ABI long double
__floatundixf(du_int a)
{
    if (a == 0)
        return 0.0;
    const unsigned N = sizeof(du_int) * CHAR_BIT;
    int clz = __builtin_clzll(a);
    int e = (N - 1) - clz ;    /* exponent */
    long_double_bits fb;
    /* The x87 extended format stores the leading 1 explicitly, so the full
     * 64-bit mantissa is just a shifted to put its top set bit at bit 63;
     * the conversion is exact — no rounding ever occurs. */
    fb.u.high.s.low = (e + 16383);        /* exponent */
    fb.u.low.all = a << clz;              /* mantissa */
    return fb.f;
}

#endif /* _ARCH_PPC */
1,431
46
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/floatuntitf.c
/* clang-format off */
//===-- lib/floatuntitf.c - uint128 -> quad-precision conversion --*- C -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements tu_int to quad-precision conversion for the
// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
// mode.
//
//===----------------------------------------------------------------------===//

STATIC_YOINK("huge_compiler_rt_license");

#define QUAD_PRECISION
#include "third_party/compiler_rt/fp_lib.inc"
#include "third_party/compiler_rt/int_lib.h"

/* Returns: convert a tu_int to a fp_t, rounding toward even. */

/* Assumption: fp_t is a IEEE 128 bit floating point type
 *             tu_int is a 128 bit integral type
 */

/* seee eeee eeee eeee mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm |
 * mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
 */

#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
COMPILER_RT_ABI fp_t
__floatuntitf(tu_int a)
{
    if (a == 0)
        return 0.0;
    const unsigned N = sizeof(tu_int) * CHAR_BIT;
    int sd = N - __clzti2(a);  /* number of significant digits */
    int e = sd - 1;            /* exponent */
    if (sd > LDBL_MANT_DIG)
    {
        /* The value has more significant bits than the quad significand can
         * hold, so it must be rounded (to nearest, ties to even). Shift so
         * that the guard (P), round (Q) and sticky (R) bits sit just below
         * the kept bits:
         *
         *  start:  0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
         *  finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
         *                                                12345678901234567890123456
         *  1 = msb 1 bit
         *  P = bit LDBL_MANT_DIG-1 bits to the right of 1
         *  Q = bit LDBL_MANT_DIG bits to the right of 1
         *  R = "or" of all bits to the right of Q
         */
        switch (sd)
        {
        case LDBL_MANT_DIG + 1:
            /* exactly one extra bit: make room for an R bit of zero */
            a <<= 1;
            break;
        case LDBL_MANT_DIG + 2:
            /* already in PQR position */
            break;
        default:
            /* shift right, OR-folding every discarded bit into R (sticky) */
            a = (a >> (sd - (LDBL_MANT_DIG+2))) |
                ((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG+2) - sd))) != 0);
        };
        /* finish: round to nearest, ties to even */
        a |= (a & 4) != 0;  /* Or P into R */
        ++a;  /* round - this step may add a significant bit */
        a >>= 2;  /* dump Q and R */
        /* a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits */
        if (a & ((tu_int)1 << LDBL_MANT_DIG))
        {
            /* rounding carried all the way up; renormalize */
            a >>= 1;
            ++e;
        }
        /* a is now rounded to LDBL_MANT_DIG bits */
    }
    else
    {
        a <<= (LDBL_MANT_DIG - sd);
        /* a is now rounded to LDBL_MANT_DIG bits */
    }
    /* Assemble the IEEE binary128 representation: the leading significand
     * bit is implicit, so only the low LDBL_MANT_DIG-1 bits of a are kept. */
    long_double_bits fb;
    fb.u.high.all = (du_int)(e + 16383) << 48          /* exponent */
                  | ((a >> 64) & 0x0000ffffffffffffLL); /* significand */
    fb.u.low.all = (du_int)(a);
    return fb.f;
}

#endif
2,956
83
jart/cosmopolitan
false
cosmopolitan/third_party/compiler_rt/divsc3.c
/* clang-format off */
/*===-- divsc3.c - Implement __divsc3 -------------------------------------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 *
 * This file implements __divsc3 for the compiler_rt library.
 *
 *===----------------------------------------------------------------------===
 */

STATIC_YOINK("huge_compiler_rt_license");

#define SINGLE_PRECISION
#include "third_party/compiler_rt/fp_lib.inc"
#include "third_party/compiler_rt/int_lib.h"
#include "third_party/compiler_rt/int_math.h"

/* Returns: the quotient of (a + ib) / (c + id) */

COMPILER_RT_ABI Fcomplex
__divsc3(float __a, float __b, float __c, float __d)
{
    /* Scale the divisor by a power of two so that __denom below cannot
     * overflow or lose all precision to underflow; the quotient is rescaled
     * by the same power afterwards via scalbnf. */
    int __ilogbw = 0;
    float __logbw =
        __compiler_rt_logbf(crt_fmaxf(crt_fabsf(__c), crt_fabsf(__d)));
    if (crt_isfinite(__logbw))
    {
        __ilogbw = (int)__logbw;
        __c = crt_scalbnf(__c, -__ilogbw);
        __d = crt_scalbnf(__d, -__ilogbw);
    }
    /* Textbook complex division: (a+ib)(c-id) / (c*c + d*d). */
    float __denom = __c * __c + __d * __d;
    Fcomplex z;
    COMPLEX_REAL(z) = crt_scalbnf((__a * __c + __b * __d) / __denom, -__ilogbw);
    COMPLEX_IMAGINARY(z) = crt_scalbnf((__b * __c - __a * __d) / __denom, -__ilogbw);
    /* If both parts came out NaN, try to recover a more useful result for
     * the special cases where a correct answer is still defined (the
     * fix-ups prescribed by C Annex G for complex division). */
    if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z)))
    {
        if ((__denom == 0) && (!crt_isnan(__a) || !crt_isnan(__b)))
        {
            /* nonzero / zero: infinity signed consistently with c */
            COMPLEX_REAL(z) = crt_copysignf(CRT_INFINITY, __c) * __a;
            COMPLEX_IMAGINARY(z) = crt_copysignf(CRT_INFINITY, __c) * __b;
        }
        else if ((crt_isinf(__a) || crt_isinf(__b)) && crt_isfinite(__c) &&
                 crt_isfinite(__d))
        {
            /* infinite / finite: collapse a,b to signed 0/1 indicators,
             * then the result is an appropriately signed infinity */
            __a = crt_copysignf(crt_isinf(__a) ? 1 : 0, __a);
            __b = crt_copysignf(crt_isinf(__b) ? 1 : 0, __b);
            COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
            COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
        }
        else if (crt_isinf(__logbw) && __logbw > 0 && crt_isfinite(__a) &&
                 crt_isfinite(__b))
        {
            /* finite / infinite: collapse c,d to signed 0/1 indicators,
             * then the result is an appropriately signed zero */
            __c = crt_copysignf(crt_isinf(__c) ? 1 : 0, __c);
            __d = crt_copysignf(crt_isinf(__d) ? 1 : 0, __d);
            COMPLEX_REAL(z) = 0 * (__a * __c + __b * __d);
            COMPLEX_IMAGINARY(z) = 0 * (__b * __c - __a * __d);
        }
    }
    return z;
}
2,479
67
jart/cosmopolitan
false