max_stars_count (int64, 301 to 224k) | text (string, lengths 6 to 1.05M) | token_count (int64, 3 to 727k) |
---|---|---|
9,681 | #!/usr/bin/env python3
import xmlrpc.client
import sys
target = sys.argv[1]
command = sys.argv[2]
with xmlrpc.client.ServerProxy(target) as proxy:
old = getattr(proxy, 'supervisor.readLog')(0,0)
logfile = getattr(proxy, 'supervisor.supervisord.options.logfile.strip')()
getattr(proxy, 'supervisor.supervisord.options.warnings.linecache.os.system')('{} | tee -a {}'.format(command, logfile))
result = getattr(proxy, 'supervisor.readLog')(0,0)
print(result[len(old):])
| 190 |
4,036 | public class ReflectionTest {
public static class TestObject1 {
public TestObject1() { }
}
public static class TestObject2 {
public TestObject2() { }
}
public static class TestObject3 {
public TestObject3() { }
}
public static class TestObject4 {
public TestObject4() { }
}
public static class TestObject4a extends TestObject4 {
public TestObject4a() { }
}
public static void main(String[] args) throws InstantiationException, IllegalAccessException, ClassNotFoundException {
// Get class by name
Class.forName("ReflectionTest$TestObject1").newInstance();
// Use classloader
ReflectionTest.class.getClassLoader().loadClass("ReflectionTest$TestObject2").newInstance();
// Store in variable, load from that
Class<?> clazz = Class.forName("ReflectionTest$TestObject3");
clazz.newInstance();
/*
* We cannot determine the class by looking at a String literal, so we should look to the
* type - in this case Class<? extends TestObject4>. We should therefore identify both
* TestObject4 and TestObject4a as live.
*/
getClass4().newInstance();
}
public static Class<? extends TestObject4> getClass4() {
return TestObject4.class;
}
}
| 361 |
7,737 | <reponame>nonli/cphalcon<filename>ext/phalcon/validation/validator/file/mimetype.zep.h
extern zend_class_entry *phalcon_validation_validator_file_mimetype_ce;
ZEPHIR_INIT_CLASS(Phalcon_Validation_Validator_File_MimeType);
PHP_METHOD(Phalcon_Validation_Validator_File_MimeType, validate);
ZEND_BEGIN_ARG_WITH_RETURN_TYPE_INFO_EX(arginfo_phalcon_validation_validator_file_mimetype_validate, 0, 2, _IS_BOOL, 0)
ZEND_ARG_OBJ_INFO(0, validation, Phalcon\\Validation, 0)
ZEND_ARG_INFO(0, field)
ZEND_END_ARG_INFO()
ZEPHIR_INIT_FUNCS(phalcon_validation_validator_file_mimetype_method_entry) {
PHP_ME(Phalcon_Validation_Validator_File_MimeType, validate, arginfo_phalcon_validation_validator_file_mimetype_validate, ZEND_ACC_PUBLIC)
PHP_FE_END
};
| 327 |
1,822 | <gh_stars>1000+
"""
Tkinter UI for StaSh
"""
import six
from six.moves import tkinter, tkinter_messagebox, tkinter_scrolledtext, queue
from ..shscreens import ShChar
from ..shcommon import K_CC, K_CD, K_HUP, K_HDN, K_LEFT, K_RIGHT, K_CU, K_TAB, K_HIST, K_CZ, K_KB
from .base import ShBaseUI, ShBaseTerminal, ShBaseSequentialRenderer
class ShUI(ShBaseUI):
"""
A UI using the Tkinter module.
"""
def __init__(self, *args, **kwargs):
ShBaseUI.__init__(self, *args, **kwargs)
# ui
self.tk = tkinter.Tk()
self.tk.title("StaSh")
self.tk.protocol("WM_DELETE_WINDOW", self.on_close)
# fullscreen logic
# from: https://stackoverflow.com/a/23840010
self._fullscreen = False
self.tk.bind_all("<F11>", self._toggle_fullscreen)
# terminal
self.terminal = ShTerminal(self.stash, self)
# right click menu
self._rc_menu = tkinter.Menu(self.tk, tearoff=0)
self._rc_menu.add_command(label="Copy", command=self._rc_copy)
self._rc_menu.add_command(label="Paste", command=self._rc_paste)
self._rc_menu.add_command(label="Toggle Fullscreen", command=self._toggle_fullscreen)
self._rc_menu.add_command(label="Quit", command=self.stash.close)
self.tk.bind("<Button-3>", self._popup_rc_menu) # TODO: check <Button-3> portability
def show(self):
self.tk.mainloop()
def close(self):
self.on_exit() # not on_close()
self._close_ui()
def on_close(self):
"""
Called when the window will be closed
"""
if tkinter_messagebox.askokcancel(u"Quit", u"Are you sure you want to quit?"):
self.on_exit()
self._close_ui()
def _close_ui(self):
"""
Actually close the UI.
"""
self.stash.renderer._stop_rendering()
self.tk.destroy()
def history_present(self, history):
window = tkinter.Toplevel(self.tk)
listbox = tkinter.Listbox(window)
listbox.pack(side=tkinter.LEFT, fill=tkinter.BOTH, expand=1)
scrollbar = tkinter.Scrollbar(window, orient=tkinter.VERTICAL)
scrollbar.config(command=listbox.yview)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
listbox.config(yscrollcommand=scrollbar.set)
# insert data
items = history.getlist()
for line in items:
listbox.insert(tkinter.END, line)
listbox.bind("<Double-Button-1>", lambda e: self._history_selected(window, items, listbox.curselection()))
listbox.bind("<Return>", lambda e: self._history_selected(window, items, listbox.curselection()))
listbox.focus_set()
def _history_selected(self, window, items, idx):
"""
Called when a line was selected from the history popover.
:param window: the history popover window
:type window: tkinter.Toplevel
:param items: list of lines in the history
:type items: list of str
:param idx: selected index
:type idx: int
"""
i = idx[0]
window.destroy()
line = items[i]
self.history_selected(line, i)
def _popup_rc_menu(self, event):
"""
Show self._rc_menu as a popup.
:param event: tkinter event
"""
try:
self._rc_menu.post(event.x_root, event.y_root)
finally:
self._rc_menu.grab_release()
def _rc_copy(self):
"""
Called on "Copy" in rc_menu. Copy selected content to clipboard.
"""
sr = self.terminal.selected_range
selected_text = self.terminal.text[sr[0]:sr[1]]
self.stash.libdist.clipboard_set(selected_text)
def _rc_paste(self):
"""
Called on "Paste" in rc_menu. Paste text from clipboard.
"""
text = self.stash.libdist.clipboard_get()
rng = self.terminal.selected_range
self.stash.user_action_proxy.tv_responder.textview_should_change(None, rng, text)
def _toggle_fullscreen(self, event=None):
"""
Toggle the fullscreen mode.
"""
self._fullscreen = not self._fullscreen
self.tk.attributes("-fullscreen", self._fullscreen)
return "break"
class ShTerminal(ShBaseTerminal):
"""
A Terminal using the Tkinter module
"""
_LOOP_DELAY = 5
_keymapping = { # tkinter event -> StaSh key
"\x03": K_CC, # ctrl-c
"\t": K_TAB, # tab
"\x08": K_HIST, # ctrl-h
"\x1a": K_CZ, # ctrl-z
"\x15": K_CU, # ctrl-u
}
def __init__(self, stash, parent):
ShBaseTerminal.__init__(self, stash, parent)
self._txtvar_out = tkinter.StringVar(self.parent.tk)
self._txtvar_out.trace("w", self._update_text)
self._txt = tkinter_scrolledtext.ScrolledText(
self.parent.tk,
wrap=tkinter.CHAR,
bg=self._color_from_tuple(self.background_color),
fg=self._color_from_tuple(self.text_color),
insertbackground=self._color_from_tuple(self.tint_color),
selectbackground=self._color_from_tuple(self.tint_color),
)
self._txt.pack(fill=tkinter.BOTH, expand=1)
# binding
self._txt.bind("<Key>", self._on_key_press)
self._txt.bind("<FocusIn>", self._on_focus)
self._txt.bind("<FocusOut>", self._on_focus)
self._txt.bind("<Left>", self._arrow_key_pressed)
self._txt.bind("<Right>", self._arrow_key_pressed)
self._txt.bind("<Up>", self._arrow_key_pressed)
self._txt.bind("<Down>", self._arrow_key_pressed)
# we can not yet initialize the color system, so we need to do this later
self._colors_initialized = False
# output queue
self._q = queue.Queue()
self.parent.tk.after(self._LOOP_DELAY, self._loop)
def _loop(self):
try:
v = self._q.get(0)
except queue.Empty:
pass
else:
self._txtvar_out.set(v)
self.parent.tk.after(self._LOOP_DELAY, self._loop)
@property
def text(self):
return self._txt.get("1.0", tkinter.END).replace("\r\n", "\n").replace("\r", "\n")[:-1]
@text.setter
def text(self, value):
self._q.put(value)
def _on_key_press(self, event):
"""
Called when a key was pressed.
:param event: the event which fired this callback
:type event: six.moves.tkinter.Event
"""
# get the current position
cp = self._get_cursor_position() # TODO: check if this must be calculated before or after the keypress
rng = self.selected_range
replacement = event.char
skip_should_change = False # if true, skip should_change
if self.debug:
self.logger.debug("key {!r} pressed (symbol: {!r}; selected: {!r})".format(replacement, event.keysym, rng))
if replacement in ("\r", "\r\n"):
replacement = "\n"
elif replacement == "\x08" and event.keysym != "h":
# backspace (for some reason, same code as ctrl-h)
replacement = u""
if rng[0] == rng[1]:
rng = (rng[0] - 1, rng[1])
elif replacement == "\x7f":
# del
replacement = u""
skip_should_change = True
if rng[0] == rng[1]:
rng = (rng[0], rng[1])
elif replacement in self._keymapping:
self.stash.user_action_proxy.vk_tapped(self._keymapping[replacement])
return "break"
if skip_should_change or self.stash.user_action_proxy.tv_responder.textview_should_change(None, rng, replacement):
self.parent.tk.after(0, self._notify_change)
#self.parent.tk.after(0, self._notify_cursor_move)
else:
# break event
return "break"
# TODO: the cursor probably moved
def _arrow_key_pressed(self, event):
"""
Called when an arrow key was pressed.
"""
d = event.keysym.lower()
if d == "left":
# self.parent.arrowLeftAction()
self.stash.user_action_proxy.vk_tapped(K_LEFT)
elif d == "right":
# self.parent.arrowRightAction()
self.stash.user_action_proxy.vk_tapped(K_RIGHT)
elif d == "up":
# self.parent.arrowUpAction()
self.stash.user_action_proxy.vk_tapped(K_HUP)
elif d == "down":
# self.parent.arrowDownAction()
self.stash.user_action_proxy.vk_tapped(K_HDN)
else:
raise ValueError("Unknown key: {!r}".format(d))
return "break"
def _notify_change(self):
"""
Notify StaSh that the text changed.
"""
self.stash.user_action_proxy.tv_responder.textview_did_change(None)
def _set_text(self, text):
"""
Set the text.
:param text: text to set
:type text: str
"""
self.text = text
def _on_focus(self, event):
"""
Called when the widget gained focus.
:param event: the event which fired this callback
:type event: six.moves.tkinter.Event
"""
self.stash.user_action_proxy.tv_responder.textview_did_begin_editing(None)
def _on_focus_loss(self, event):
"""
Called when the focus was lost.
:param event: the event which fired this callback
:type event: six.moves.tkinter.Event
"""
self.stash.user_action_proxy.tv_responder.textview_did_end_editing(None)
def _get_cursor_position(self):
"""
Return the cursor position as a delta from the start.
:return: the cursor position
:rtype: int
"""
v = self._get_absolute_cursor_position()
return self._abs_cursor_pos_to_rel_pos(v)
def _get_absolute_cursor_position(self):
"""
Return the actual cursor position as a tuple of (row, column)
:return: (row, column) of cursor
:rtype: tuple of (int, int)
"""
# source of first line: https://stackoverflow.com/questions/30000368/how-to-get-current-cursor-position-for-text-widget
raw = self._txt.index(tkinter.INSERT)
return self._tk_index_to_tuple(raw)
def _abs_cursor_pos_to_rel_pos(self, value, lines=None):
"""
Convert an absolute cursor position (tuple of (int, int)) into an index relative to the start (int).
'lines' are optional and specify a list of lines on which these calculations should be made.
:param value: value to convert
:type value: tuple of (int, int)
:param lines: alternative lines to calculate position from (default: current lines)
:type lines: list of str
"""
if lines is None:
# get lines
lines = self.text.split("\n")
row, column = value
n = 0
# first, add all lines before the current one
for i in range(row):
line = lines[i]
n += len(line) + 1 # 1 for linebreak
# add column
n += column
# done
return n
def _rel_cursor_pos_to_abs_pos(self, value, lines=None):
"""
Convert a cursor position relative to the start (int) to a tuple of (row, column).
'lines' are optional and specify a list of lines on which these calculations should be made.
:param value: value to convert
:type value: int
:param lines: alternative lines to calculate position from (default: current lines)
:type lines: list of str
"""
if lines is None:
# get lines
lines = self.text.split("\n")
n = value
row = 0
while True:
if row >= len(lines):
# for some reason, we are at the end of the text. this is probably a bug, but let's return an approximate value near the end
return (len(lines) - 1, len(lines[len(lines) - 1]) - 1 )
ll = len(lines[row])
if n <= ll:
# n fits in line
return row, n
else:
# n must be in next line
n -= (ll + 1) # 1 for newline
row += 1
def _tk_index_to_tuple(self, value):
"""
Convert a tkinter index to a tuple of (row, column), starting at 0
:param value: value to convert
:type value: str
:return: the converted value as (row, column), both starting at 0
:rtype: tuple of (int, int)
"""
splitted = value.split(".")
row = int(splitted[0]) - 1
column = int(splitted[1])
return (row, column)
def _tuple_to_tk_index(self, value):
"""
Convert a (row, column) tuple to a tk index.
:param value: value to convert
:type value: tuple of (int, int)
:return: the converted value
:rtype: str
"""
row, column = value
return str(row + 1) + "." + str(column)
def _get_selection_range(self):
"""
Return the index of the currently selected text.
:return: start and end index of the currently selected text
:rtype: tuple of (int, int)
"""
# based on: https://stackoverflow.com/questions/4073468/how-do-i-get-a-selected-string-in-from-a-tkinter-text-box
# check if text is selected
if not self._txt.tag_ranges(tkinter.SEL):
return None, None
raw_start = self._txt.index(tkinter.SEL_FIRST)
raw_end = self._txt.index(tkinter.SEL_LAST)
si = self._tk_index_to_tuple(raw_start)
ei = self._tk_index_to_tuple(raw_end)
rsi = self._abs_cursor_pos_to_rel_pos(si)
rei = self._abs_cursor_pos_to_rel_pos(ei)
return rsi, rei
def _leftmost(self):
"""
Check if the current cursor is at the left end of the modifiable chars.
"""
return self._get_cursor_position() <= self.stash.main_screen.x_modifiable
def _update_text(self, *args):
"""
Update the text
"""
self._txt.delete("1.0", tkinter.END)
out = self._txtvar_out.get()
self._txt.insert("1.0", out)
def _tag_for_char(self, c):
"""
Return the tag to use for the given character.
:param c: character to get tag for
:type c: stash.system.shscreens.ShChar
:return: the tag used for this char
:rtype: str
"""
return self._tag_for_options(
fg=c.fg,
bg=c.bg,
bold=c.bold,
italics=c.italics,
underscore=c.underscore,
strikethrough=c.strikethrough,
reverse=c.reverse,
)
def _tag_for_options(self,
fg="default",
bg="default",
bold=False,
italics=False,
underscore=False,
strikethrough=False,
reverse=False,
):
"""
Return a tag which describes the given options.
:param fg: fg color
:type fg: str
:param bg: bg color
:type bg: str
:param bold: boldness
:type bold: bool
:param italics: toggle italics
:type italics: bool
:param underscore: toggle underscore
:type underscore: bool
:param strikethrough: toggle strikethrough
:type strikethrough: bool
:param reverse: no idea
:type reverse: bool
:return: a tag which identifies this style
:rtype: str
"""
s = "{}-{}".format(fg, bg)
if bold:
s += "-bold"
if italics:
s += "italics"
if underscore:
s += "-underscore"
if strikethrough:
s += "-strikethrough"
if reverse:
s += "-reverse"
return s
def _add_color_tags(self):
"""
Add the color tags.
"""
# TODO: surely there is a better way of doing this.
self.logger.info("Initializing color system...")
for fg in self.stash.renderer.FG_COLORS:
for bg in self.stash.renderer.BG_COLORS:
for bold in (False, True):
for italics in (False, True):
for underscore in (False, True):
for strikethrough in (False, True):
# strikethrough is implemented in replace_in_range()
for reverse in (False, True):
# reverse does not actually seem to be used anywhere
tag = self._tag_for_options(
fg=fg,
bg=bg,
bold=bold,
italics=italics,
underscore=underscore,
strikethrough=strikethrough,
reverse=reverse,
)
kwargs = {}
fontattrs = []
if fg != "default":
kwargs["foreground"] = self.stash.renderer.FG_COLORS[fg]
if bg != "default":
kwargs["background"] = self.stash.renderer.BG_COLORS[bg]
if underscore:
kwargs["underline"] = True
if bold:
fontattrs.append("bold")
if italics:
fontattrs.append("italic")
font = ("Menlo-regular", self.font_size, " ".join(fontattrs))
kwargs["font"] = font
self._txt.tag_config(
tag,
**kwargs
)
# TODO: support for reverse
self._colors_initialized = True
self.logger.info("Color system initialized.")
def _color_from_tuple(self, value):
"""
Convert an rgb color tuple to a hex color
:param value: value to convert
:type value: tuple of (int, int, int)
:return: hexcode of color
:rtype: str
"""
r, g, b = value
r = int(255 * r)
g = int(255 * g)
b = int(255 * b)
hexcode = "#{:02X}{:02X}{:02X}".format(r, g, b)
return hexcode
# ============= api implementation ============
@property
def selected_range(self):
start, end = self._get_selection_range()
if (start is None) or (end is None):
cp = self._get_cursor_position()
return (cp, cp)
else:
return (start, end)
@selected_range.setter
def selected_range(self, value):
assert isinstance(value, tuple)
assert len(value) == 2
assert isinstance(value[0], int) and isinstance(value[1], int)
if value == self.selected_range:
# do nothing
pass
else:
# set cursor synced to false
self.cursor_synced = False
# set tag
start = self._tuple_to_tk_index(self._rel_cursor_pos_to_abs_pos(value[0]))
end = self._tuple_to_tk_index(self._rel_cursor_pos_to_abs_pos(value[1]))
self._txt.tag_add(tkinter.SEL, start, end)
self._txt.mark_set(tkinter.INSERT, end)
# set focus
self.set_focus()
def scroll_to_end(self):
self._txt.see(tkinter.END)
def set_focus(self):
self._txt.focus_set()
def lose_focus(self):
self.parent.tk.focus_set()
def replace_in_range(self, rng, text):
"""
Replace the text in the given range
:param rng: range to replace (start, length)
:type rng: tuple of (int, int)
:param text: text to insert
:type text: iterable of str or ShChar
"""
rstart, length = rng
start, end = self._rel_cursor_pos_to_abs_pos(rstart), self._rel_cursor_pos_to_abs_pos(rstart + length)
tkstart, tkend = self._tuple_to_tk_index(start), self._tuple_to_tk_index(end)
saved = self.selected_range
self._txt.delete(tkstart, tkend)
cp = rstart
for c in text:
a = 1
ctkp = self._tuple_to_tk_index(self._rel_cursor_pos_to_abs_pos(cp))
if isinstance(c, (six.binary_type, six.text_type)):
self._txt.insert(ctkp, c)
elif isinstance(c, ShChar):
if not self._colors_initialized:
self._add_color_tags()
ch = c.data
if c.strikethrough:
ch = u"\u0336" + ch
a += 1
self._txt.insert(ctkp, ch, self._tag_for_char(c))
else:
raise TypeError("Unknown character type {!r}!".format(type(c)))
cp += a
self.selected_range = saved # restore cursor position
def get_wh(self):
"""
Return the number of columns and rows.
:return: number of columns and rows.
:rtype: tuple of (int, int)
"""
return (self._txt.config("width")[4], self._txt.config("height")[4])
class ShSequentialRenderer(ShBaseSequentialRenderer):
"""
ShBaseSequentialRenderer implementation for Tkinter
"""
RENDER_INTERVAL = 1
FG_COLORS = {
'black': "black",
'red': "red",
'green': "green",
'brown': "brown",
'blue': "blue",
'magenta': "magenta",
'cyan': "cyan",
'white': "white",
'gray': "gray",
'yellow': "yellow",
'smoke': "gray64",
'default': "white",
}
BG_COLORS = {
'black': "black",
'red': "red",
'green': "green",
'brown': "brown",
'blue': "blue",
'magenta': "magenta",
'cyan': "cyan",
'white': "white",
'gray': "gray",
'yellow': "yellow",
'smoke': "gray64",
'default': "red",
}
def __init__(self, *args, **kwargs):
ShBaseSequentialRenderer.__init__(self, *args, **kwargs)
self.should_render = False
self._render_loop_active = True
self.stash.ui.tk.after(0, self._renderer_loop)
def _renderer_loop(self):
"""
Internal renderer loop.
"""
if not self._render_loop_active:
# quit loop
return
if self.should_render:
self.should_render = False
self._render()
self.stash.ui.tk.after(self.RENDER_INTERVAL, self._renderer_loop)
def render(self, no_wait=False):
self.should_render = True
def _stop_rendering(self):
"""
Stop the render loop.
"""
self._render_loop_active = False
def _render(self, no_wait=False):
# Lock screen to get atomic information
with self.screen.acquire_lock():
intact_left_bound, intact_right_bound = self.screen.get_bounds()
screen_buffer_length = self.screen.text_length
cursor_xs, cursor_xe = self.screen.cursor_x
renderable_chars = self.screen.renderable_chars
self.screen.clean()
# First remove any leading texts that are rotated out
if intact_left_bound > 0:
self.terminal.replace_in_range((0, intact_left_bound), '')
tv_text_length = self.terminal.text_length # tv_text_length = tvo_texts.length()
# Second (re)render any modified trailing texts
# When there are contents beyond the right bound, either on screen
# or on terminal, the contents need to be re-rendered.
if intact_right_bound < max(tv_text_length, screen_buffer_length):
if len(renderable_chars) > 0:
self.terminal.replace_in_range(
(intact_right_bound,
tv_text_length - intact_right_bound),
# "".join([c.data for c in renderable_chars]),
renderable_chars,
)
else: # empty string, pure deletion
self.terminal.replace_in_range(
(intact_right_bound,
tv_text_length - intact_right_bound),
'',
)
# Set the cursor position. This makes terminal and main screen cursors in sync
self.terminal.selected_range = (cursor_xs, cursor_xe)
# Ensure cursor line is visible by scroll to the end of the text
self.terminal.scroll_to_end()
| 12,817 |
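The ShTerminal class above converts between three coordinate systems: tkinter "row.column" text indices (rows start at 1, columns at 0), plain (row, column) tuples, and a flat character offset from the start of the buffer. The following minimal, self-contained Python sketch illustrates those same conversions; the function names and the tiny self-check are illustrative only and are not the StaSh method names.

```python
# Minimal sketch of the ShTerminal coordinate conversions (illustrative
# names, not the original StaSh API).

def tk_index_to_tuple(value):
    """Convert a tkinter index such as "2.3" to a 0-based (row, column) tuple."""
    row, column = value.split(".")
    return int(row) - 1, int(column)

def tuple_to_tk_index(pos):
    """Convert a 0-based (row, column) tuple back to a tkinter "row.column" index."""
    row, column = pos
    return "{}.{}".format(row + 1, column)

def abs_to_rel(pos, lines):
    """(row, column) -> flat offset; every earlier line costs len(line) + 1 for its newline."""
    row, column = pos
    return sum(len(line) + 1 for line in lines[:row]) + column

def rel_to_abs(offset, lines):
    """Flat offset -> (row, column); walk lines until the remaining offset fits in one."""
    row = 0
    while row < len(lines) and offset > len(lines[row]):
        offset -= len(lines[row]) + 1
        row += 1
    return row, offset

if __name__ == "__main__":
    lines = "hello\nworld\n!".split("\n")
    assert tk_index_to_tuple("2.3") == (1, 3)
    assert tuple_to_tk_index((1, 3)) == "2.3"
    assert abs_to_rel((1, 3), lines) == 9   # "hello\n" is 6 chars, plus column 3
    assert rel_to_abs(9, lines) == (1, 3)   # and the conversion round-trips
```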
473 | /*
* TI OMAP processor's Multichannel SPI emulation.
*
* Copyright (C) 2007-2009 Nokia Corporation
*
* Original code for OMAP2 by <NAME> <<EMAIL>>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 or
* (at your option) any later version of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "hw/hw.h"
#include "hw/arm/omap.h"
/* Multichannel SPI */
struct omap_mcspi_s {
MemoryRegion iomem;
qemu_irq irq;
int chnum;
uint32_t sysconfig;
uint32_t systest;
uint32_t irqst;
uint32_t irqen;
uint32_t wken;
uint32_t control;
struct omap_mcspi_ch_s {
qemu_irq txdrq;
qemu_irq rxdrq;
uint32_t (*txrx)(void *opaque, uint32_t, int);
void *opaque;
uint32_t tx;
uint32_t rx;
uint32_t config;
uint32_t status;
uint32_t control;
} ch[4];
};
static inline void omap_mcspi_interrupt_update(struct omap_mcspi_s *s)
{
qemu_set_irq(s->irq, s->irqst & s->irqen);
}
static inline void omap_mcspi_dmarequest_update(struct omap_mcspi_ch_s *ch)
{
qemu_set_irq(ch->txdrq,
(ch->control & 1) && /* EN */
(ch->config & (1 << 14)) && /* DMAW */
(ch->status & (1 << 1)) && /* TXS */
((ch->config >> 12) & 3) != 1); /* TRM */
qemu_set_irq(ch->rxdrq,
(ch->control & 1) && /* EN */
(ch->config & (1 << 15)) && /* DMAR */
(ch->status & (1 << 0)) && /* RXS */
((ch->config >> 12) & 3) != 2); /* TRM */
}
static void omap_mcspi_transfer_run(struct omap_mcspi_s *s, int chnum)
{
struct omap_mcspi_ch_s *ch = s->ch + chnum;
if (!(ch->control & 1)) /* EN */
return;
if ((ch->status & (1 << 0)) && /* RXS */
((ch->config >> 12) & 3) != 2 && /* TRM */
!(ch->config & (1 << 19))) /* TURBO */
goto intr_update;
if ((ch->status & (1 << 1)) && /* TXS */
((ch->config >> 12) & 3) != 1) /* TRM */
goto intr_update;
if (!(s->control & 1) || /* SINGLE */
(ch->config & (1 << 20))) { /* FORCE */
if (ch->txrx)
ch->rx = ch->txrx(ch->opaque, ch->tx, /* WL */
1 + (0x1f & (ch->config >> 7)));
}
ch->tx = 0;
ch->status |= 1 << 2; /* EOT */
ch->status |= 1 << 1; /* TXS */
if (((ch->config >> 12) & 3) != 2) /* TRM */
ch->status |= 1 << 0; /* RXS */
intr_update:
if ((ch->status & (1 << 0)) && /* RXS */
((ch->config >> 12) & 3) != 2 && /* TRM */
!(ch->config & (1 << 19))) /* TURBO */
s->irqst |= 1 << (2 + 4 * chnum); /* RX_FULL */
if ((ch->status & (1 << 1)) && /* TXS */
((ch->config >> 12) & 3) != 1) /* TRM */
s->irqst |= 1 << (0 + 4 * chnum); /* TX_EMPTY */
omap_mcspi_interrupt_update(s);
omap_mcspi_dmarequest_update(ch);
}
void omap_mcspi_reset(struct omap_mcspi_s *s)
{
int ch;
s->sysconfig = 0;
s->systest = 0;
s->irqst = 0;
s->irqen = 0;
s->wken = 0;
s->control = 4;
for (ch = 0; ch < 4; ch ++) {
s->ch[ch].config = 0x060000;
s->ch[ch].status = 2; /* TXS */
s->ch[ch].control = 0;
omap_mcspi_dmarequest_update(s->ch + ch);
}
omap_mcspi_interrupt_update(s);
}
static uint64_t omap_mcspi_read(void *opaque, hwaddr addr,
unsigned size)
{
struct omap_mcspi_s *s = (struct omap_mcspi_s *) opaque;
int ch = 0;
uint32_t ret;
if (size != 4) {
return omap_badwidth_read32(opaque, addr);
}
switch (addr) {
case 0x00: /* MCSPI_REVISION */
return 0x91;
case 0x10: /* MCSPI_SYSCONFIG */
return s->sysconfig;
case 0x14: /* MCSPI_SYSSTATUS */
return 1; /* RESETDONE */
case 0x18: /* MCSPI_IRQSTATUS */
return s->irqst;
case 0x1c: /* MCSPI_IRQENABLE */
return s->irqen;
case 0x20: /* MCSPI_WAKEUPENABLE */
return s->wken;
case 0x24: /* MCSPI_SYST */
return s->systest;
case 0x28: /* MCSPI_MODULCTRL */
return s->control;
case 0x68: ch ++;
/* fall through */
case 0x54: ch ++;
/* fall through */
case 0x40: ch ++;
/* fall through */
case 0x2c: /* MCSPI_CHCONF */
return s->ch[ch].config;
case 0x6c: ch ++;
/* fall through */
case 0x58: ch ++;
/* fall through */
case 0x44: ch ++;
/* fall through */
case 0x30: /* MCSPI_CHSTAT */
return s->ch[ch].status;
case 0x70: ch ++;
/* fall through */
case 0x5c: ch ++;
/* fall through */
case 0x48: ch ++;
/* fall through */
case 0x34: /* MCSPI_CHCTRL */
return s->ch[ch].control;
case 0x74: ch ++;
/* fall through */
case 0x60: ch ++;
/* fall through */
case 0x4c: ch ++;
/* fall through */
case 0x38: /* MCSPI_TX */
return s->ch[ch].tx;
case 0x78: ch ++;
/* fall through */
case 0x64: ch ++;
/* fall through */
case 0x50: ch ++;
/* fall through */
case 0x3c: /* MCSPI_RX */
s->ch[ch].status &= ~(1 << 0); /* RXS */
ret = s->ch[ch].rx;
omap_mcspi_transfer_run(s, ch);
return ret;
}
OMAP_BAD_REG(addr);
return 0;
}
static void omap_mcspi_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size)
{
struct omap_mcspi_s *s = (struct omap_mcspi_s *) opaque;
int ch = 0;
if (size != 4) {
return omap_badwidth_write32(opaque, addr, value);
}
switch (addr) {
case 0x00: /* MCSPI_REVISION */
case 0x14: /* MCSPI_SYSSTATUS */
case 0x30: /* MCSPI_CHSTAT0 */
case 0x3c: /* MCSPI_RX0 */
case 0x44: /* MCSPI_CHSTAT1 */
case 0x50: /* MCSPI_RX1 */
case 0x58: /* MCSPI_CHSTAT2 */
case 0x64: /* MCSPI_RX2 */
case 0x6c: /* MCSPI_CHSTAT3 */
case 0x78: /* MCSPI_RX3 */
OMAP_RO_REG(addr);
return;
case 0x10: /* MCSPI_SYSCONFIG */
if (value & (1 << 1)) /* SOFTRESET */
omap_mcspi_reset(s);
s->sysconfig = value & 0x31d;
break;
case 0x18: /* MCSPI_IRQSTATUS */
if (!((s->control & (1 << 3)) && (s->systest & (1 << 11)))) {
s->irqst &= ~value;
omap_mcspi_interrupt_update(s);
}
break;
case 0x1c: /* MCSPI_IRQENABLE */
s->irqen = value & 0x1777f;
omap_mcspi_interrupt_update(s);
break;
case 0x20: /* MCSPI_WAKEUPENABLE */
s->wken = value & 1;
break;
case 0x24: /* MCSPI_SYST */
if (s->control & (1 << 3)) /* SYSTEM_TEST */
if (value & (1 << 11)) { /* SSB */
s->irqst |= 0x1777f;
omap_mcspi_interrupt_update(s);
}
s->systest = value & 0xfff;
break;
case 0x28: /* MCSPI_MODULCTRL */
if (value & (1 << 3)) /* SYSTEM_TEST */
if (s->systest & (1 << 11)) { /* SSB */
s->irqst |= 0x1777f;
omap_mcspi_interrupt_update(s);
}
s->control = value & 0xf;
break;
case 0x68: ch ++;
/* fall through */
case 0x54: ch ++;
/* fall through */
case 0x40: ch ++;
/* fall through */
case 0x2c: /* MCSPI_CHCONF */
if ((value ^ s->ch[ch].config) & (3 << 14)) /* DMAR | DMAW */
omap_mcspi_dmarequest_update(s->ch + ch);
if (((value >> 12) & 3) == 3) /* TRM */
fprintf(stderr, "%s: invalid TRM value (3)\n", __FUNCTION__);
if (((value >> 7) & 0x1f) < 3) /* WL */
fprintf(stderr, "%s: invalid WL value (%" PRIx64 ")\n",
__FUNCTION__, (value >> 7) & 0x1f);
s->ch[ch].config = value & 0x7fffff;
break;
case 0x70: ch ++;
/* fall through */
case 0x5c: ch ++;
/* fall through */
case 0x48: ch ++;
/* fall through */
case 0x34: /* MCSPI_CHCTRL */
if (value & ~s->ch[ch].control & 1) { /* EN */
s->ch[ch].control |= 1;
omap_mcspi_transfer_run(s, ch);
} else
s->ch[ch].control = value & 1;
break;
case 0x74: ch ++;
/* fall through */
case 0x60: ch ++;
/* fall through */
case 0x4c: ch ++;
/* fall through */
case 0x38: /* MCSPI_TX */
s->ch[ch].tx = value;
s->ch[ch].status &= ~(1 << 1); /* TXS */
omap_mcspi_transfer_run(s, ch);
break;
default:
OMAP_BAD_REG(addr);
return;
}
}
static const MemoryRegionOps omap_mcspi_ops = {
.read = omap_mcspi_read,
.write = omap_mcspi_write,
.endianness = DEVICE_NATIVE_ENDIAN,
};
struct omap_mcspi_s *omap_mcspi_init(struct omap_target_agent_s *ta, int chnum,
qemu_irq irq, qemu_irq *drq, omap_clk fclk, omap_clk iclk)
{
struct omap_mcspi_s *s = (struct omap_mcspi_s *)
g_malloc0(sizeof(struct omap_mcspi_s));
struct omap_mcspi_ch_s *ch = s->ch;
s->irq = irq;
s->chnum = chnum;
while (chnum --) {
ch->txdrq = *drq ++;
ch->rxdrq = *drq ++;
ch ++;
}
omap_mcspi_reset(s);
memory_region_init_io(&s->iomem, NULL, &omap_mcspi_ops, s, "omap.mcspi",
omap_l4_region_size(ta, 0));
omap_l4_attach(ta, 0, &s->iomem);
return s;
}
void omap_mcspi_attach(struct omap_mcspi_s *s,
uint32_t (*txrx)(void *opaque, uint32_t, int), void *opaque,
int chipselect)
{
if (chipselect < 0 || chipselect >= s->chnum)
hw_error("%s: Bad chipselect %i\n", __FUNCTION__, chipselect);
s->ch[chipselect].txrx = txrx;
s->ch[chipselect].opaque = opaque;
}
| 5,583 |
2,077 | /**
Copyright (c) 2015-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
*/
#ifndef __plist_Format_XMLParser_h
#define __plist_Format_XMLParser_h
#include <plist/Format/BaseXMLParser.h>
#include <plist/Object.h>
namespace plist {
namespace Format {
class XMLParser : public BaseXMLParser {
private:
struct Key {
std::string value;
bool valid;
bool active;
};
struct State {
typedef std::vector <State> vector;
Object *current;
Key key;
};
private:
Object *_root;
State::vector _stack;
State _state;
std::string _cdata;
public:
XMLParser();
public:
Object *parse(std::vector<uint8_t> const &contents);
private:
virtual void onBeginParse();
virtual void onEndParse(bool success);
private:
void onStartElement(std::string const &name, std::unordered_map<std::string, std::string> const &attrs, size_t depth);
void onEndElement(std::string const &name, size_t depth);
void onCharacterData(std::string const &cdata, size_t depth);
private:
void push(Object *object);
void pop();
private:
inline bool inArray() const;
inline bool inDictionary() const;
inline bool inContainer(size_t depth) const;
inline bool isExpectingKey() const;
inline bool isExpectingCDATA() const;
private:
bool beginObject(std::string const &name, size_t depth);
bool beginArray();
bool beginDictionary();
bool beginString();
bool beginInteger();
bool beginReal();
bool beginBoolean(bool value);
bool beginNull();
bool beginData();
bool beginDate();
bool beginKey();
private:
bool endObject(std::string const &name);
bool endArray();
bool endDictionary();
bool endString();
bool endInteger();
bool endReal();
bool endBoolean();
bool endNull();
bool endData();
bool endDate();
bool endKey();
};
}
}
#endif // !__plist_Format_XMLParser_h
| 798 |
496 | <gh_stars>100-1000
{
"location": "westus2",
"name": "basepi-test",
"resourceGroupName": "basepi-testing",
"subscriptionId": "7657426d-c4c3-44ac-88a2-3b2cd59e6dba",
"vmId": "e11ebedc-019d-427f-84dd-56cd4388d3a8",
"vmScaleSetName": "",
"vmSize": "Standard_D2s_v3",
"zone": ""
}
| 147 |
4,879 | #pragma once
#include <cstdint>
#include <string>
namespace coding
{
// Computes the Burrows-Wheeler transform of the string |s|, stores
// result in the string |r|. Note - the size of |r| must be |n|.
// Returns the index of the original string among all sorted
// rotations of |s|.
//
// *NOTE* in contrast to popular explanations of BWT, we do not append
// to |s| trailing '$' that is less than any other character in |s|.
// The reason is that |s| can be an arbitrary byte string, with zero
// bytes inside, so implementation of this trailing '$' is expensive,
// and, actually, not needed.
//
// For example, if |s| is "abaaba", canonical BWT is:
//
// Sorted rotations: canonical BWT:
// $abaaba a
// a$abaab b
// aaba$ab b
// aba$aba a
// * abaaba$ $
// ba$abaa a
// baaba$a a
//
// where '*' denotes original string.
//
// Our implementation sorts rotations as if there were an implicit '$'
// that is less than any other byte in |s|, but it does not return this
// '$'. Therefore, the order of rotations will be the same
// as above, without the first '$abaaba':
//
// Sorted rotations: ours BWT:
// aabaab b
// aabaab b
// abaaba a
// * abaaba a
// baabaa a
// baabaa a
//
// where '*' denotes the index of original string. As one can see,
// there are two 'abaaba' strings, but as mentioned, rotations are
// sorted like there is an implicit '$' at the end of the original
// string. It's possible to get from "ours BWT" to the "original BWT",
// see the code for details.
//
// Complexity: O(n) time and O(n) memory.
size_t BWT(size_t n, uint8_t const * s, uint8_t * r);
size_t BWT(std::string const & s, std::string & r);
// Inverse Burrows-Wheeler transform.
//
// Complexity: O(n) time and O(n) memory.
void RevBWT(size_t n, size_t start, uint8_t const * s, uint8_t * r);
void RevBWT(size_t start, std::string const & s, std::string & r);
} // namespace coding
| 881 |
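The comment block above explains a sentinel-free BWT variant: rotations are ordered as if a character smaller than any byte sat at the wrap-around point, but that character is never emitted. The naive Python sketch below reproduces the "ours BWT" from the worked example; it is an O(n^2 log n) illustration, not the O(n) implementation the header declares. Sorting rotation start positions by the suffix s[i:] models the implicit '$', because a proper prefix compares smaller than any of its extensions.

```python
def bwt_no_sentinel(s):
    """Naive sentinel-free BWT sketch (O(n^2 log n)), for illustration only."""
    n = len(s)
    # Sorting start positions by the suffix s[i:] orders the rotations as if
    # an implicit '$' (smaller than every byte) sat at the wrap-around point.
    order = sorted(range(n), key=lambda i: s[i:])
    # The last character of the rotation starting at i is s[i - 1]
    # (s[-1] when i == 0, i.e. the unrotated string itself).
    r = "".join(s[i - 1] for i in order)
    start = order.index(0)  # sorted position of the original string
    return start, r

if __name__ == "__main__":
    start, r = bwt_no_sentinel("abaaba")
    print(start, r)  # 3 bbaaaa, matching the worked example in the comment
```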
1,144 | <gh_stars>1000+
/**
*
*/
package de.metas.callcenter.model;
/*
* #%L
* de.metas.swat.base
* %%
* Copyright (C) 2015 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
import java.sql.ResultSet;
import java.sql.Timestamp;
import java.util.Properties;
import org.adempiere.exceptions.AdempiereException;
import org.compiere.model.I_C_BPartner;
import org.compiere.model.I_R_Group;
import org.compiere.model.I_R_Request;
import org.compiere.model.Query;
import org.compiere.util.DB;
import org.compiere.util.Env;
import org.compiere.util.TimeUtil;
/**
* @author <NAME>, <EMAIL>
*/
public class MRGroupProspect extends X_R_Group_Prospect
{
/**
*
*/
private static final long serialVersionUID = -8146063466656352670L;
/** Lock expire time (minutes) */
public static final int LOCK_EXPIRE_MIN = 24 * 60;
public static MRGroupProspect get(Properties ctx, I_RV_R_Group_Prospect contact, String trxName)
{
String whereClause = COLUMNNAME_R_Group_ID+"=?"
+" AND "+COLUMNNAME_C_BPartner_ID+"=?"
+" AND "+COLUMNNAME_AD_User_ID+"=?";
MRGroupProspect gp = new Query(ctx, Table_Name, whereClause, trxName)
.setParameters(new Object[]{
contact.getR_Group_ID(),
contact.getC_BPartner_ID(),
contact.getAD_User_ID()})
.firstOnly();
return gp;
}
public static MRGroupProspect get(Properties ctx, I_R_Request request, String trxName)
{
final String whereClause = COLUMNNAME_R_Group_ID+"=?"
+" AND "+COLUMNNAME_C_BPartner_ID+"=?"
+" AND "+COLUMNNAME_AD_User_ID+"=?";
MRGroupProspect gp = new Query(ctx, Table_Name, whereClause, trxName)
.setParameters(new Object[]{
request.getR_Group_ID(),
request.getC_BPartner_ID(),
request.getAD_User_ID()})
.firstOnly();
return gp;
}
/**
* Check if contact already added
* @param ctx
* @param R_Group_ID bundle
* @param C_BPartner_ID partner
* @param AD_User_ID (ignored)
* @param trxName
* @return
*/
public static boolean existContact(Properties ctx, int R_Group_ID, int C_BPartner_ID, int AD_User_ID, String trxName)
{
final String whereClause = COLUMNNAME_R_Group_ID+"=?"
+" AND "+COLUMNNAME_C_BPartner_ID+"=?"
// +" AND "+COLUMNNAME_AD_User_ID+"=?"
;
boolean match = new Query(ctx, Table_Name, whereClause, trxName)
.setParameters(new Object[]{R_Group_ID, C_BPartner_ID})
.anyMatch();
return match;
}
public static void linkRequest(Properties ctx, I_R_Request request, String trxName)
{
MRGroupProspect gp = get(ctx, request, trxName);
if (gp == null)
return; // TODO: throw error?
gp.setR_Request_ID(request.getR_Request_ID());
gp.unlockContact();
gp.saveEx();
}
public MRGroupProspect(Properties ctx, int id, String trxName)
{
super(ctx, id, trxName);
}
public MRGroupProspect(Properties ctx, ResultSet rs, String trxName)
{
super(ctx, rs, trxName);
}
/**
* Creates a new record.
*/
public MRGroupProspect(Properties ctx, int R_Group_ID, int C_BPartner_ID, int AD_User_ID, String trxName)
{
super(ctx, 0, trxName);
setR_Group_ID(R_Group_ID);
setC_BPartner_ID(C_BPartner_ID);
setAD_User_ID(AD_User_ID);
}
@Override
protected boolean afterSave(boolean newRecord, boolean success)
{
if (!success)
return success;
BundleUtil.updateCCM_Bundle_Status(getR_Group_ID(), get_TrxName());
return true;
}
@Override
protected boolean beforeDelete()
{
if (getR_Request_ID() > 0)
{
throw new AdempiereException("@R_Request_ID@");
}
expireLock();
if (isLocked())
{
throw new AdempiereException("de.metas.callcenter.CannotDeleteLocked");
}
return true;
}
@Override
protected boolean afterDelete(boolean success)
{
if (!success)
return success;
BundleUtil.updateCCM_Bundle_Status(getR_Group_ID(), get_TrxName());
return true;
}
public void lockContact()
{
int AD_User_ID = Env.getAD_User_ID(getCtx());
Timestamp ts = new Timestamp(System.currentTimeMillis());
setLocked(true);
setLockedBy(AD_User_ID);
setLockedDate(ts);
}
public void unlockContact()
{
setLocked(false);
set_Value(COLUMNNAME_LockedBy, null);
setLockedDate(null);
}
public boolean isExpired()
{
if (!isLocked())
return true;
Timestamp dateExpire = TimeUtil.addMinutes(getLockedDate(), LOCK_EXPIRE_MIN);
Timestamp now = new Timestamp(System.currentTimeMillis());
return dateExpire.before(now);
}
public void expireLock()
{
if (isLocked() && isExpired())
unlockContact();
}
@Override
public String toString()
{
String bundleName = DB.getSQLValueString(get_TrxName(),
"SELECT "+I_R_Group.COLUMNNAME_Name+" FROM "+I_R_Group.Table_Name
+" WHERE "+I_R_Group.COLUMNNAME_R_Group_ID+"=?",
getR_Group_ID());
String bpName = DB.getSQLValueString(get_TrxName(),
"SELECT "+I_C_BPartner.COLUMNNAME_Value+"||'_'||"+I_C_BPartner.COLUMNNAME_Name
+" FROM "+I_C_BPartner.Table_Name
+" WHERE "+I_C_BPartner.COLUMNNAME_C_BPartner_ID+"=?",
getC_BPartner_ID());
return bundleName+"/"+bpName;
}
}
| 2,265 |
680 | package org.ff4j.property;
/*
* #%L
* ff4j-core
* %%
* Copyright (C) 2013 - 2016 FF4J
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
/**
* Creation of a property.
*
* @author <NAME> (@clunven)
*/
public class PropertyLocalDateTime extends Property< LocalDateTime > {
/** serialVersionUID. */
private static final long serialVersionUID = -620523134883483837L;
/** formatter for creation date and last modified. */
protected static final DateTimeFormatter FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS");
/**
* Constructor by property name.
*
* @param name
* property name
*/
public PropertyLocalDateTime(String name) {
super(name);
}
/**
* Constructor by string expression.
*
* @param uid
* unique name
* @param value
* property value as string
*/
public PropertyLocalDateTime(String uid, String value) {
super(uid, value);
}
/**
* Constructor by LocalDateTime value.
*
* @param uid
* unique name
* @param date
* property value as a LocalDateTime
*/
public PropertyLocalDateTime(String uid, LocalDateTime date) {
super(uid, date);
}
/**
* Serialized value as String
*
* @return
* current value as a string or null
*/
public String asString() {
if (value == null) return null;
return value.format(FORMATTER);
}
/** {@inheritDoc} */
@Override
public LocalDateTime fromString(String v) {
return LocalDateTime.parse(v, FORMATTER);
}
}
| 841 |
1,389 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import random
import argparse
import numpy as np
from tqdm import tqdm
from tensorboardX import SummaryWriter
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import pgl
from pgl.utils.data import Dataloader
### importing OGB-LSC
from ogb.lsc import PCQM4MDataset, PCQM4MEvaluator
from ogb.utils import smiles2graph
from dataset import MolDataset, Subset, CollateFn
from gnn import GNN
reg_criterion = paddle.nn.loss.L1Loss()
def train(model, loader, optimizer):
model.train()
loss_accum = 0
for step, (g, labels) in enumerate(tqdm(loader, desc="Iteration")):
g = g.tensor()
labels = paddle.to_tensor(labels)
pred = paddle.reshape(model(g), shape=[-1, ])
loss = reg_criterion(pred, labels)
loss.backward()
optimizer.step()
optimizer.clear_grad()
loss_accum += loss.numpy()
return loss_accum / (step + 1)
@paddle.no_grad()
def eval(model, loader, evaluator):
model.eval()
y_true = []
y_pred = []
for step, (g, labels) in enumerate(tqdm(loader, desc="Iteration")):
g = g.tensor()
# labels = paddle.to_tensor(labels)
pred = model(g)
y_true.append(labels.reshape(-1, 1))
y_pred.append(pred.numpy().reshape(-1, 1))
y_true = np.concatenate(y_true).reshape(-1, )
y_pred = np.concatenate(y_pred).reshape(-1, )
input_dict = {"y_true": y_true, "y_pred": y_pred}
return evaluator.eval(input_dict)["mae"]
@paddle.no_grad()
def test(model, loader):
model.eval()
y_pred = []
for step, (g, labels) in enumerate(tqdm(loader, desc="Iteration")):
g = g.tensor()
# labels = paddle.to_tensor(labels)
pred = model(g)
y_pred.append(pred.numpy().reshape(-1, 1))
y_pred = np.concatenate(y_pred).reshape(-1, )
return y_pred
def main():
# Training settings
parser = argparse.ArgumentParser(
description='GNN baselines on pcqm4m with PGL')
parser.add_argument('--use_cuda', action='store_true')
parser.add_argument(
'--device',
type=int,
default=0,
help='which gpu to use if any (default: 0)')
parser.add_argument(
'--gnn',
type=str,
default='gin-virtual',
help='GNN gin, gin-virtual, or gcn, or gcn-virtual (default: gin-virtual)'
)
parser.add_argument(
'--graph_pooling',
type=str,
default='sum',
help='graph pooling strategy mean or sum (default: sum)')
parser.add_argument(
'--drop_ratio',
type=float,
default=0,
help='dropout ratio (default: 0)')
parser.add_argument(
'--num_layers',
type=int,
default=5,
help='number of GNN message passing layers (default: 5)')
parser.add_argument(
'--emb_dim',
type=int,
default=600,
help='dimensionality of hidden units in GNNs (default: 600)')
parser.add_argument('--train_subset', action='store_true')
parser.add_argument(
'--batch_size',
type=int,
default=256,
help='input batch size for training (default: 256)')
parser.add_argument(
'--epochs',
type=int,
default=100,
help='number of epochs to train (default: 100)')
parser.add_argument(
'--num_workers',
type=int,
default=1,
help='number of workers (default: 1)')
parser.add_argument(
'--log_dir', type=str, default="", help='tensorboard log directory')
parser.add_argument(
'--checkpoint_dir',
type=str,
default='',
help='directory to save checkpoint')
parser.add_argument(
'--save_test_dir',
type=str,
default='',
help='directory to save test submission file')
args = parser.parse_args()
print(args)
random.seed(42)
np.random.seed(42)
paddle.seed(42)
if not args.use_cuda:
paddle.set_device("cpu")
### automatic dataloading and splitting
class Config():
def __init__(self):
self.base_data_path = "./dataset"
config = Config()
ds = MolDataset(config)
split_idx = ds.get_idx_split()
train_ds = Subset(ds, split_idx['train'])
valid_ds = Subset(ds, split_idx['valid'])
test_ds = Subset(ds, split_idx['test'])
print("Train exapmles: ", len(train_ds))
print("Valid exapmles: ", len(valid_ds))
print("Test exapmles: ", len(test_ds))
### automatic evaluator. takes dataset name as input
evaluator = PCQM4MEvaluator()
train_loader = Dataloader(
train_ds,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
collate_fn=CollateFn())
valid_loader = Dataloader(
valid_ds,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
collate_fn=CollateFn())
if args.save_test_dir != '':
test_loader = Dataloader(
test_ds,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.num_workers,
collate_fn=CollateFn())
if args.checkpoint_dir != '':
os.makedirs(args.checkpoint_dir, exist_ok=True)
shared_params = {
'num_layers': args.num_layers,
'emb_dim': args.emb_dim,
'drop_ratio': args.drop_ratio,
'graph_pooling': args.graph_pooling
}
if args.gnn == 'gin':
model = GNN(gnn_type='gin', virtual_node=False, **shared_params)
elif args.gnn == 'gin-virtual':
model = GNN(gnn_type='gin', virtual_node=True, **shared_params)
elif args.gnn == 'gcn':
model = GNN(gnn_type='gcn', virtual_node=False, **shared_params)
elif args.gnn == 'gcn-virtual':
model = GNN(gnn_type='gcn', virtual_node=True, **shared_params)
else:
raise ValueError('Invalid GNN type')
num_params = sum(p.numel() for p in model.parameters())
print(f'#Params: {num_params}')
if args.log_dir != '':
writer = SummaryWriter(log_dir=args.log_dir)
best_valid_mae = 1000
scheduler = paddle.optimizer.lr.StepDecay(
learning_rate=0.001, step_size=300, gamma=0.25)
optimizer = paddle.optimizer.Adam(
learning_rate=scheduler, parameters=model.parameters())
msg = "ogbg_lsc_paddle_baseline\n"
for epoch in range(1, args.epochs + 1):
print("=====Epoch {}".format(epoch))
print('Training...')
train_mae = train(model, train_loader, optimizer)
print('Evaluating...')
valid_mae = eval(model, valid_loader, evaluator)
print({'Train': train_mae, 'Validation': valid_mae})
if args.log_dir != '':
writer.add_scalar('valid/mae', valid_mae, epoch)
writer.add_scalar('train/mae', train_mae, epoch)
if valid_mae < best_valid_mae:
best_valid_mae = valid_mae
if args.checkpoint_dir != '':
print('Saving checkpoint...')
paddle.save(model.state_dict(),
os.path.join(args.checkpoint_dir,
'checkpoint.pdparams'))
if args.save_test_dir != '':
print('Predicting on test data...')
y_pred = test(model, test_loader)
print('Saving test submission file...')
evaluator.save_test_submission({
'y_pred': y_pred
}, args.save_test_dir)
scheduler.step()
print(f'Best validation MAE so far: {best_valid_mae}')
try:
msg +="Epoch: %d | Train: %.6f | Valid: %.6f | Best Valid: %.6f\n" \
% (epoch, train_mae, valid_mae, best_valid_mae)
print(msg)
except:
continue
if args.log_dir != '':
writer.close()
if __name__ == "__main__":
main()
| 3,912 |
458 | package cppclassanalyzer.plugin;
import ghidra.framework.plugintool.util.PluginPackage;
import resources.ResourceManager;
/**
* The {@link PluginPackage} for the {@value #NAME}
*/
public class CppClassAnalyzerPluginPackage extends PluginPackage {
public static final String NAME = "Ghidra C++ Class Analyzer";
private static final String DESCRIPTION = "These plugins are for analyzing C++ Classes.";
public CppClassAnalyzerPluginPackage() {
super(NAME, ResourceManager.loadImage("images/cpp_logo.png"), DESCRIPTION);
}
}
| 153 |
575 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef THIRD_PARTY_BLINK_RENDERER_CORE_PAGE_SCROLLING_SNAP_COORDINATOR_H_
#define THIRD_PARTY_BLINK_RENDERER_CORE_PAGE_SCROLLING_SNAP_COORDINATOR_H_
#include "base/macros.h"
#include "cc/input/scroll_snap_data.h"
#include "cc/input/snap_selection_strategy.h"
#include "third_party/blink/renderer/core/core_export.h"
#include "third_party/blink/renderer/core/css/css_primitive_value_mappings.h"
#include "third_party/blink/renderer/platform/heap/handle.h"
namespace blink {
class LayoutBox;
// Snap Coordinator keeps track of snap containers and all of their associated
// snap areas.
//
// Snap container:
// A scroll container that has 'scroll-snap-type' value other
// than 'none'.
// However, we maintain a snap container entry for a scrollable area even if
// its snap type is 'none'. This is because while the scroller does not snap,
// it still captures the snap areas in its subtree.
// Snap area:
// A snap container's descendant that contributes snap positions. An element
// only contributes snap positions to its nearest ancestor (on the element’s
// containing block chain) scroll container.
//
// For more information see spec: https://drafts.csswg.org/css-snappoints/
class CORE_EXPORT SnapCoordinator final
: public GarbageCollected<SnapCoordinator> {
public:
explicit SnapCoordinator();
~SnapCoordinator();
void Trace(Visitor* visitor) const {}
void AddSnapContainer(LayoutBox& snap_container);
void RemoveSnapContainer(LayoutBox& snap_container);
void SnapContainerDidChange(LayoutBox&);
void SnapAreaDidChange(LayoutBox&, cc::ScrollSnapAlign);
// Calculate the SnapAreaData for the specific snap area in its snap
// container.
cc::SnapAreaData CalculateSnapAreaData(const LayoutBox& snap_area,
const LayoutBox& snap_container);
bool AnySnapContainerDataNeedsUpdate() const {
return any_snap_container_data_needs_update_;
}
void SetAnySnapContainerDataNeedsUpdate(bool needs_update) {
any_snap_container_data_needs_update_ = needs_update;
}
// Called by Document::PerformScrollSnappingTasks() whenever a style or layout
// change happens. This will update all snap container data that was affected
// by the style/layout change.
void UpdateAllSnapContainerDataIfNeeded();
// Resnaps all snap containers to their current snap target, or to the
// closest snap point if there is no target (e.g. on the initial layout or if
// the previous snapped target was removed).
void ResnapAllContainersIfNeeded();
void UpdateSnapContainerData(LayoutBox&);
#ifndef NDEBUG
void ShowSnapAreaMap();
void ShowSnapAreasFor(const LayoutBox*);
void ShowSnapDataFor(const LayoutBox*);
#endif
private:
friend class SnapCoordinatorTest;
HashSet<LayoutBox*> snap_containers_;
bool any_snap_container_data_needs_update_ = true;
// Used for reporting to UMA when snapping on the initial layout affects the
// initial scroll position.
bool did_first_resnap_all_containers_ = false;
DISALLOW_COPY_AND_ASSIGN(SnapCoordinator);
};
} // namespace blink
#endif // THIRD_PARTY_BLINK_RENDERER_CORE_PAGE_SCROLLING_SNAP_COORDINATOR_H_
| 1,065 |
5,168 | <gh_stars>1000+
/**
* \file src/gopt/include/megbrain/gopt/misc.h
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#pragma once
#include <vector>
#include "megbrain/gopt/framework.h"
namespace mgb {
namespace gopt {
/*!
* \brief remove oprs unrelated to computing, such as
* MarkNoBroadcastElemwise
*/
class RemoveNonComputingOprPass final : public Pass {
public:
const char* name() const override;
void apply(OptState& opt) const override;
};
/*!
* \brief expand VirtualGrad opr to actual grads
*/
class ExpandVirtualGradPass final : public Pass {
public:
const char* name() const override;
void apply(OptState& opt) const override;
};
/*!
* \brief delay Broadcast opr after a chain of unary oprs.
*/
class DelayBroadcastPass final : public Pass {
static bool allowed_opr(OperatorNodeBase*);
public:
const char* name() const override;
void apply(OptState& opt) const override;
};
/*!
* \brief recompute the TypeCvt if input's dtype_size > output's dtype_size
* and long-term dependency exists.
* Reduce the memory usage.
*/
class RecompTypeCvtPass final : public Pass {
public:
RecompTypeCvtPass(size_t threshold = 20) : m_threshold(threshold) {}
const char* name() const override;
void apply(OptState& opt) const override;
private:
//! decide whether recomputation is needed: recompute if the timestamp gap
//! between two operators exceeds this threshold.
size_t m_threshold;
};
/*!
* \brief Combine TypeCvt and Reduce operator into a single Reduce opr.
* For now, we support 16 -> 32 only.
*/
class CombineAstypeAndReducePass final : public Pass {
public:
const char* name() const override;
void apply(OptState& opt) const override;
};
class RemoveRedundantTypeCvtPass final : public Pass {
private:
//! Should we remove the TypeCvt chain of form A -> B -> A?
static bool should_remove(DType A, DType B);
public:
const char* name() const override;
void apply(OptState& opt) const override;
};
class RemoveRedundantCopyPass final : public Pass {
private:
//! Remove the copy chain of form cpu -> cpu -> cpu,
//! cpu -> gpu -> cpu
static bool should_remove(const CompNode& A, const CompNode& B);
public:
const char* name() const override;
void apply(OptState& opt) const override;
};
//! remove execution mask for const PPVs in conditional execution
class CondExecConstPredicateFolding final : public Pass {
public:
const char* name() const override;
void apply(OptState& opt) const override;
};
//! scan allreduces of param grads
class PackAllReduceScanPass final : public Pass {
public:
const char* name() const override;
void apply(OptState& opt) const override;
private:
// check pattern param -> grad -> allreduce
static bool check_pattern(OperatorNodeBase* opr);
};
//! pack allreduces of param grads
class PackAllReduceReplacePass final : public Pass {
public:
class GroupInfo;
const char* name() const override;
void apply(OptState& opt) const override;
// collect allreduces and divide into groups
static uint64_t collect_groups(
OperatorNodeBase* opr,
ThinHashMap<uint64_t, std::shared_ptr<GroupInfo>>& group_info,
ThinHashMap<uint64_t, cg::OprNodeArray>& groups);
// divide groups into packs, max_size in MB
static void divide_packs(
const ThinHashMap<uint64_t, cg::OprNodeArray>& groups,
ThinHashMap<uint64_t, std::vector<cg::OprNodeArray>>& packs,
size_t max_size);
// insert packed operators and update replace_map
static void insert_packed_oprs(
size_t pack_id, const cg::OprNodeArray& pack,
std::shared_ptr<GroupInfo> info,
ThinHashMap<VarNode*, VarNode*>& replace_map, int priority);
};
class RemoveShapeHintPass final : public Pass {
public:
const char* name() const override;
void apply(OptState& opt) const override;
};
} // namespace gopt
} // namespace mgb
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
| 1,489 |
8,232 | // Copyright (c) Microsoft Corporation.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#include <assert.h>
#include <atomic>
#include <regex>
#include <thread>
#include <vector>
using namespace std;
const int REPEATS = 50; // Repeat the whole test this many times.
const int N = 4; // Spin up N matching threads and N copying threads.
const int COPIES = 500000; // Each copying thread performs this many copies.
int main() {
atomic<bool> atom_success(true);
for (int repeat = 0; repeat < REPEATS; ++repeat) {
vector<thread> threads;
atomic<int> atom_copiers(N);
const regex r("a+b+c+");
for (int i = 0; i < N; ++i) {
threads.emplace_back([&atom_copiers, &r, &atom_success] {
while (atom_copiers > 0) {
const regex dupe(r);
if (!regex_match("aaabbbccc", dupe)) {
atom_success = false;
}
}
});
}
for (int i = 0; i < N; ++i) {
threads.emplace_back([&atom_copiers, &r] {
for (int k = 0; k < COPIES; ++k) {
const regex dupe(r);
}
--atom_copiers;
});
}
for (auto& t : threads) {
t.join();
}
}
assert(atom_success);
}
| 752 |
5,813 | <filename>core/src/main/java/org/apache/druid/math/expr/ExpressionType.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.math.expr;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import com.fasterxml.jackson.databind.ser.std.ToStringSerializer;
import org.apache.druid.java.util.common.IAE;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.segment.column.BaseTypeSignature;
import org.apache.druid.segment.column.ColumnType;
import org.apache.druid.segment.column.TypeSignature;
import org.apache.druid.segment.column.Types;
import org.apache.druid.segment.column.ValueType;
import javax.annotation.Nullable;
/**
* The type system used to process Druid expressions. This is basically {@link ColumnType}, but without
* {@link ColumnType#FLOAT} because the expression processing system does not currently directly support them.
*/
@JsonSerialize(using = ToStringSerializer.class)
public class ExpressionType extends BaseTypeSignature<ExprType>
{
public static final ExpressionType STRING =
new ExpressionType(ExprType.STRING, null, null);
public static final ExpressionType LONG =
new ExpressionType(ExprType.LONG, null, null);
public static final ExpressionType DOUBLE =
new ExpressionType(ExprType.DOUBLE, null, null);
public static final ExpressionType STRING_ARRAY =
new ExpressionType(ExprType.ARRAY, null, STRING);
public static final ExpressionType LONG_ARRAY =
new ExpressionType(ExprType.ARRAY, null, LONG);
public static final ExpressionType DOUBLE_ARRAY =
new ExpressionType(ExprType.ARRAY, null, DOUBLE);
public static final ExpressionType UNKNOWN_COMPLEX =
new ExpressionType(ExprType.COMPLEX, null, null);
@JsonCreator
public ExpressionType(
@JsonProperty("type") ExprType exprType,
@JsonProperty("complexTypeName") @Nullable String complexTypeName,
@JsonProperty("elementType") @Nullable ExpressionType elementType
)
{
super(ExpressionTypeFactory.getInstance(), exprType, complexTypeName, elementType);
}
@Nullable
@JsonCreator
public static ExpressionType fromString(@Nullable String typeName)
{
return Types.fromString(ExpressionTypeFactory.getInstance(), typeName);
}
/**
* If an {@link ExpressionType} is an array, return {@link ExpressionType#getElementType()}, otherwise the type is
* returned unchanged.
*/
@Nullable
public static ExpressionType elementType(@Nullable ExpressionType type)
{
if (type != null && type.isArray()) {
return (ExpressionType) type.getElementType();
}
return type;
}
/**
* Convert a primitive {@link ExpressionType} into an array of that type. Non-primitive types are passed through,
* even if they are not arrays.
*/
@Nullable
public static ExpressionType asArrayType(@Nullable ExpressionType elementType)
{
if (elementType != null && elementType.isPrimitive()) {
switch (elementType.getType()) {
case STRING:
return STRING_ARRAY;
case LONG:
return LONG_ARRAY;
case DOUBLE:
return DOUBLE_ARRAY;
}
}
return elementType;
}
/**
* The expression system does not distinguish between {@link ValueType#FLOAT} and {@link ValueType#DOUBLE}, so,
* this method will convert {@link ValueType#FLOAT} to {@link #DOUBLE}. Null values are not allowed in this method,
* and will result in an {@link IllegalStateException}
*
* @throws IllegalStateException
*/
public static ExpressionType fromColumnTypeStrict(@Nullable TypeSignature<ValueType> valueType)
{
if (valueType == null) {
throw new IllegalStateException("Unsupported unknown value type");
}
switch (valueType.getType()) {
case LONG:
return LONG;
case FLOAT:
case DOUBLE:
return DOUBLE;
case STRING:
return STRING;
case ARRAY:
switch (valueType.getElementType().getType()) {
case LONG:
return LONG_ARRAY;
case FLOAT:
case DOUBLE:
return DOUBLE_ARRAY;
case STRING:
return STRING_ARRAY;
}
return ExpressionTypeFactory.getInstance().ofArray(fromColumnTypeStrict(valueType.getElementType()));
case COMPLEX:
return ExpressionTypeFactory.getInstance().ofComplex(valueType.getComplexTypeName());
default:
throw new ISE("Unsupported value type[%s]", valueType);
}
}
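  // Illustrative examples of the coercion described above (a sketch, not exhaustive):
  //   fromColumnTypeStrict(ColumnType.FLOAT)      -> ExpressionType.DOUBLE
  //   fromColumnTypeStrict(ColumnType.LONG_ARRAY) -> ExpressionType.LONG_ARRAY
  //   fromColumnTypeStrict(null)                  -> throws IllegalStateException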
/**
* The expression system does not distinguish between {@link ValueType#FLOAT} and {@link ValueType#DOUBLE}, so this
* method will convert {@link ValueType#FLOAT} to {@link #DOUBLE}.
*/
@Nullable
public static ExpressionType fromColumnType(@Nullable TypeSignature<ValueType> valueType)
{
if (valueType == null) {
return null;
}
switch (valueType.getType()) {
case LONG:
return LONG;
case FLOAT:
case DOUBLE:
return DOUBLE;
case STRING:
return STRING;
case ARRAY:
switch (valueType.getElementType().getType()) {
case LONG:
return LONG_ARRAY;
case FLOAT:
case DOUBLE:
return DOUBLE_ARRAY;
case STRING:
return STRING_ARRAY;
}
return ExpressionTypeFactory.getInstance().ofArray(fromColumnType(valueType.getElementType()));
case COMPLEX:
return ExpressionTypeFactory.getInstance().ofComplex(valueType.getComplexTypeName());
default:
return null;
}
}
/**
* Convert {@link ExpressionType} to the corresponding {@link ColumnType}
*/
public static ColumnType toColumnType(ExpressionType exprType)
{
switch (exprType.getType()) {
case LONG:
return ColumnType.LONG;
case DOUBLE:
return ColumnType.DOUBLE;
case STRING:
return ColumnType.STRING;
case ARRAY:
switch (exprType.getElementType().getType()) {
case LONG:
return ColumnType.LONG_ARRAY;
case DOUBLE:
return ColumnType.DOUBLE_ARRAY;
case STRING:
return ColumnType.STRING_ARRAY;
default:
return ColumnType.ofArray(toColumnType((ExpressionType) exprType.getElementType()));
}
case COMPLEX:
return ColumnType.ofComplex(exprType.getComplexTypeName());
default:
throw new ISE("Unsupported expression type[%s]", exprType);
}
}
public static void checkNestedArrayAllowed(ExpressionType outputType)
{
if (outputType.isArray() && outputType.getElementType().isArray() && !ExpressionProcessing.allowNestedArrays()) {
throw new IAE("Cannot create a nested array type [%s], 'druid.expressions.allowNestedArrays' must be set to true", outputType);
}
}
}
| 2,797 |
397 | <filename>robosuite/environments/__init__.py
from .base import REGISTERED_ENVS, MujocoEnv
ALL_ENVIRONMENTS = REGISTERED_ENVS.keys()
| 53 |
778 | <filename>modEvtMgr/test/org/aion/evtmgr/InterfaceCoverageTest.java
package org.aion.evtmgr;
import org.aion.evtmgr.impl.evt.EventBlock;
import org.aion.evtmgr.impl.handler.BlockHandler;
import org.junit.Test;
public class InterfaceCoverageTest {
@Test
public void testIEvent() {
IEvent event = new EventBlock(EventBlock.CALLBACK.ONBLOCK0);
}
@Test
public void testIHandler() {
IHandler handler = new BlockHandler();
IHandler.TYPE i = IHandler.TYPE.GETTYPE(1);
IHandler.TYPE i2 = IHandler.TYPE.GETTYPE(9);
}
}
| 233 |
487 | /*
* Copyright 2012-2014 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.intellij.erlang.rebar.runner;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.util.containers.ContainerUtil;
import org.intellij.erlang.psi.ErlangFile;
import org.intellij.erlang.psi.ErlangFunction;
import org.jetbrains.annotations.NotNull;
import java.util.Collection;
import java.util.Set;
class RebarEunitConfigurationUtil {
private RebarEunitConfigurationUtil() {
}
private static boolean appendSuitesOption(StringBuilder commandBuilder, Collection<ErlangFile> suites) {
boolean suiteAdded = false;
int lengthBeforeAppendingSuitesOption = commandBuilder.length();
commandBuilder.append("suites=");
for (ErlangFile suiteFile : suites) {
VirtualFile virtualFile = suiteFile.getVirtualFile();
if (virtualFile != null) {
commandBuilder.append(virtualFile.getNameWithoutExtension());
commandBuilder.append(",");
suiteAdded = true;
}
}
commandBuilder.setLength(commandBuilder.length() - 1);
if (!suiteAdded) {
commandBuilder.setLength(lengthBeforeAppendingSuitesOption);
}
return suiteAdded;
}
private static void appendTestsOption(StringBuilder commandBuilder, Collection<ErlangFunction> functions) {
if (functions.isEmpty()) return;
Set<String> distinctFunctionNames = ContainerUtil.map2Set(functions, ErlangFunction::getName);
commandBuilder.append("tests=");
commandBuilder.append(StringUtil.join(distinctFunctionNames, ","));
}
@NotNull
static String createDefaultRebarCommand(Collection<ErlangFile> suites, Collection<ErlangFunction> functions, boolean failIfNoSuitesSpecified) {
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.append("eunit ");
if (!appendSuitesOption(commandBuilder, suites) && failIfNoSuitesSpecified) return "";
commandBuilder.append(' ');
appendTestsOption(commandBuilder, functions);
return commandBuilder.toString();
}
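  // Illustrative output (a sketch based on the string building above): for suite files
  // {foo_tests, bar_tests} and functions {parse_test, render_test} this yields roughly
  //   "eunit suites=foo_tests,bar_tests tests=parse_test,render_test"
  // (function order may vary since distinct names are collected into a set), and it returns
  // "" when failIfNoSuitesSpecified is true but no suite file resolves to a VirtualFile.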
}
| 796 |
1,350 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.authorization.fluent;
import com.azure.core.annotation.ReturnType;
import com.azure.core.annotation.ServiceMethod;
import com.azure.core.http.rest.Response;
import com.azure.core.util.Context;
import com.azure.resourcemanager.authorization.fluent.models.GroupLifecyclePoliciesAddGroupRequestBody;
import com.azure.resourcemanager.authorization.fluent.models.GroupLifecyclePoliciesRemoveGroupRequestBody;
import reactor.core.publisher.Mono;
/** An instance of this class provides access to all the operations defined in GroupLifecyclePoliciesClient. */
public interface GroupLifecyclePoliciesClient {
/**
* Invoke action addGroup.
*
* @param groupLifecyclePolicyId key: id of groupLifecyclePolicy.
* @param body Action parameters.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.resourcemanager.authorization.fluent.models.OdataErrorMainException thrown if the request is
* rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
Mono<Response<Boolean>> addGroupWithResponseAsync(
String groupLifecyclePolicyId, GroupLifecyclePoliciesAddGroupRequestBody body);
/**
* Invoke action addGroup.
*
* @param groupLifecyclePolicyId key: id of groupLifecyclePolicy.
* @param body Action parameters.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.resourcemanager.authorization.fluent.models.OdataErrorMainException thrown if the request is
* rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
Mono<Boolean> addGroupAsync(String groupLifecyclePolicyId, GroupLifecyclePoliciesAddGroupRequestBody body);
/**
* Invoke action addGroup.
*
* @param groupLifecyclePolicyId key: id of groupLifecyclePolicy.
* @param body Action parameters.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.resourcemanager.authorization.fluent.models.OdataErrorMainException thrown if the request is
* rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
boolean addGroup(String groupLifecyclePolicyId, GroupLifecyclePoliciesAddGroupRequestBody body);
/**
* Invoke action addGroup.
*
* @param groupLifecyclePolicyId key: id of groupLifecyclePolicy.
* @param body Action parameters.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.resourcemanager.authorization.fluent.models.OdataErrorMainException thrown if the request is
* rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
Response<Boolean> addGroupWithResponse(
String groupLifecyclePolicyId, GroupLifecyclePoliciesAddGroupRequestBody body, Context context);
/**
* Invoke action removeGroup.
*
* @param groupLifecyclePolicyId key: id of groupLifecyclePolicy.
* @param body Action parameters.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.resourcemanager.authorization.fluent.models.OdataErrorMainException thrown if the request is
* rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
Mono<Response<Boolean>> removeGroupWithResponseAsync(
String groupLifecyclePolicyId, GroupLifecyclePoliciesRemoveGroupRequestBody body);
/**
* Invoke action removeGroup.
*
* @param groupLifecyclePolicyId key: id of groupLifecyclePolicy.
* @param body Action parameters.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.resourcemanager.authorization.fluent.models.OdataErrorMainException thrown if the request is
* rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
Mono<Boolean> removeGroupAsync(String groupLifecyclePolicyId, GroupLifecyclePoliciesRemoveGroupRequestBody body);
/**
* Invoke action removeGroup.
*
* @param groupLifecyclePolicyId key: id of groupLifecyclePolicy.
* @param body Action parameters.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.resourcemanager.authorization.fluent.models.OdataErrorMainException thrown if the request is
* rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
boolean removeGroup(String groupLifecyclePolicyId, GroupLifecyclePoliciesRemoveGroupRequestBody body);
/**
* Invoke action removeGroup.
*
* @param groupLifecyclePolicyId key: id of groupLifecyclePolicy.
* @param body Action parameters.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws com.azure.resourcemanager.authorization.fluent.models.OdataErrorMainException thrown if the request is
* rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the response.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
Response<Boolean> removeGroupWithResponse(
String groupLifecyclePolicyId, GroupLifecyclePoliciesRemoveGroupRequestBody body, Context context);
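    // Illustrative call sketch (not part of the generated interface; how the request bodies
    // are populated is an assumption to verify against the generated model classes):
    //   GroupLifecyclePoliciesAddGroupRequestBody addBody = ...;
    //   boolean added = client.addGroup("<policy-id>", addBody);
    //   client.removeGroupAsync("<policy-id>", removeBody).subscribe(removed -> { /* ... */ });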
}
| 2,063 |
411 | <filename>lib/cpp-test/hawkes/simulation/hawkes_kernel_power_law_gtest.cpp
// License: BSD 3 clause
#include <gtest/gtest.h>
#include "tick/hawkes/simulation/hawkes_kernels/hawkes_kernel_power_law.h"
class HawkesKernelPowerLawTest : public ::testing::Test {
protected:
double multiplier;
double cutoff;
double exponent;
HawkesKernelPowerLaw hawkes_kernel_power_law;
ArrayDouble timestamps;
std::array<double, 6> test_times{{1., 2., 3.5, 5., 8., 100.}};
HawkesKernelPowerLawTest() : hawkes_kernel_power_law(0, 0, 0){};
void SetUp() override {
multiplier = 0.1;
cutoff = 0.01;
exponent = 1.2;
hawkes_kernel_power_law =
HawkesKernelPowerLaw(multiplier, cutoff, exponent);
timestamps = ArrayDouble{0.31, 0.93, 1.29, 2.32, 4.25};
}
};
TEST_F(HawkesKernelPowerLawTest, is_zero) {
EXPECT_FALSE(hawkes_kernel_power_law.is_zero());
}
TEST_F(HawkesKernelPowerLawTest, get_value) {
EXPECT_DOUBLE_EQ(hawkes_kernel_power_law.get_value(-3), 0);
for (double test_time : test_times) {
EXPECT_DOUBLE_EQ(hawkes_kernel_power_law.get_value(test_time),
multiplier * pow(test_time + cutoff, -exponent));
}
}
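// Illustrative extra check (not part of the original suite): for t >= 0 the kernel follows
// multiplier * (t + cutoff)^(-exponent), so a time outside test_times should obey the same
// closed form. Assumes 0.5 lies inside the kernel support, as the cases above suggest.
TEST_F(HawkesKernelPowerLawTest, get_value_extra_point) {
  EXPECT_DOUBLE_EQ(hawkes_kernel_power_law.get_value(0.5),
                   multiplier * pow(0.5 + cutoff, -exponent));
}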
TEST_F(HawkesKernelPowerLawTest, get_norm) {
EXPECT_DOUBLE_EQ(hawkes_kernel_power_law.get_norm(), 1.2096372793483503);
}
TEST_F(HawkesKernelPowerLawTest, invalid_constructor_parameters) {
EXPECT_THROW(HawkesKernelPowerLaw(multiplier, cutoff, exponent, -1, -1),
std::invalid_argument);
EXPECT_THROW(HawkesKernelPowerLaw(multiplier, cutoff, exponent, -1, 0),
std::invalid_argument);
EXPECT_THROW(HawkesKernelPowerLaw(multiplier, cutoff, exponent, 0, -1),
std::invalid_argument);
EXPECT_THROW(HawkesKernelPowerLaw(multiplier, cutoff, exponent, 0, 0),
std::invalid_argument);
}
#ifdef ADD_MAIN
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
#endif // ADD_MAIN
| 825 |
2,503 | //*********************************************************
//
// Copyright (c) Microsoft. All rights reserved.
// This code is licensed under the MIT License (MIT).
// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.
//
//*********************************************************
#include "../stdafx.h"
#include "EngineTuning.h"
#include "EngineProfiling.h"
#include "GameInput.h"
using namespace std;
wstring FormattedString(const wchar_t* format, ...)
{
wchar_t buffer[256];
va_list ap;
va_start(ap, format);
vswprintf(buffer, 256, format, ap);
return wstring(buffer);
}
namespace EngineTuning
{
    // For delayed registration. Some objects are constructed before we can add them to the
    // graph (due to unreliable order of initialization).
enum { kMaxUnregisteredTweaks = 1024 };
WCHAR s_UnregisteredPath[kMaxUnregisteredTweaks][128];
EngineVar* s_UnregisteredVariable[kMaxUnregisteredTweaks] = { nullptr };
int s_UnregisteredCount = 0;
float s_ScrollOffset = 0.0f;
float s_ScrollTopTrigger = 1080.0f * 0.2f;
float s_ScrollBottomTrigger = 1080.0f * 0.8f;
// Internal functions
void AddToVariableGraph(const wstring& path, EngineVar& var);
void RegisterVariable(const wstring& path, EngineVar& var);
EngineVar* sm_SelectedVariable = nullptr;
bool sm_IsVisible = true;
}
// Not open to the public. Groups are auto-created when a tweaker's path includes the group name.
class VariableGroup : public EngineVar
{
public:
VariableGroup() : m_IsExpanded(false) {}
EngineVar* FindChild(const wstring& name)
{
auto iter = m_Children.find(name);
return iter == m_Children.end() ? nullptr : iter->second;
}
void AddChild(const wstring& name, EngineVar& child)
{
m_Children[name] = &child;
child.m_GroupPtr = this;
}
void Display(wstringstream* renderText, UINT leftMargin, EngineVar* highlightedTweak);
void SaveToFile(FILE* file, int fileMargin);
void LoadSettingsFromFile(FILE* file);
EngineVar* NextVariable(EngineVar* currentVariable);
EngineVar* PrevVariable(EngineVar* currentVariable);
EngineVar* FirstVariable(void);
EngineVar* LastVariable(void);
bool IsExpanded(void) const { return m_IsExpanded; }
virtual void Increment(void) override { m_IsExpanded = true; }
virtual void Decrement(void) override { m_IsExpanded = false; }
virtual void Bang(void) override { m_IsExpanded = !m_IsExpanded; }
virtual void SetValue(FILE*, const wstring&) override {}
static VariableGroup sm_RootGroup;
private:
bool m_IsExpanded;
map<wstring, EngineVar*> m_Children;
};
VariableGroup VariableGroup::sm_RootGroup;
//=====================================================================================================================
// VariableGroup implementation
namespace
{
wstring Indent(UINT spaces)
{
wstring s;
return s.append(spaces, L' ');
}
}
void VariableGroup::Display(wstringstream* renderText, UINT leftMargin, EngineVar* highlightedTweak)
{
for (auto iter = m_Children.begin(); iter != m_Children.end(); ++iter)
{
*renderText << ((iter->second == highlightedTweak) ? L"[x] " : L"[] ");
VariableGroup* subGroup = dynamic_cast<VariableGroup*>(iter->second);
if (subGroup != nullptr)
{
*renderText << Indent(leftMargin);
if (subGroup->IsExpanded())
{
*renderText << L"- ";
}
else
{
*renderText << L"+ ";
}
*renderText << iter->first;
*renderText << L"/...\n";
if (subGroup->IsExpanded())
{
subGroup->Display(renderText, leftMargin + 4, highlightedTweak);
}
}
else
{
*renderText << Indent(leftMargin)
<< iter->first << L": " << iter->second->ToFormattedString()
<< L"\n";
}
}
}
void VariableGroup::SaveToFile(FILE* file, int fileMargin)
{
for (auto iter = m_Children.begin(); iter != m_Children.end(); ++iter)
{
const WCHAR* buffer = (iter->first).c_str();
VariableGroup* subGroup = dynamic_cast<VariableGroup*>(iter->second);
if (subGroup != nullptr)
{
fwprintf(file, L"%*c + %s ...\r\n", fileMargin, L' ', buffer);
subGroup->SaveToFile(file, fileMargin + 5);
}
else if (dynamic_cast<CallbackTrigger*>(iter->second) == nullptr)
{
fwprintf(file, L"%*c %s: %s\r\n", fileMargin, L' ', buffer, iter->second->ToString().c_str());
}
}
}
void VariableGroup::LoadSettingsFromFile(FILE* file)
{
for (auto iter = m_Children.begin(); iter != m_Children.end(); ++iter)
{
VariableGroup* subGroup = dynamic_cast<VariableGroup*>(iter->second);
if (subGroup != nullptr)
{
WCHAR skippedLines[100];
fwscanf_s(file, L"%*s %[^\n]", skippedLines, (int)_countof(skippedLines));
subGroup->LoadSettingsFromFile(file);
}
else
{
iter->second->SetValue(file, iter->first);
}
}
}
EngineVar* VariableGroup::FirstVariable(void)
{
return m_Children.size() == 0 ? nullptr : m_Children.begin()->second;
}
EngineVar* VariableGroup::LastVariable(void)
{
if (m_Children.size() == 0)
return this;
auto LastVariable = m_Children.end();
--LastVariable;
VariableGroup* isGroup = dynamic_cast<VariableGroup*>(LastVariable->second);
if (isGroup && isGroup->IsExpanded())
return isGroup->LastVariable();
return LastVariable->second;
}
EngineVar* VariableGroup::NextVariable(EngineVar* curVar)
{
auto iter = m_Children.begin();
for (; iter != m_Children.end(); ++iter)
{
if (curVar == iter->second)
break;
}
ThrowIfFalse(iter != m_Children.end(), L"Did not find engine variable in its designated group");
auto nextIter = iter;
++nextIter;
if (nextIter == m_Children.end())
return m_GroupPtr ? m_GroupPtr->NextVariable(this) : nullptr;
else
return nextIter->second;
}
EngineVar* VariableGroup::PrevVariable(EngineVar* curVar)
{
auto iter = m_Children.begin();
for (; iter != m_Children.end(); ++iter)
{
if (curVar == iter->second)
break;
}
ThrowIfFalse(iter != m_Children.end(), L"Did not find engine variable in its designated group");
if (iter == m_Children.begin())
return this;
auto prevIter = iter;
--prevIter;
VariableGroup* isGroup = dynamic_cast<VariableGroup*>(prevIter->second);
if (isGroup && isGroup->IsExpanded())
return isGroup->LastVariable();
return prevIter->second;
}
//=====================================================================================================================
// EngineVar implementations
EngineVar::EngineVar(function<void(void*)> callback, void* args) :
m_GroupPtr(nullptr),
m_Callback(callback),
m_Arguments(args)
{
}
EngineVar::EngineVar(const wstring& path, function<void(void*)> callback, void* args) :
m_GroupPtr(nullptr),
m_Callback(callback),
m_Arguments(args)
{
EngineTuning::RegisterVariable(path, *this);
}
void EngineVar::Initialize(const wstring& path, function<void(void*)> callback, void* args)
{
m_Callback = callback;
m_Arguments = args;
EngineTuning::RegisterVariable(path, *this);
}
EngineVar* EngineVar::NextVar(void)
{
EngineVar* next = nullptr;
VariableGroup* isGroup = dynamic_cast<VariableGroup*>(this);
if (isGroup != nullptr && isGroup->IsExpanded())
next = isGroup->FirstVariable();
if (next == nullptr)
next = m_GroupPtr->NextVariable(this);
return next != nullptr ? next : this;
}
EngineVar* EngineVar::PrevVar(void)
{
EngineVar* prev = m_GroupPtr->PrevVariable(this);
if (prev != nullptr && prev != m_GroupPtr)
{
VariableGroup* isGroup = dynamic_cast<VariableGroup*>(prev);
if (isGroup != nullptr && isGroup->IsExpanded())
prev = isGroup->LastVariable();
}
return prev != nullptr ? prev : this;
}
void EngineVar::OnChanged(bool callCallback)
{
if (callCallback && m_Callback)
{
m_Callback(m_Arguments);
}
}
BoolVar::BoolVar(const wstring& path, bool val, function<void(void*)> callback, void* args)
: EngineVar(path, callback, args)
{
m_Flag = val;
}
void BoolVar::Initialize(const wstring& path, bool val, function<void(void*)> callback, void* args)
{
EngineVar::Initialize(path, callback, args);
m_Flag = val;
}
wstring BoolVar::ToFormattedString() const
{
return ToString();
}
wstring BoolVar::ToString(void) const
{
return m_Flag ? L"on" : L"off";
}
void BoolVar::SetValue(FILE* file, const wstring& setting)
{
wstring pattern = L"\n L" + setting + L": %s";
WCHAR valstr[6];
// Search through the file for an entry that matches this setting's name
fwscanf_s(file, pattern.c_str(), valstr, _countof(valstr));
// Look for one of the many affirmations
m_Flag = (
0 == _wcsicmp(valstr, L"1") ||
0 == _wcsicmp(valstr, L"on") ||
0 == _wcsicmp(valstr, L"yes") ||
0 == _wcsicmp(valstr, L"true"));
}
void BoolVar::SetValue(bool value, bool callCallback)
{
m_Flag = value;
OnChanged(callCallback);
}
NumVar::NumVar(const wstring& path, float val, float minValue, float maxValue, float stepSize, function<void(void*)> callback, void* args)
: EngineVar(path, callback, args)
{
ThrowIfFalse(minValue <= maxValue);
m_MinValue = minValue;
m_MaxValue = maxValue;
m_Value = Clamp(val);
m_StepSize = stepSize;
}
void NumVar::Initialize(const std::wstring& path, float val, float minValue, float maxValue, float stepSize, std::function<void(void*)> callback, void* args)
{
EngineVar::Initialize(path, callback, args);
ThrowIfFalse(minValue <= maxValue);
m_MinValue = minValue;
m_MaxValue = maxValue;
m_Value = Clamp(val);
m_StepSize = stepSize;
}
wstring NumVar::ToFormattedString() const
{
return FormattedString(L"%-11f", m_Value);
}
wstring NumVar::ToString(void) const
{
WCHAR buf[128];
swprintf_s(buf, L"%f", m_Value);
return buf;
}
void NumVar::SetValue(FILE* file, const wstring& setting)
{
wstring scanString = L"\n" + setting + L": %f";
float valueRead;
//If we haven't read correctly, just keep m_Value at default value
if (fwscanf_s(file, scanString.c_str(), &valueRead))
*this = valueRead;
}
void NumVar::SetValue(float value, bool callCallback)
{
m_Value = Clamp(value);
OnChanged(callCallback);
}
#if _MSC_VER < 1800
__forceinline float log2(float x) { return log(x) / log(2.0f); }
__forceinline float exp2(float x) { return pow(2.0f, x); }
#endif
ExpVar::ExpVar(const wstring& path, float val, float minExp, float maxExp, float expStepSize, function<void(void*)> callback, void* args)
: NumVar(path, log2(val), minExp, maxExp, expStepSize, callback, args)
{
}
ExpVar& ExpVar::operator=(float val)
{
m_Value = Clamp(log2(val));
return *this;
}
ExpVar::operator float() const
{
return exp2(m_Value);
}
void ExpVar::Initialize(const std::wstring& path, float val, float minExp, float maxExp, float expStepSize, std::function<void(void*)> callback, void* args)
{
NumVar::Initialize(path, val, minExp, maxExp, expStepSize, callback, args);
}
wstring ExpVar::ToFormattedString() const
{
return FormattedString(L"%-11f", (float)*this);
}
wstring ExpVar::ToString(void) const
{
WCHAR buf[128];
swprintf_s(buf, L"%f", (float)*this);
return buf;
}
void ExpVar::SetValue(FILE* file, const wstring& setting)
{
wstring scanString = L"\n" + setting + L": %f";
float valueRead;
//If we haven't read correctly, just keep m_Value at default value
if (fwscanf_s(file, scanString.c_str(), &valueRead))
*this = valueRead;
}
IntVar::IntVar(const wstring& path, int val, int minVal, int maxVal, int stepSize, function<void(void*)> callback, void* args)
: EngineVar(path, callback, args)
{
ThrowIfFalse(minVal <= maxVal);
m_MinValue = minVal;
m_MaxValue = maxVal;
m_Value = Clamp(val);
m_StepSize = stepSize;
}
void IntVar::Initialize(const wstring& path, int val, int minVal, int maxVal, int stepSize, function<void(void*)> callback, void* args)
{
EngineVar::Initialize(path, callback, args);
ThrowIfFalse(minVal <= maxVal);
m_MinValue = minVal;
m_MaxValue = maxVal;
m_Value = Clamp(val);
m_StepSize = stepSize;
}
wstring IntVar::ToFormattedString() const
{
return FormattedString(L"%-11d", m_Value);
}
wstring IntVar::ToString(void) const
{
WCHAR buf[128];
swprintf_s(buf, L"%d", m_Value);
return buf;
}
void IntVar::SetValue(FILE* file, const wstring& setting)
{
wstring scanString = L"\n" + setting + L": %d";
int valueRead;
if (fwscanf_s(file, scanString.c_str(), &valueRead))
*this = valueRead;
}
void IntVar::SetValue(int value, bool callCallback)
{
m_Value = Clamp(value);
OnChanged(callCallback);
}
EnumVar::EnumVar(const wstring& path, int initialVal, int listLength, const WCHAR** listLabels, function<void(void*)> callback, void* args)
: EngineVar(path, callback, args)
{
ThrowIfFalse(listLength > 0);
m_EnumLength = listLength;
m_EnumLabels = listLabels;
m_Value = Clamp(initialVal);
}
void EnumVar::Initialize(const wstring& path, int initialVal, int listLength, const WCHAR** listLabels, function<void(void*)> callback, void* args)
{
EngineVar::Initialize(path, callback, args);
ThrowIfFalse(listLength > 0);
m_EnumLength = listLength;
m_EnumLabels = listLabels;
m_Value = Clamp(initialVal);
}
wstring EnumVar::ToFormattedString() const
{
return m_EnumLabels[m_Value];
}
wstring EnumVar::ToString(void) const
{
return m_EnumLabels[m_Value];
}
void EnumVar::SetValue(FILE* file, const wstring& setting)
{
wstring scanString = L"\n" + setting + L": %[^\n]";
WCHAR valueRead[14];
if (fwscanf_s(file, scanString.c_str(), valueRead, _countof(valueRead)) == 1)
{
wstring valueReadStr = valueRead;
valueReadStr = valueReadStr.substr(0, valueReadStr.length() - 1);
        // If we don't find the wstring, then leave m_EnumLabels[m_Value] as default
for(int i = 0; i < m_EnumLength; ++i)
{
if (m_EnumLabels[i] == valueReadStr)
{
m_Value = i;
break;
}
}
}
}
void EnumVar::SetValue(int value, bool callCallback)
{
m_Value = Clamp(value);
OnChanged(callCallback);
}
CallbackTrigger::CallbackTrigger(const wstring& path, function<void (void*)> callback, void* args)
: EngineVar(path, callback, args)
{
m_BangDisplay = 0;
}
wstring CallbackTrigger::ToFormattedString() const
{
if (m_BangDisplay > 0)
--m_BangDisplay;
static const WCHAR s_animation[] = { L'-', L'\\', L'|', L'/' };
return FormattedString(L"[%c]", s_animation[(m_BangDisplay >> 3) & 3]);
}
void CallbackTrigger::SetValue(FILE* file, const wstring& setting)
{
//Skip over setting without reading anything
wstring scanString = L"\n" + setting + L": %[^\n]";
WCHAR skippedLines[100];
fwscanf_s(file, scanString.c_str(), skippedLines, _countof(skippedLines));
}
//=====================================================================================================================
// EngineTuning namespace methods
void EngineTuning::Initialize(void)
{
for (int i = 0; i < s_UnregisteredCount; ++i)
{
ThrowIfFalse(wcslen(s_UnregisteredPath[i]) > 0, L"Register = %d\n", i);
ThrowIfFalse(s_UnregisteredVariable[i] != nullptr);
AddToVariableGraph(s_UnregisteredPath[i], *s_UnregisteredVariable[i]);
}
s_UnregisteredCount = -1;
}
void HandleDigitalButtonPress(GameInput::DigitalInput button, float timeDelta, function<void ()> action)
{
if (!GameInput::IsPressed(button))
return;
float durationHeld = GameInput::GetDurationPressed(button);
// Tick on the first press
if (durationHeld == 0.0f)
{
action();
return;
}
    // Afterward, tick at fixed intervals
float oldDuration = durationHeld - timeDelta;
// Before 2 seconds, use slow scale (200ms/tick), afterward use fast scale (50ms/tick).
float timeStretch = durationHeld < 2.0f ? 5.0f : 20.0f;
if (floor(durationHeld * timeStretch) > floor(oldDuration * timeStretch))
action();
}
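// Worked example of the cadence above (illustrative): while durationHeld < 2s, timeStretch
// is 5, so a repeat fires whenever the hold time crosses a multiple of 0.2s -- e.g. going
// from oldDuration 0.59s to durationHeld 0.61s crosses 0.6s and triggers action(); past 2s,
// timeStretch is 20 and the action repeats every 0.05s.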
void EngineTuning::Update(float frameTime)
{
if (GameInput::IsFirstPressed(GameInput::kBackButton)
|| GameInput::IsFirstPressed(GameInput::kKey_back))
sm_IsVisible = !sm_IsVisible;
if (!sm_IsVisible)
return;
if (sm_SelectedVariable == nullptr || sm_SelectedVariable == &VariableGroup::sm_RootGroup)
sm_SelectedVariable = VariableGroup::sm_RootGroup.FirstVariable();
if (sm_SelectedVariable == nullptr)
return;
// Detect a DPad button press
HandleDigitalButtonPress(GameInput::kDPadRight, frameTime, []{ sm_SelectedVariable->Increment(); });
HandleDigitalButtonPress(GameInput::kDPadLeft, frameTime, []{ sm_SelectedVariable->Decrement(); });
HandleDigitalButtonPress(GameInput::kDPadDown, frameTime, []{ sm_SelectedVariable = sm_SelectedVariable->NextVar(); });
HandleDigitalButtonPress(GameInput::kDPadUp, frameTime, []{ sm_SelectedVariable = sm_SelectedVariable->PrevVar(); });
HandleDigitalButtonPress(GameInput::kKey_right, frameTime, []{ sm_SelectedVariable->Increment(); });
HandleDigitalButtonPress(GameInput::kKey_left, frameTime, []{ sm_SelectedVariable->Decrement(); });
HandleDigitalButtonPress(GameInput::kKey_down, frameTime, []{ sm_SelectedVariable = sm_SelectedVariable->NextVar(); });
HandleDigitalButtonPress(GameInput::kKey_up, frameTime, []{ sm_SelectedVariable = sm_SelectedVariable->PrevVar(); });
if (GameInput::IsFirstPressed(GameInput::kAButton)
|| GameInput::IsFirstPressed(GameInput::kKey_return))
{
sm_SelectedVariable->Bang();
}
}
void StartSave(void*)
{
FILE* settingsFile;
_wfopen_s(&settingsFile, L"engineTuning.txt", L"wb");
if (settingsFile != nullptr)
{
VariableGroup::sm_RootGroup.SaveToFile(settingsFile, 2);
fclose(settingsFile);
}
}
function<void(void*)> StartSaveFunc = StartSave;
void StartLoad(void*)
{
FILE* settingsFile;
_wfopen_s(&settingsFile, L"engineTuning.txt", L"rb");
if (settingsFile != nullptr)
{
VariableGroup::sm_RootGroup.LoadSettingsFromFile(settingsFile);
fclose(settingsFile);
}
}
function<void(void*)> StartLoadFunc = StartLoad;
void EngineTuning::Display(wstringstream* renderText, bool expandAllNodes)
{
EngineProfiling::DisplayFrameRate(*renderText, 0);
if (!sm_IsVisible)
{
EngineProfiling::Display(*renderText, 0, expandAllNodes);
return;
}
*renderText << L"Engine Tuning (use arrow keys)\n";
VariableGroup::sm_RootGroup.Display(renderText, 0, sm_SelectedVariable);
}
void EngineTuning::AddToVariableGraph(const wstring& path, EngineVar& var)
{
vector<wstring> separatedPath;
wstring leafName;
size_t start = 0, end = 0;
while (1)
{
end = path.find('/', start);
if (end == wstring::npos)
{
leafName = path.substr(start);
break;
}
else
{
separatedPath.push_back(path.substr(start, end - start));
start = end + 1;
}
}
VariableGroup* group = &VariableGroup::sm_RootGroup;
for (auto iter = separatedPath.begin(); iter != separatedPath.end(); ++iter)
{
VariableGroup* nextGroup;
EngineVar* node = group->FindChild(*iter);
if (node == nullptr)
{
nextGroup = new VariableGroup();
group->AddChild(*iter, *nextGroup);
group = nextGroup;
}
else
{
nextGroup = dynamic_cast<VariableGroup*>(node);
ThrowIfFalse(nextGroup != nullptr, L"Attempted to trash the tweak graph");
group = nextGroup;
}
}
group->AddChild(leafName, var);
}
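// Illustrative path handling (a sketch of the routine above): registering a variable under
// L"Graphics/AA/Enable" walks or creates the "Graphics" and "AA" VariableGroups and then adds
// the variable as the leaf child named "Enable".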
void EngineTuning::RegisterVariable(const wstring& path, EngineVar& var)
{
if (s_UnregisteredCount >= 0)
{
int Idx = s_UnregisteredCount++;
wcscpy_s(s_UnregisteredPath[Idx], path.c_str());
s_UnregisteredVariable[Idx] = &var;
}
else
{
AddToVariableGraph(path, var);
}
}
bool EngineTuning::IsFocused(void)
{
return sm_IsVisible;
}
void EngineTuning::SetIsVisible(bool isVisible)
{
sm_IsVisible = isVisible;
} | 9,067 |
2,144 | <reponame>ananthdurai/pinot<filename>pinot-core/src/main/java/org/apache/pinot/core/plan/DistinctPlanNode.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pinot.core.plan;
import java.util.List;
import org.apache.pinot.common.request.context.ExpressionContext;
import org.apache.pinot.core.common.Operator;
import org.apache.pinot.core.operator.blocks.IntermediateResultsBlock;
import org.apache.pinot.core.operator.query.DictionaryBasedDistinctOperator;
import org.apache.pinot.core.operator.query.DistinctOperator;
import org.apache.pinot.core.operator.transform.TransformOperator;
import org.apache.pinot.core.query.aggregation.function.AggregationFunction;
import org.apache.pinot.core.query.aggregation.function.DistinctAggregationFunction;
import org.apache.pinot.core.query.request.context.QueryContext;
import org.apache.pinot.segment.spi.IndexSegment;
import org.apache.pinot.segment.spi.datasource.DataSource;
import org.apache.pinot.segment.spi.datasource.DataSourceMetadata;
import org.apache.pinot.segment.spi.index.reader.Dictionary;
/**
* Execution plan for distinct queries on a single segment.
*/
@SuppressWarnings("rawtypes")
public class DistinctPlanNode implements PlanNode {
private final IndexSegment _indexSegment;
private final QueryContext _queryContext;
public DistinctPlanNode(IndexSegment indexSegment, QueryContext queryContext) {
_indexSegment = indexSegment;
_queryContext = queryContext;
}
@Override
public Operator<IntermediateResultsBlock> run() {
AggregationFunction[] aggregationFunctions = _queryContext.getAggregationFunctions();
assert aggregationFunctions != null && aggregationFunctions.length == 1
&& aggregationFunctions[0] instanceof DistinctAggregationFunction;
DistinctAggregationFunction distinctAggregationFunction = (DistinctAggregationFunction) aggregationFunctions[0];
List<ExpressionContext> expressions = distinctAggregationFunction.getInputExpressions();
// Use dictionary to solve the query if possible
if (_queryContext.getFilter() == null && expressions.size() == 1) {
ExpressionContext expression = expressions.get(0);
if (expression.getType() == ExpressionContext.Type.IDENTIFIER) {
DataSource dataSource = _indexSegment.getDataSource(expression.getIdentifier());
Dictionary dictionary = dataSource.getDictionary();
if (dictionary != null) {
DataSourceMetadata dataSourceMetadata = dataSource.getDataSourceMetadata();
return new DictionaryBasedDistinctOperator(dataSourceMetadata.getDataType(), distinctAggregationFunction,
dictionary, dataSourceMetadata.getNumDocs());
}
}
}
TransformOperator transformOperator =
new TransformPlanNode(_indexSegment, _queryContext, expressions, DocIdSetPlanNode.MAX_DOC_PER_CALL).run();
return new DistinctOperator(_indexSegment, distinctAggregationFunction, transformOperator);
}
}
| 1,096 |
12,824 | /*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.runtime;
public final class VolatileObjectRef<T> implements java.io.Serializable {
private static final long serialVersionUID = -9055728157600312291L;
volatile public T elem;
public VolatileObjectRef(T elem) { this.elem = elem; }
@Override
public String toString() { return String.valueOf(elem); }
public static <U> VolatileObjectRef<U> create(U e) { return new VolatileObjectRef<U>(e); }
public static VolatileObjectRef<Object> zero() { return new VolatileObjectRef<Object>(null); }
}
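// Hedged usage note (illustrative, not from the original file): the Scala compiler emits this
// wrapper when a closure captures a mutable local that needs volatile semantics, e.g.:
//   VolatileObjectRef<String> ref = VolatileObjectRef.create("init");
//   Runnable r = () -> { ref.elem = "updated"; };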
| 262 |
592 | <reponame>pombredanne/vulncode-db<gh_stars>100-1000
"""Makes vulnerability_resources FK non-nullable.
Revision ID: 4c879a183a73
Revises: <KEY>
Create Date: 2020-10-16 07:28:50.893030
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "4c879a183a73"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade():
# Make sure the FK on vulnerability_resources can never be null.
op.alter_column(
"vulnerability_resources",
"vulnerability_details_id",
existing_type=sa.Integer(),
nullable=False,
)
def downgrade():
op.alter_column(
"vulnerability_resources",
"vulnerability_details_id",
existing_type=sa.Integer(),
nullable=True,
)
| 322 |
386 | <gh_stars>100-1000
//////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2013-2015, Image Engine Design Inc. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of Image Engine Design nor the names of any
// other contributors to this software may be used to endorse or
// promote products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////////////////
#include "IECoreHoudini/FromHoudiniCompoundObjectConverter.h"
#include "IECoreHoudini/GEO_CortexPrimitive.h"
#include "IECore/CompoundObject.h"
#include "GA/GA_Names.h"
#if UT_MAJOR_VERSION_INT >= 14
typedef IECoreHoudini::GEO_CortexPrimitive CortexPrimitive;
#else
#include "IECoreHoudini/GU_CortexPrimitive.h"
typedef IECoreHoudini::GU_CortexPrimitive CortexPrimitive;
#endif
using namespace IECore;
using namespace IECoreScene;
using namespace IECoreHoudini;
IE_CORE_DEFINERUNTIMETYPED( FromHoudiniCompoundObjectConverter );
FromHoudiniGeometryConverter::Description<FromHoudiniCompoundObjectConverter> FromHoudiniCompoundObjectConverter::m_description( CompoundObjectTypeId );
FromHoudiniCompoundObjectConverter::FromHoudiniCompoundObjectConverter( const GU_DetailHandle &handle ) :
FromHoudiniGeometryConverter( handle, "Converts a Houdini GU_Detail to an IECore::CompoundObject." )
{
}
FromHoudiniCompoundObjectConverter::FromHoudiniCompoundObjectConverter( const SOP_Node *sop ) :
FromHoudiniGeometryConverter( sop, "Converts a Houdini GU_Detail to an IECore::CompoundObject." )
{
}
FromHoudiniCompoundObjectConverter::~FromHoudiniCompoundObjectConverter()
{
}
FromHoudiniGeometryConverter::Convertability FromHoudiniCompoundObjectConverter::canConvert( const GU_Detail *geo )
{
const GA_PrimitiveList &primitives = geo->getPrimitiveList();
// need multiple names
GA_ROHandleS nameAttrib( geo, GA_ATTRIB_PRIMITIVE, GA_Names::name );
if ( nameAttrib.isValid() )
{
GA_StringTableStatistics stats;
const GA_Attribute *nameAttr = nameAttrib.getAttribute();
const GA_AIFSharedStringTuple *tuple = nameAttr->getAIFSharedStringTuple();
tuple->getStatistics( nameAttr, stats );
if ( stats.getEntries() < 2 )
{
return Inapplicable;
}
}
else
{
return Inapplicable;
}
	// Need them all to be convertible as objects. Even then, if they're VisibleRenderable,
// then the FromHoudiniGroupConverter would be preferable.
bool nonRenderable = false;
GA_Offset start, end;
for( GA_Iterator it( geo->getPrimitiveRange() ); it.blockAdvance( start, end ); )
{
for( GA_Offset offset = start; offset < end; ++offset )
{
const GA_Primitive *prim = primitives.get( offset );
if( prim->getTypeId() != CortexPrimitive::typeId() )
{
return Inapplicable;
}
if( !IECore::runTimeCast<const VisibleRenderable>( ( (CortexPrimitive *) prim )->getObject() ) )
{
nonRenderable = true;
}
}
}
return ( nonRenderable ) ? Ideal : Suitable;
}
ObjectPtr FromHoudiniCompoundObjectConverter::doDetailConversion( const GU_Detail *geo, const CompoundObject *operands ) const
{
GA_ROHandleS nameAttrib( geo, GA_ATTRIB_PRIMITIVE, GA_Names::name );
if( nameAttrib.isInvalid() )
{
throw std::runtime_error( "FromHoudiniCompoundObjectConverter: Can only convert named CortexObject primitives" );
}
CompoundObjectPtr result = new CompoundObject();
const GA_PrimitiveList &primitives = geo->getPrimitiveList();
GA_Offset start, end;
for( GA_Iterator it( geo->getPrimitiveRange() ); it.blockAdvance( start, end ); )
{
for( GA_Offset offset = start; offset < end; ++offset )
{
const GA_Primitive *prim = primitives.get( offset );
if( prim->getTypeId() != CortexPrimitive::typeId() )
{
throw std::runtime_error( "FromHoudiniCompoundObjectConverter: Geometry contains non-CortexObject primitives" );
}
std::string name = "";
const char *tmp = nameAttrib.get( offset );
if( tmp )
{
name = tmp;
}
ConstObjectPtr object = ( (CortexPrimitive *) prim )->getObject();
result->members()[name] = ( object ) ? object->copy() : nullptr;
}
}
return result;
}
| 1,865 |
582 | <gh_stars>100-1000
package org.weiboad.ragnar.server.statistics.api;
import com.google.gson.Gson;
import com.google.gson.JsonSyntaxException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Scope;
import org.springframework.scheduling.annotation.Async;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import org.weiboad.ragnar.server.config.FieryConfig;
import org.weiboad.ragnar.server.storage.DBManage;
import org.weiboad.ragnar.server.storage.DBSharder;
import org.weiboad.ragnar.server.struct.MetaLog;
import org.weiboad.ragnar.server.util.DateTimeHelper;
import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
@Component
@Scope("singleton")
public class APIStatisticTimeSet {
private ConcurrentHashMap<Long, ConcurrentHashMap<String, APIStatisticStruct>> apiTopStaticHelper = new ConcurrentHashMap<Long, ConcurrentHashMap<String, APIStatisticStruct>>();
private ConcurrentHashMap<String, ConcurrentHashMap<Long, APIStatisticStruct>> apiTopHourStaticHelper = new ConcurrentHashMap<>();
private Logger log = LoggerFactory.getLogger(APIStatisticTimeSet.class);
@Autowired
private FieryConfig fieryConfig;
@Autowired
private DBManage dbManage;
public void analyzeMetaLog(MetaLog metainfo) {
String url = metainfo.getUrl();
Long shardTime = DateTimeHelper.getTimesMorning(metainfo.getTime().longValue());
Long hourShardTime = DateTimeHelper.getHourTime(metainfo.getTime().longValue());
if (url.trim().length() > 0 && shardTime > 0 && shardTime > DateTimeHelper.getCurrentTime() -
(fieryConfig.getKeepdataday() * 86400)) {
//day
if (!apiTopStaticHelper.containsKey(shardTime)) {
//day shard
ConcurrentHashMap<String, APIStatisticStruct> urlshard = new ConcurrentHashMap<>();
APIStatisticStruct urlInfo = new APIStatisticStruct(metainfo, shardTime);
urlshard.put(url, urlInfo);
apiTopStaticHelper.put(shardTime, urlshard);
} else {
//count ++
if (!apiTopStaticHelper.get(shardTime).containsKey(url)) {
//day
APIStatisticStruct apiStruct = new APIStatisticStruct(metainfo, shardTime);
apiTopStaticHelper.get(shardTime).put(metainfo.getUrl(), apiStruct);
} else {
//day
apiTopStaticHelper.get(shardTime).get(metainfo.getUrl()).analyzeMetaLog(metainfo);
}
}
//hour
if (!apiTopHourStaticHelper.containsKey(url)) {
//hour shard
ConcurrentHashMap<Long, APIStatisticStruct> urlHourshard = new ConcurrentHashMap<>();
APIStatisticStruct urlHourInfo = new APIStatisticStruct(metainfo, hourShardTime);
urlHourshard.put(hourShardTime, urlHourInfo);
apiTopHourStaticHelper.put(url, urlHourshard);
} else {
if (!apiTopHourStaticHelper.get(url).containsKey(hourShardTime)) {
//hour
APIStatisticStruct apiHourStruct = new APIStatisticStruct(metainfo, hourShardTime);
apiTopHourStaticHelper.get(metainfo.getUrl()).put(hourShardTime, apiHourStruct);
} else {
//hour
apiTopHourStaticHelper.get(metainfo.getUrl()).get(hourShardTime).analyzeMetaLog(metainfo);
}
}
}//check the time
}
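    // Illustrative sharding sketch (DateTimeHelper semantics inferred from the method names,
    // and "/api/feed" is a made-up URL): a MetaLog stamped inside 2020-10-16 07:xx is counted
    // once in the day bucket apiTopStaticHelper[midnight epoch]["/api/feed"] and once in the
    // hour bucket apiTopHourStaticHelper["/api/feed"][07:00 epoch].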
public Map<String, Integer> getAPITOPStatics() {
Map<String, Integer> result = new LinkedHashMap<>();
for (Map.Entry<Long, ConcurrentHashMap<String, APIStatisticStruct>> ent : apiTopStaticHelper.entrySet()) {
result.put(ent.getKey() + "", ent.getValue().size());
}
return result;
}
public TreeMap<Long, APIStatisticStruct> getHourDetail(String url, Long shardtime) {
TreeMap<Long, APIStatisticStruct> urlStatics = new TreeMap<>();
if (apiTopHourStaticHelper.containsKey(url)) {
//return apiTopHourStaticHelper.get(url);
ConcurrentHashMap<Long, APIStatisticStruct> staticsSet = apiTopHourStaticHelper.get(url);
for (Map.Entry<Long, APIStatisticStruct> statisticItem : staticsSet.entrySet()) {
if (statisticItem.getKey() >= shardtime && statisticItem.getKey() <= shardtime + 86400) {
urlStatics.put(DateTimeHelper.getHour(statisticItem.getKey()), statisticItem.getValue());
}
}
}
return urlStatics;
}
public ConcurrentHashMap<String, APIStatisticStruct> getDaySharder(Long timestamp, boolean create) {
Long shardTime = DateTimeHelper.getTimesMorning(timestamp);
if (!apiTopStaticHelper.containsKey(shardTime)) {
if (create) {
ConcurrentHashMap<String, APIStatisticStruct> urlshard = new ConcurrentHashMap<>();
apiTopStaticHelper.put(shardTime, urlshard);
return urlshard;
}
            // by default, do not create this shard
return null;
} else {
return apiTopStaticHelper.get(shardTime);
}
}
@PostConstruct
public void loadStaticDb() {
log.info("load the Statistic info start...");
Gson jsonHelper = new Gson();
Map<String, String> dblist = dbManage.getDBFolderList();
for (Map.Entry<String, String> db : dblist.entrySet()) {
String dbshard = db.getKey();
Long dbShardLong;
            // skip this shard if its folder name is not a valid long
try {
dbShardLong = Long.valueOf(dbshard);
} catch (Exception e) {
continue;
}
//init the set
ConcurrentHashMap<String, APIStatisticStruct> apiStatisticStructMap = new ConcurrentHashMap<>();
apiTopStaticHelper.put(dbShardLong, apiStatisticStructMap);
try {
DBSharder dbHelper = dbManage.getDB(dbShardLong);
if (dbHelper == null) {
log.info("load db fail:" + dbshard);
continue;
}
String staticStr = dbHelper.get("apitopstatistic");
if (staticStr == null || staticStr.length() == 0) {
log.info("load static db info fail:" + dbshard);
} else {
//recovery the statics
String[] staticArray = staticStr.split("\r\n");
for (int staticIndex = 0; staticIndex < staticArray.length; staticIndex++) {
try {
APIStatisticStruct apiStatisticStruct = jsonHelper.fromJson(staticArray[staticIndex], APIStatisticStruct.class);
apiTopStaticHelper.get(dbShardLong).put(apiStatisticStruct.getUrl(), apiStatisticStruct);
} catch (JsonSyntaxException e) {
e.printStackTrace();
}
}
}
String staticHourStr = dbHelper.get("apitophourstatistic");
if (staticHourStr == null || staticHourStr.length() == 0) {
log.info("load static hour db info fail:" + dbshard);
} else {
//recovery the statics
String[] staticArray = staticHourStr.split("\r\n");
for (int staticIndex = 0; staticIndex < staticArray.length; staticIndex++) {
try {
APIStatisticStruct apiStatisticStruct = jsonHelper.fromJson(staticArray[staticIndex], APIStatisticStruct.class);
if (!apiTopHourStaticHelper.containsKey(apiStatisticStruct.getUrl())) {
apiTopHourStaticHelper.put(apiStatisticStruct.getUrl(), new ConcurrentHashMap<>());
}
apiTopHourStaticHelper.get(apiStatisticStruct.getUrl()).put(apiStatisticStruct.getShardTime(), apiStatisticStruct);
} catch (JsonSyntaxException e) {
e.printStackTrace();
}
}
}
} catch (Exception e) {
e.printStackTrace();
log.error("load dbshard:" + dbshard + " error:" + e.getMessage());
}
}
}
@PreDestroy
public void dumpStaticDb() {
log.info("dump the Statistic info start...");
//loop day
for (Map.Entry<Long, ConcurrentHashMap<String, APIStatisticStruct>> ent : apiTopStaticHelper.entrySet()) {
StringBuilder staticSting = new StringBuilder();
StringBuilder staticHourString = new StringBuilder();
Long shardTime = ent.getKey();
ConcurrentHashMap<String, APIStatisticStruct> apiStatisticStructMap = ent.getValue();
//fetch all day total statics
for (Map.Entry<String, APIStatisticStruct> urlShard : apiStatisticStructMap.entrySet()) {
String jsonStr = urlShard.getValue().toJson();
if (jsonStr.trim().length() > 0) {
staticSting.append(jsonStr + "\r\n");
}
//store the Hour String
if (apiTopHourStaticHelper.containsKey(urlShard.getKey())) {
Long compareEnd = shardTime + 86400;
//check this url hour map
for (Map.Entry<Long, APIStatisticStruct> hourStatistic : apiTopHourStaticHelper.get(urlShard.getKey()).entrySet()) {
//filter
if (hourStatistic.getKey() <= compareEnd && hourStatistic.getKey() >= shardTime) {
staticHourString.append(hourStatistic.getValue().toJson() + "\r\n");
}
}
}
}
log.info("dump the Statistic info:" + shardTime + " len:" + apiStatisticStructMap.size());
log.info("dump the stattistic hour info:" + shardTime +" len:"+ staticHourString.toString().length());
DBSharder dbSharder = dbManage.getDB(shardTime);
//day
if (staticSting.length() > 0 && dbSharder != null) {
dbSharder.put("apitopstatistic", staticSting.toString());
}
//hour
if (staticHourString.length() > 0 && dbSharder != null) {
dbSharder.put("apitophourstatistic", staticHourString.toString());
}
}
}
@Scheduled(fixedRate = 120000)
public void cleanUpSharder() {
//clean up day
if (apiTopStaticHelper.size() > 0) {
ArrayList<Long> removeMap = new ArrayList<>();
for (Map.Entry<Long, ConcurrentHashMap<String, APIStatisticStruct>> ent : apiTopStaticHelper.entrySet()) {
if (ent.getKey() >= DateTimeHelper.getCurrentTime() - fieryConfig.getKeepdataday() * 86400) {
continue;
}
removeMap.add(ent.getKey());
}
for (Long removeKey : removeMap) {
log.info("Clean up the API Top Statistic:" + removeKey);
apiTopStaticHelper.remove(removeKey);
}
}
//clean up hour
if (apiTopHourStaticHelper.size() > 0) {
ArrayList<String> removeUrlMap = new ArrayList<>();
for (Map.Entry<String, ConcurrentHashMap<Long, APIStatisticStruct>> ent : apiTopHourStaticHelper.entrySet()) {
ArrayList<Long> removeMap = new ArrayList<>();
for (Map.Entry<Long, APIStatisticStruct> itemEnt : ent.getValue().entrySet()) {
if (itemEnt.getKey() >= DateTimeHelper.getCurrentTime() - fieryConfig.getKeepdataday() * 86400) {
continue;
}
removeMap.add(itemEnt.getKey());
}
for (Long removeKey : removeMap) {
log.info("Clean up the API Top Day Statistic:" + removeKey);
apiTopHourStaticHelper.get(ent.getKey()).remove(removeKey);
if (apiTopHourStaticHelper.get(ent.getKey()).size() == 0) {
removeUrlMap.add(ent.getKey());
}
}
}
//remove the url
for (String removeUrlKey : removeUrlMap) {
apiTopHourStaticHelper.remove(removeUrlKey);
}
}
//cycle dump the statistics
dumpStaticDb();
}
}
| 6,240 |
1,733 | /* sha3sum.c - Keccak-f[1600] permutation, sponge construction
*
* Copyright 2014 <NAME> <<EMAIL>>
*
* https://keccak.team/files/Keccak-reference-3.0.pdf
* https://csrc.nist.gov/publications/detail/fips/202/final
* https://nvlpubs.nist.gov/nistpubs/specialpublications/nist.sp.800-185.pdf
// Depends on FLAG(b) being 4
USE_SHA3SUM(NEWTOY(sha3sum, "bSa#<128>512=224", TOYFLAG_USR|TOYFLAG_BIN))
config SHA3SUM
bool "sha3sum"
default y
help
    usage: sha3sum [-bS] [-a BITS] [FILE...]
Hash function du jour.
-a Produce a hash BITS long (default 224)
-b Brief (hash only, no filename)
-S Use SHAKE termination byte instead of SHA3 (ask FIPS why)
*/
#define FOR_sha3sum
#include "toys.h"
GLOBALS(
long a;
unsigned long long rc[24];
)
static const char rho[] =
{1,3,6,10,15,21,28,36,45,55,2,14,27,41,56,8,25,43,62,18,39,61,20,44};
static const char pi[] =
{10,7,11,17,18,3,5,16,8,21,24,4,15,23,19,13,12,2,20,14,22,9,6,1};
static const char rcpack[] =
{0x33,0x07,0xdd,0x16,0x38,0x1b,0x7b,0x2b,0xad,0x6a,0xce,0x4c,0x29,0xfe,0x31,
0x68,0x9d,0xb0,0x8f,0x2f,0x0a};
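// A reading of the tables above (not original documentation): rho holds the
// per-lane rotation offsets, pi the lane permutation order, and rcpack is a
// bit-packed form of the 24 round constants that sha3sum_main() expands into
// TT.rc[] before any hashing.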
static void keccak(unsigned long long *a)
{
unsigned long long b[5] = {0}, t;
int i, x, y;
for (i = 0; i < 24; i++) {
for (x = 0; x<5; x++) for (b[x] = 0, y = 0; y<25; y += 5) b[x] ^= a[x+y];
for (x = 0; x<5; x++) for (y = 0; y<25; y += 5) {
t = b[(x+1)%5];
a[y+x] ^= b[(x+4)%5]^(t<<1|t>>63);
}
for (t = a[1], x = 0; x<24; x++) {
*b = a[pi[x]];
a[pi[x]] = (t<<rho[x])|(t>>(64-rho[x]));
t = *b;
}
for (y = 0; y<25; y += 5) {
for (x = 0; x<5; x++) b[x] = a[y + x];
for (x = 0; x<5; x++) a[y + x] = b[x]^((~b[(x+1)%5])&b[(x+2)%5]);
}
*a ^= TT.rc[i];
}
}
static void do_sha3sum(int fd, char *name)
{
int span, ii, len, rate = 200-TT.a/4;
char *ss = toybuf, buf[200];
memset(buf, 0, sizeof(buf));
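  // Sponge construction: absorb input rate bytes at a time by XORing into buf
  // and permuting; the final partial block gets the SHA3/SHAKE padding byte
  // and the 0x80 terminator before the last permutation, then digest bytes
  // are squeezed out below.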
for (len = 0;; ss += rate) {
if ((span = len-(ss-toybuf))<rate) {
memcpy(toybuf, ss, span);
len = span += readall(fd, (ss = toybuf)+span, sizeof(toybuf)-span);
}
if (span>rate) span = rate;
for (ii = 0; ii<span; ii++) buf[ii] ^= ss[ii];
if (rate!=span) {
buf[span] ^= FLAG(S) ? 0x1f : 0x06;
buf[rate-1] ^= 0x80;
}
keccak((void *)buf);
if (span<rate) break;
}
for (ii = 0; ii<TT.a/8; ) {
printf("%02x", buf[ii%rate]);
if (!(++ii%rate)) keccak((void *)buf);
}
memset(buf, 0, sizeof(buf));
// Depends on FLAG(b) being 4
xprintf(" %s\n"+FLAG(b), name);
}
// TODO test 224 256 384 512, and shake 128 256
void sha3sum_main(void)
{
int i, j, k;
char *s;
// Decompress RC table
for (s = (void *)rcpack, i = 127; i; s += 3) for (i>>=1,k = j = 0; k<24; k++)
if (1&(s[k>>3]>>(7-(k&7)))) TT.rc[k] |= 1ULL<<i;
loopfiles(toys.optargs, do_sha3sum);
}
| 1,482 |
639 | <reponame>nox-410/nnfusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Author: wenxh
# This script is to be used as batch system intergration test in Azure Build Agent
import os
import sys
import subprocess
import multiprocessing
import logging
import json
import pprint
import numpy as np
import testcases
import evaluator
import socket
class TestsManager:
def __init__(self):
config_json = "config.json"
if len(sys.argv) == 2:
config_json = sys.argv[1]
self.load_config(config_json)
        # models path and nnfusion cli given on the command line override the config values
if len(sys.argv) > 2:
self.models = sys.argv[1]
self.nnfusion_cli = sys.argv[2]
if not os.path.exists(self.models):
self.models = self.load_default_models_path()
if not os.path.exists(self.nnfusion_cli):
self.nnfusion_cli = self.load_default_nnfusion_cli()
if not os.path.exists(self.testcase_configs):
            self.testcase_configs = os.path.join(os.path.dirname(
os.path.abspath(__file__)), "./testcase_configs")
self.capability = set()
self.capability_detect()
# is a dict()
self.enabled_tags = self.user_enabled_tags
logging.info("models folder = " + self.models)
logging.info("testcase configs folder = " + self.testcase_configs)
logging.info("nnfusion cli = " + self.nnfusion_cli)
logging.info("device capability = " + ",".join(list(self.capability)))
logging.info("enabled tags = " + str(self.enabled_tags))
def load_config(self, config_json):
self.user_device_capability = set()
self.user_enabled_tags = dict()
self.models = ""
self.nnfusion_cli = ""
self.nnfusion_args = ""
self.testcase_configs = ""
if not os.path.exists(config_json):
config_json = os.path.join(os.path.dirname(
os.path.abspath(__file__)), config_json)
if not os.path.exists(config_json):
return
logging.info("load config from: " + config_json)
with open(config_json, 'r') as f:
data = json.load(f)
# env operations
if "env" in data.keys():
env_ops = data["env"]
for env in env_ops.keys():
# set, append, clear, etc ...
if 'set' in env_ops[env].keys():
os.environ[env] = str(env_ops[env]['set'])
if 'append' in env_ops[env].keys():
if os.getenv(env) is None:
os.environ[env] = str(env_ops[env]['append'])
else:
os.environ[env] = os.getenv(
env) + ":" + str(env_ops[env]['append'])
if 'clear' in env_ops[env].keys():
os.environ[env] = ""
if 'del' in env_ops[env].keys():
if env in os.environ.keys():
del os.environ[env]
logging.info("\t" + env + " = " + str(os.environ[env]))
if "device_capability" in data.keys():
self.user_device_capability = set(data["device_capability"])
if "enabled_tags" in data.keys():
self.user_enabled_tags = data["enabled_tags"]
if "models" in data.keys():
self.models = data["models"]
if "nnfusion_cli" in data.keys():
self.nnfusion_cli = data["nnfusion_cli"]
if "nnfusion_args" in data.keys():
self.nnfusion_args = data["nnfusion_args"]
if "testcase_configs" in data.keys():
self.testcase_configs = data["testcase_configs"]
def load_default_nnfusion_cli(self):
nnf_clis = [os.path.join(os.path.dirname(os.path.abspath(
__file__)), "../../../build/src/tools/nnfusion/nnfusion"), "/usr/local/bin/nnfusion"]
for nnf in nnf_clis:
if os.path.exists(nnf):
print("NNFusion CLI detected: " + nnf)
return nnf
logging.error("No nnfusion cli available.")
exit(1)
def load_default_models_path(self):
models_path = [os.path.join(os.path.dirname(
os.path.abspath(__file__)), "../../../../frozenmodels"), os.path.join(os.path.dirname(
os.path.abspath(__file__)), "../../../models/frozenmodels")]
for models in models_path:
if os.path.exists(models):
print("models/ folder detected: " + models)
return models
logging.error("No models folder available.")
exit(1)
def capability_detect(self):
# Detect Cuda
if os.path.exists("/usr/local/cuda/bin/nvcc"):
self.capability.add("CUDA")
logging.info("NVCC is existed.")
if os.path.exists("/opt/rocm/bin/hcc"):
self.capability.add("ROCM")
logging.info("HCC is existed.")
self.capability.add("CPU")
if len(self.user_device_capability) > 0:
self.capability = self.capability.intersection(
self.user_device_capability)
def load_test_cases(self, enabled_tags=set("correctness")):
tests = testcases.load_tests(self.models, self.testcase_configs)
newlist = []
for test in tests:
avail = False
for tag in test.tags:
if tag in enabled_tags:
avail = True
break
if avail:
newlist.append(test)
return newlist
def report(self):
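        # Run one E2EExecutor process per detected device capability (plus an
        # optional CLIExecutor when SIDECLI is set), merge their reports via a
        # shared manager list, and fail the run if any line reports "Failed".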
manager = multiprocessing.Manager()
report_list = manager.list()
jobs = []
for dev in self.capability:
p = multiprocessing.Process(target=evaluator.E2EExecutor, args=(
self.load_test_cases(self.enabled_tags[dev]), dev, report_list, self.nnfusion_cli, self.nnfusion_args))
jobs.append(p)
p.start()
if 'SIDECLI' in os.environ:
p = multiprocessing.Process(
target=evaluator.CLIExecutor, args=("", report_list))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
hostname = socket.gethostname()
print("=========================================\n\n")
print(hostname + "\tE2E Model report")
print("\n\n=========================================\n")
report = ("\n".join(report_list))
print(report)
if "Failed" in report:
return -1
return 0
if __name__ == "__main__":
LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
logging.basicConfig(level=LOGLEVEL)
_m = TestsManager()
exit(_m.report())
| 3,455 |
359 | <reponame>roannav/learntools
track = dict(
author_username='dansbecker',
course_name='Deep Learning',
course_url='https://www.kaggle.com/learn/deep-learning',
course_forum_url='https://www.kaggle.com/learn-forum/161321'
)
lessons = [ {'topic': topic_name} for topic_name in
[
'Intro to Deep Learning and Computer Vision',
'Building Models from Convolutions',
'TensorFlow programming',
'Transfer Learning',
'Data Augmentation',
'A Deeper Understanding of Deep Learning',
'Deep Learning from Scratch',
'Dropout and Strides for Larger Models',
'Create Your First Submission'
]
]
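# Each notebook entry below attaches a tutorial or exercise file to a lesson
# via lesson_idx; dataset_sources/competition_sources name the Kaggle inputs
# it depends on, and scriptid/enable_gpu/enable_internet configure the kernel.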
notebooks = [
dict(
filename='tut1_intro.ipynb',
lesson_idx=0,
type='tutorial',
),
dict(
filename='ex1_convolutions.ipynb',
lesson_idx=0,
type='exercise',
scriptid=499266,
dataset_sources = ["keras/resnet50"],
competition_sources = ["dog-breed-identification"],
),
dict(
filename='tut2_building_models_from_convolutions.ipynb',
lesson_idx=1,
type='tutorial',
),
dict(
filename='tut3_programming_tf_and_keras.ipynb',
lesson_idx=2,
type='tutorial',
dataset_sources = ["keras/resnet50"],
competition_sources = ["dog-breed-identification"],
),
dict(
filename='ex3_programming_tf_and_keras.ipynb',
lesson_idx=2,
type='exercise',
enable_gpu=True,
scriptid=521452,
dataset_sources = [
"alexisbcook/resnet50",
"alexisbcook/vgg16",
"dansbecker/hot-dog-not-hot-dog"
],
),
dict(
filename='tut4_transfer_learning.ipynb',
lesson_idx=3,
type='tutorial',
dataset_sources = [
"keras/resnet50",
"dansbecker/urban-and-rural-photos"
],
),
dict(
filename='ex4_transfer_learning.ipynb',
lesson_idx=3,
type='exercise',
scriptid=532365,
dataset_sources = [
"alexisbcook/resnet50",
"dansbecker/dogs-gone-sideways"
],
enable_gpu=True,
),
dict(
filename='tut5_data_augmentation.ipynb',
lesson_idx=4,
type='tutorial',
dataset_sources = [
"keras/resnet50",
"dansbecker/urban-and-rural-photos",
],
),
dict(
filename='ex5_data_augmentation.ipynb',
lesson_idx=4,
type='exercise',
enable_gpu=True,
scriptid=536195,
dataset_sources = [
"alexisbcook/resnet50",
"dansbecker/dogs-gone-sideways"
],
),
dict(
filename='tut6_deep_understanding.ipynb',
lesson_idx=5,
type='tutorial',
),
dict(filename='tut7_dl_from_scratch.ipynb',
lesson_idx=6,
type='tutorial',
dataset_sources = ['zalando-research/fashionmnist'],
competition_sources=['digit-recognizer'],
),
dict(
filename='ex7_from_scratch.ipynb',
lesson_idx=6,
enable_gpu=True,
type='exercise',
scriptid=574269,
competition_sources=['digit-recognizer'],
dataset_sources = ['zalando-research/fashionmnist'],
),
dict(
filename='tut8_dropout_and_strides.ipynb',
lesson_idx=7,
type='tutorial',
competition_sources=['digit-recognizer'],
dataset_sources = ['zalando-research/fashionmnist'],
),
dict(
filename='ex8_dropout_strides.ipynb',
lesson_idx=7,
enable_gpu=True,
type='exercise',
scriptid=663261,
competition_sources=['digit-recognizer'],
dataset_sources = ['zalando-research/fashionmnist'],
),
dict(
filename='tut_tpus.ipynb',
lesson_idx=8,
type='tutorial',
competition_sources=['tpu-getting-started'],
enable_internet=True
),
dict(
filename='ex_tpus.ipynb',
lesson_idx=8,
type='exercise',
scriptid=10204702,
competition_sources=['tpu-getting-started'],
enable_internet=True
)
]
| 1,974 |
593 | #if defined(ESP8266) || defined(ESP32)
#include <pgmspace.h>
#else
#include <avr/pgmspace.h>
#endif
// 24 x 24 gridicons_cloud
const unsigned char gridicons_cloud[] PROGMEM = { /* 0X01,0X01,0XB4,0X00,0X40,0X00, */
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xC1, 0xFF, 0xFF, 0x00, 0x7F, 0xFF,
0x00, 0x7F, 0xFE, 0x00, 0x3F, 0xFC, 0x00, 0x1F,
0xFC, 0x00, 0x1F, 0xF0, 0x00, 0x1F, 0xE0, 0x00,
0x1F, 0xC0, 0x00, 0x07, 0x80, 0x00, 0x03, 0x80,
0x00, 0x01, 0x80, 0x00, 0x01, 0x80, 0x00, 0x01,
0xC0, 0x00, 0x01, 0xE0, 0x00, 0x03, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
};
| 432 |
1,163 | // Copyright 2021 The IREE Authors
//
// Licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#include "libm.h"
// https://en.cppreference.com/w/c/numeric/math/fma
LIBRT_EXPORT float fmaf(float x, float y, float z) {
// TODO(*): a real implementation :)
return (x * y) + z;
}
| 145 |
3,095 | #ifndef APPDATA_H
#define APPDATA_H
#include "head.h"
class AppData
{
public:
    // Global variables
static QStringList Intervals;
static QStringList Datas;
static QStringList Keys;
static QStringList Values;
    // Read the send-data list
static QString SendFileName;
static void readSendData();
    // Read the forwarding device data list
static QString DeviceFileName;
static void readDeviceData();
    // Save data to a file
static void saveData(const QString &data);
    // Add the NIC IP addresses to the combo box
static void loadIP(QComboBox *cbox);
};
#endif // APPDATA_H
| 278 |
481 | <filename>riposte/guides.py
import ast
from typing import Any, AnyStr, Callable, Dict, Text, Tuple
from riposte.exceptions import GuideError
def literal(value: str) -> Any:
try:
return ast.literal_eval(value)
except Exception:
raise GuideError(value, literal)
def encode(value: str) -> Any:
try:
return value.encode()
except Exception:
raise GuideError(value, encode)
def get_guides(annotation) -> Tuple[Callable]:
""" Based on given annotation get chain of guides. """
if annotation in (str, AnyStr, Text):
return ()
elif annotation is bytes:
return (encode,)
else:
return (literal,)
def extract_guides(func: Callable) -> Dict[str, Tuple[Callable]]:
""" Extract guides out of type-annotations. """
return {
arg: get_guides(annotation)
for arg, annotation in func.__annotations__.items()
}
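# Illustrative example (not part of the original module): for
#   def handler(name: str, count: int, raw: bytes): ...
# extract_guides(handler) yields {'name': (), 'count': (literal,),
# 'raw': (encode,)}; applying those guides turns "2" into 2 and "abc" into b"abc".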
| 352 |
412 | #include <assert.h>
int main()
{
while(1 == 1)
__CPROVER_assigns() __CPROVER_loop_invariant(1 == 1)
{
}
}
| 63 |
6,989 | # encoding: utf-8
"""Miscellaneous context managers.
"""
import warnings
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
class preserve_keys(object):
"""Preserve a set of keys in a dictionary.
Upon entering the context manager the current values of the keys
will be saved. Upon exiting, the dictionary will be updated to
restore the original value of the preserved keys. Preserved keys
which did not exist when entering the context manager will be
deleted.
Examples
--------
>>> d = {'a': 1, 'b': 2, 'c': 3}
>>> with preserve_keys(d, 'b', 'c', 'd'):
... del d['a']
... del d['b'] # will be reset to 2
... d['c'] = None # will be reset to 3
... d['d'] = 4 # will be deleted
... d['e'] = 5
... print(sorted(d.items()))
...
[('c', None), ('d', 4), ('e', 5)]
>>> print(sorted(d.items()))
[('b', 2), ('c', 3), ('e', 5)]
"""
def __init__(self, dictionary, *keys):
self.dictionary = dictionary
self.keys = keys
def __enter__(self):
# Actions to perform upon exiting.
to_delete = []
to_update = {}
d = self.dictionary
for k in self.keys:
if k in d:
to_update[k] = d[k]
else:
to_delete.append(k)
self.to_delete = to_delete
self.to_update = to_update
def __exit__(self, *exc_info):
d = self.dictionary
for k in self.to_delete:
d.pop(k, None)
d.update(self.to_update)
class NoOpContext(object):
"""
Deprecated
Context manager that does nothing."""
def __init__(self):
warnings.warn("""NoOpContext is deprecated since IPython 5.0 """,
DeprecationWarning, stacklevel=2)
def __enter__(self): pass
def __exit__(self, type, value, traceback): pass
| 863 |
677 | /*
* Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "InferredTypeTable.h"
#include "JSCInlines.h"
namespace JSC {
const ClassInfo InferredTypeTable::s_info = { "InferredTypeTable", 0, 0, CREATE_METHOD_TABLE(InferredTypeTable) };
InferredTypeTable* InferredTypeTable::create(VM& vm)
{
InferredTypeTable* result = new (NotNull, allocateCell<InferredTypeTable>(vm.heap)) InferredTypeTable(vm);
result->finishCreation(vm);
return result;
}
void InferredTypeTable::destroy(JSCell* cell)
{
InferredTypeTable* inferredTypeTable = static_cast<InferredTypeTable*>(cell);
inferredTypeTable->InferredTypeTable::~InferredTypeTable();
}
Structure* InferredTypeTable::createStructure(VM& vm, JSGlobalObject* globalObject, JSValue prototype)
{
return Structure::create(vm, globalObject, prototype, TypeInfo(CellType, StructureFlags), info());
}
void InferredTypeTable::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
InferredTypeTable* inferredTypeTable = jsCast<InferredTypeTable*>(cell);
ConcurrentJSLocker locker(inferredTypeTable->m_lock);
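    // Visit only entries whose inferred type is still relevant; invalidated
    // entries are cleared here so the table prunes itself during marking.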
for (auto& entry : inferredTypeTable->m_table) {
auto entryValue = entry.value;
if (!entryValue)
continue;
if (entryValue->isRelevant())
visitor.append(entryValue);
else
entry.value.clear();
}
}
InferredType* InferredTypeTable::get(const ConcurrentJSLocker&, UniquedStringImpl* uid)
{
auto iter = m_table.find(uid);
if (iter == m_table.end())
return nullptr;
InferredType* entryValue = iter->value.get();
if (!entryValue)
return nullptr;
// Take this opportunity to prune invalidated types.
if (!entryValue->isRelevant()) {
iter->value.clear();
return nullptr;
}
return entryValue;
}
InferredType* InferredTypeTable::get(UniquedStringImpl* uid)
{
ConcurrentJSLocker locker(m_lock);
return get(locker, uid);
}
InferredType* InferredTypeTable::get(PropertyName propertyName)
{
return get(propertyName.uid());
}
bool InferredTypeTable::willStoreValue(
VM& vm, PropertyName propertyName, JSValue value, StoredPropertyAge age)
{
// The algorithm here relies on the fact that only one thread modifies the hash map.
if (age == OldProperty) {
TableType::iterator iter = m_table.find(propertyName.uid());
if (iter == m_table.end())
return false; // Absence on replace => top.
InferredType* entryValue = iter->value.get();
if (!entryValue)
return false;
if (entryValue->willStoreValue(vm, propertyName, value))
return true;
iter->value.clear();
return false;
}
TableType::AddResult result;
{
ConcurrentJSLocker locker(m_lock);
result = m_table.add(propertyName.uid(), WriteBarrier<InferredType>());
}
InferredType* entryValue = result.iterator->value.get();
if (result.isNewEntry) {
InferredType* inferredType = InferredType::create(vm);
WTF::storeStoreFence();
result.iterator->value.set(vm, this, inferredType);
entryValue = inferredType;
} else if (!entryValue)
return false;
if (entryValue->willStoreValue(vm, propertyName, value))
return true;
result.iterator->value.clear();
return false;
}
void InferredTypeTable::makeTop(VM& vm, PropertyName propertyName, StoredPropertyAge age)
{
// The algorithm here relies on the fact that only one thread modifies the hash map.
if (age == OldProperty) {
TableType::iterator iter = m_table.find(propertyName.uid());
if (iter == m_table.end())
return; // Absence on replace => top.
InferredType* entryValue = iter->value.get();
if (!entryValue)
return;
entryValue->makeTop(vm, propertyName);
iter->value.clear();
return;
}
TableType::AddResult result;
{
ConcurrentJSLocker locker(m_lock);
result = m_table.add(propertyName.uid(), WriteBarrier<InferredType>());
}
if (!result.iterator->value)
return;
result.iterator->value->makeTop(vm, propertyName);
result.iterator->value.clear();
}
InferredTypeTable::InferredTypeTable(VM& vm)
: Base(vm, vm.inferredTypeTableStructure.get())
{
}
InferredTypeTable::~InferredTypeTable()
{
}
} // namespace JSC
| 2,089 |
665 | # clue-plotter v1.14
# Sensor and input plotter for Adafruit CLUE in CircuitPython
# This plots the sensors and three of the analogue inputs on
# the LCD display either with scrolling or wrap mode which
# approximates a slow timebase oscilloscope, left button selects
# next source or with long press changes palette or longer press
# turns on output for Mu plotting, right button changes plot style
# Tested with an Adafruit CLUE (Alpha) and CircuitPython and 5.0.0
# copy this file to CLUE board as code.py
# needs companion plot_sensor.py and plotter.py files
# MIT License
# Copyright (c) 2020 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import gc
import board
from plotter import Plotter
from plot_source import PlotSource, TemperaturePlotSource, PressurePlotSource, \
HumidityPlotSource, ColorPlotSource, ProximityPlotSource, \
IlluminatedColorPlotSource, VolumePlotSource, \
AccelerometerPlotSource, GyroPlotSource, \
MagnetometerPlotSource, PinPlotSource
from adafruit_clue import clue
debug = 1
# A list of all the data sources for plotting
sources = [TemperaturePlotSource(clue, mode="Celsius"),
TemperaturePlotSource(clue, mode="Fahrenheit"),
PressurePlotSource(clue, mode="Metric"),
PressurePlotSource(clue, mode="Imperial"),
HumidityPlotSource(clue),
ColorPlotSource(clue),
ProximityPlotSource(clue),
IlluminatedColorPlotSource(clue, mode="Red"),
IlluminatedColorPlotSource(clue, mode="Green"),
IlluminatedColorPlotSource(clue, mode="Blue"),
IlluminatedColorPlotSource(clue, mode="Clear"),
VolumePlotSource(clue),
AccelerometerPlotSource(clue),
GyroPlotSource(clue),
MagnetometerPlotSource(clue),
PinPlotSource([board.P0, board.P1, board.P2])
]
# The first source to select when plotting starts
current_source_idx = 0
# The various plotting styles - scroll is currently a jump scroll
stylemodes = (("lines", "scroll"), # draws lines between points
("lines", "wrap"),
("dots", "scroll"), # just points - slightly quicker
("dots", "wrap")
)
current_sm_idx = 0
def d_print(level, *args, **kwargs):
"""A simple conditional print for debugging based on global debug level."""
if not isinstance(level, int):
print(level, *args, **kwargs)
elif debug >= level:
print(*args, **kwargs)
def select_colors(plttr, src, def_palette):
"""Choose the colours based on the particular PlotSource
or forcing use of default palette."""
# otherwise use defaults
channel_colidx = []
palette = plttr.get_colors()
colors = PlotSource.DEFAULT_COLORS if def_palette else src.colors()
for col in colors:
try:
channel_colidx.append(palette.index(col))
except ValueError:
channel_colidx.append(PlotSource.DEFAULT_COLORS.index(col))
return channel_colidx
def ready_plot_source(plttr, srcs, def_palette, index=0):
"""Select the plot source by index from srcs list and then setup the
plot parameters by retrieving meta-data from the PlotSource object."""
src = srcs[index]
# Put the description of the source on screen at the top
source_name = str(src)
d_print(1, "Selecting source:", source_name)
plttr.clear_all()
plttr.title = source_name
plttr.y_axis_lab = src.units()
# The range on graph will start at this value
plttr.y_range = (src.initial_min(), src.initial_max())
plttr.y_min_range = src.range_min()
# Sensor/data source is expected to produce data between these values
plttr.y_full_range = (src.min(), src.max())
channels_from_src = src.values()
plttr.channels = channels_from_src # Can be between 1 and 3
plttr.channel_colidx = select_colors(plttr, src, def_palette)
src.start()
return (src, channels_from_src)
def wait_release(func, menu):
"""Calls func repeatedly waiting for it to return a false value
and goes through menu list as time passes.
The menu is a list of menu entries where each entry is a
two element list of time passed in seconds and text to display
for that period.
The entries must be in ascending time order."""
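    # For example, wait_release(lambda: clue.button_a, [(2, "Next\nsource")])
    # returns (selected_option_index, seconds_elapsed) once the button is released.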
start_t_ns = time.monotonic_ns()
menu_option = None
selected = False
for menu_option, menu_entry in enumerate(menu):
menu_time_ns = start_t_ns + int(menu_entry[0] * 1e9)
menu_text = menu_entry[1]
if menu_text:
plotter.info = menu_text
while time.monotonic_ns() < menu_time_ns:
if not func():
selected = True
break
if menu_text:
plotter.info = ""
if selected:
break
return (menu_option, (time.monotonic_ns() - start_t_ns) * 1e-9)
def popup_text(plttr, text, duration=1.0):
"""Place some text on the screen using info property of Plotter object
for duration seconds."""
plttr.info = text
time.sleep(duration)
plttr.info = None
mu_plotter_output = False
range_lock = False
initial_title = "CLUE Plotter"
# displayio has some static limits on text - pre-calculate the maximum
# length of all of the different PlotSource objects
max_title_len = max(len(initial_title), max([len(str(so)) for so in sources]))
plotter = Plotter(board.DISPLAY,
style=stylemodes[current_sm_idx][0],
mode=stylemodes[current_sm_idx][1],
title=initial_title,
max_title_len=max_title_len,
mu_output=mu_plotter_output,
debug=debug)
# If set to true this forces use of colour blindness friendly colours
use_def_pal = False
clue.pixel[0] = clue.BLACK # turn off the NeoPixel on the back of CLUE board
plotter.display_on()
# Using left and right here in case the CLUE is cased hiding A/B labels
popup_text(plotter,
"\n".join(["Button Guide",
"Left: next source",
" 2secs: palette",
" 4s: Mu plot",
" 6s: range lock",
"Right: style change"]), duration=10)
count = 0
while True:
# Set the source and start items
(source, channels) = ready_plot_source(plotter, sources,
use_def_pal,
current_source_idx)
while True:
# Read data from sensor or voltage from pad
all_data = source.data()
# Check for left (A) and right (B) buttons
if clue.button_a:
# Wait for button release with time-based menu
opt, _ = wait_release(lambda: clue.button_a,
[(2, "Next\nsource"),
(4,
("Source" if use_def_pal else "Default")
+ "\npalette"),
(6,
"Mu output "
+ ("off" if mu_plotter_output else "on")),
(8,
"Range lock\n" + ("off" if range_lock else "on"))
])
if opt == 0: # change plot source
current_source_idx = (current_source_idx + 1) % len(sources)
break # to leave inner while and select the new source
elif opt == 1: # toggle palette
use_def_pal = not use_def_pal
plotter.channel_colidx = select_colors(plotter, source,
use_def_pal)
elif opt == 2: # toggle Mu output
mu_plotter_output = not mu_plotter_output
plotter.mu_output = mu_plotter_output
else: # toggle range lock
range_lock = not range_lock
plotter.y_range_lock = range_lock
if clue.button_b: # change plot style and mode
current_sm_idx = (current_sm_idx + 1) % len(stylemodes)
(new_style, new_mode) = stylemodes[current_sm_idx]
wait_release(lambda: clue.button_b,
[(2, new_style + "\n" + new_mode)])
d_print(1, "Graph change", new_style, new_mode)
plotter.change_stylemode(new_style, new_mode)
# Display it
if channels == 1:
plotter.data_add((all_data,))
else:
plotter.data_add(all_data)
# An occasional print of free heap
if debug >=3 and count % 15 == 0:
gc.collect() # must collect() first to measure free memory
print("Free memory:", gc.mem_free())
count += 1
source.stop()
plotter.display_off()
| 4,307 |
879 | package org.zstack.header.network.l2;
import org.zstack.header.message.APIReply;
import org.zstack.header.rest.RestResponse;
import java.util.List;
import static java.util.Arrays.asList;
import java.sql.Timestamp;
@RestResponse(fieldsTo = "all")
public class APIGetCandidateL2NetworksForAttachingClusterReply extends APIReply {
private List<L2NetworkData> inventories;
public List<L2NetworkData> getInventories() {
return inventories;
}
public void setInventories(List<L2NetworkData> inventories) {
this.inventories = inventories;
}
public static APIGetCandidateL2NetworksForAttachingClusterReply __example__() {
APIGetCandidateL2NetworksForAttachingClusterReply reply = new APIGetCandidateL2NetworksForAttachingClusterReply();
L2NetworkData date = new L2NetworkData();
date.setName("l2");
date.setCreateDate(new Timestamp(org.zstack.header.message.DocUtils.date));
date.setLastOpDate(new Timestamp(org.zstack.header.message.DocUtils.date));
date.setDescription("test");
date.setPhysicalInterface("eth0");
reply.setInventories(asList(date));
return reply;
}
}
| 446 |
310 | {
"name": "Monokai Pro",
"description": "A colour scheme for text editors.",
"url": "https://monokai.pro/"
}
| 42 |
1,062 | /**
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.mr4c.algorithm;
import com.google.common.collect.Lists;
import com.google.mr4c.keys.DataKeyDimension;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.junit.*;
import static org.junit.Assert.*;
public class AlgorithmSchemaTest {
private AlgorithmSchema m_algoSchema;
private Set<String> m_requiredInputs;
private Set<String> m_optionalInputs;
private Set<String> m_excludedInputs;
@Before public void setup() throws Exception {
m_algoSchema = buildSchema();
buildInputSets();
}
@Test public void testEquals() {
AlgorithmSchema algoSchema = buildSchema();
assertEquals(m_algoSchema, algoSchema);
}
@Test public void testNotEqualInput() {
AlgorithmSchema algoSchema = buildSchema();
algoSchema.addInputDataset("xxxx");
assertFalse(m_algoSchema.equals(algoSchema));
}
@Test public void testNotEqualOptionalInput() {
AlgorithmSchema algoSchema = buildSchema();
algoSchema.addInputDataset("xxxx", true);
assertFalse(m_algoSchema.equals(algoSchema));
}
@Test public void testNotEqualExcludedInput() {
AlgorithmSchema algoSchema = buildSchema();
algoSchema.addInputDataset("xxxx", false, true);
assertFalse(m_algoSchema.equals(algoSchema));
}
@Test public void testNotEqualOutput() {
AlgorithmSchema algoSchema = buildSchema();
algoSchema.addOutputDataset("xxxx");
assertFalse(m_algoSchema.equals(algoSchema));
}
@Test public void testNotEqualDimension() {
AlgorithmSchema algoSchema = buildSchema();
algoSchema.addExpectedDimension(new DataKeyDimension("xxxx"));
assertFalse(m_algoSchema.equals(algoSchema));
}
@Test public void testRequiredInputs() {
assertEquals(m_requiredInputs, m_algoSchema.getRequiredInputDatasets());
}
@Test public void testOptionalInputs() {
assertEquals(m_optionalInputs, m_algoSchema.getOptionalInputDatasets());
}
@Test public void testExcludedInputs() {
assertEquals(m_excludedInputs, m_algoSchema.getExcludedInputDatasets());
}
private AlgorithmSchema buildSchema() {
return AlgorithmDataTestUtils.buildAlgorithmSchema();
}
private void buildInputSets() {
m_requiredInputs = new HashSet<String>(Arrays.asList("input1", "input2", "input4"));
m_optionalInputs = new HashSet<String>(Arrays.asList("input3", "input5"));
m_excludedInputs = new HashSet<String>(Arrays.asList("input4", "input5"));
}
}
| 1,047 |
1,872 | <reponame>aisk/ironpython3
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
##
## Test the io.StringIO
## based on cStringIO_test.py
##
import unittest
import io
from iptest import run_test
text = "Line 1\nLine 2\nLine 3\nLine 4\nLine 5"
class StringIOTest(unittest.TestCase):
def call_close(self, i):
self.assertEqual(i.closed, False)
i.close()
self.assertEqual(i.closed, True)
i.close()
self.assertEqual(i.closed, True)
i.close()
self.assertEqual(i.closed, True)
def call_isatty(self, i):
self.assertEqual(i.isatty(), False)
# read
def call_read(self, i):
self.assertEqual(i.read(), text)
self.assertEqual(i.read(), "")
self.assertEqual(i.read(), "")
i.close()
i.close()
self.assertRaises(ValueError, i.read)
# readline
def call_readline(self, i):
self.assertEqual(i.readline(), "Line 1\n")
self.assertEqual(i.readline(), "Line 2\n")
self.assertEqual(i.readline(), "Line 3\n")
self.assertEqual(i.readline(), "Line 4\n")
self.assertEqual(i.readline(), "Line 5")
self.assertEqual(i.readline(), "")
i.close()
self.assertRaises(ValueError, i.readline)
def call_readline_n(self, i):
self.assertEqual(i.readline(50), "Line 1\n")
self.assertEqual(i.readline(0), "")
self.assertEqual(i.readline(1), "L")
self.assertEqual(i.readline(9), "ine 2\n")
self.assertEqual(i.readline(50), "Line 3\n")
self.assertEqual(i.readline(6), "Line 4")
self.assertEqual(i.readline(50), "\n")
self.assertEqual(i.readline(50), "Line 5")
i.close()
self.assertRaises(ValueError, i.readline)
# readlines
def call_readlines(self, i):
self.assertEqual(i.readlines(), ["Line 1\n", "Line 2\n", "Line 3\n", "Line 4\n", "Line 5"])
self.assertEqual(i.readlines(), [])
i.close()
self.assertRaises(ValueError, i.readlines)
def call_readlines_n(self, i):
self.assertEqual(i.readlines(10), ["Line 1\n", "Line 2\n"])
self.assertEqual(i.readlines(50), ["Line 3\n", "Line 4\n", "Line 5"])
self.assertEqual(i.readlines(50), [])
i.close()
self.assertRaises(ValueError, i.readlines)
# getvalue
def call_getvalue(self, i):
self.assertEqual(i.getvalue(), text)
self.assertEqual(i.read(6), "Line 1")
self.assertEqual(i.getvalue(), text)
i.close()
self.assertRaises(ValueError, i.getvalue)
# __iter__, next
def call_next(self, i):
self.assertEqual(i.__iter__(), i)
self.assertEqual(next(i), "Line 1\n")
self.assertEqual(next(i), "Line 2\n")
self.assertEqual([l for l in i], ["Line 3\n", "Line 4\n", "Line 5"])
i.close()
self.assertRaises(ValueError, i.readlines)
# read, readline, reset
def call_reset(self, i):
self.assertEqual(i.read(0), "")
self.assertEqual(i.read(4), "Line")
self.assertEqual(i.readline(), " 1\n")
i.seek(0)
self.assertEqual(i.read(4), "Line")
self.assertEqual(i.readline(), " 1\n")
i.seek(0)
self.assertEqual(i.read(37),text)
i.seek(0)
self.assertEqual(i.read(38),text)
i.seek(0)
# seek, tell, read
def call_seek_tell(self, i):
self.assertEqual(i.read(4), "Line")
self.assertEqual(i.tell(), 4)
i.seek(10)
self.assertEqual(i.tell(), 10)
self.assertEqual(i.read(3), "e 2")
i.seek(15, 0)
self.assertEqual(i.tell(), 15)
self.assertEqual(i.read(5), "ine 3")
        # seeking from current position or from end is not supported unless offset is 0
#i.seek(3, 1)
#self.assertEqual(i.read(4), "ne 4")
#i.seek(-5, 2)
#self.assertEqual(i.tell(), len(text) - 5)
#self.assertEqual(i.read(), "ine 5")
i.seek(1000)
self.assertEqual(i.tell(), 1000)
self.assertEqual(i.read(), "")
i.seek(2000, 0)
self.assertEqual(i.tell(), 2000)
self.assertEqual(i.read(), "")
        # seeking from current position or from end is not supported unless offset is 0
# i.seek(400, 1)
#self.assertEqual(i.tell(), 2400)
#self.assertEqual(i.read(), "")
#i.seek(100, 2)
#self.assertEqual(i.tell(), len(text) + 100)
#self.assertEqual(i.read(), "")
i.close()
self.assertRaises(ValueError, i.tell)
self.assertRaises(ValueError, i.seek, 0)
self.assertRaises(ValueError, i.seek, 0, 2)
# truncate
def call_truncate(self, i):
self.assertEqual(i.read(6), "Line 1")
self.assertEqual(i.truncate(20), 20)
# self.assertEqual(i.tell(), 20)
self.assertEqual(i.getvalue(), "Line 1\nLine 2\nLine 3")
i.truncate(30)
self.assertEqual(i.tell(), 6)
self.assertEqual(i.getvalue(), "Line 1\nLine 2\nLine 3")
i.seek(0)
self.assertEqual(i.tell(), 0)
self.assertEqual(i.read(6), "Line 1")
i.truncate()
self.assertEqual(i.getvalue(), "Line 1")
i.close()
self.assertRaises(ValueError, i.truncate)
self.assertRaises(ValueError, i.truncate, 10)
# write
def call_write(self, o):
self.assertEqual(o.getvalue(), text)
o.write("Data")
self.assertRaises(TypeError, o.write, None)
o.write(" 1")
self.assertEqual(o.read(7), "\nLine 2")
self.assertEqual(o.getvalue(), "Data 1\nLine 2\nLine 3\nLine 4\nLine 5")
o.close()
self.assertRaises(ValueError, o.write, "Hello")
# writelines
def call_writelines(self, o):
self.assertEqual(o.getvalue(), text)
o.writelines(["Data 1", "Data 2"])
self.assertEqual(o.read(8), "2\nLine 3")
self.assertEqual(o.getvalue(), "Data 1Data 22\nLine 3\nLine 4\nLine 5")
self.assertRaises(TypeError, o.writelines, [None])
o.close()
self.assertRaises(ValueError, o.writelines, "Hello")
# softspace
def call_softspace(self, o):
o.write("Hello")
o.write("Hi")
o.softspace = 1
self.assertEqual(o.softspace, 1)
self.assertEqual(o.getvalue(), "HelloHiLine 2\nLine 3\nLine 4\nLine 5")
# flush
def call_flush(self, i):
i.flush()
self.assertEqual(i,i)
def init_StringI(self):
return io.StringIO(text)
def init_StringO(self):
o = io.StringIO()
o.write(text)
o.seek(0)
return o
def init_emptyStringI(self):
return io.StringIO("")
def test_empty(self):
i = self.init_emptyStringI()
# test closed
self.assertEqual(i.closed,False)
i.close()
self.assertEqual(i.closed,True)
#test read
i = self.init_emptyStringI()
self.assertEqual(i.read(),"")
i.close()
self.assertRaises(ValueError, i.read)
i.close()
self.assertRaises(ValueError, i.read, 2)
#test readline
i = self.init_emptyStringI()
self.assertEqual(i.readline(),"")
i.close()
self.assertRaises(ValueError, i.readline)
i = self.init_emptyStringI()
self.assertEqual(i.readline(0),"")
i.close()
self.assertRaises(ValueError, i.readline)
#test readlines
i = self.init_emptyStringI()
self.assertEqual(i.readlines(),[])
i = self.init_emptyStringI()
self.assertEqual(i.readlines(0),[])
#test getvalue
i = self.init_emptyStringI()
self.assertEqual(i.getvalue(),"")
# getvalue does not accept argument
# self.assertEqual(i.getvalue(True),"")
i.close()
self.assertRaises(ValueError, i.getvalue)
#test iter
i = self.init_emptyStringI()
self.assertEqual(i.__iter__(), i)
#test reset
i = self.init_emptyStringI()
self.assertEqual(i.read(0), "")
i.seek(0)
self.assertEqual(i.read(1), "")
i.seek(0)
self.assertEqual(i.readline(), "")
i.close()
self.assertRaises(ValueError, i.read, 2)
self.assertRaises(ValueError, i.readline)
#test seek,tell,read
i = self.init_emptyStringI()
self.assertEqual(i.read(0), "")
self.assertEqual(i.tell(), 0)
self.assertEqual(i.read(1), "")
self.assertEqual(i.tell(), 0)
i.seek(2)
self.assertEqual(i.tell(), 2)
self.assertEqual(i.read(),"")
i.close()
self.assertRaises(ValueError, i.tell)
self.assertRaises(ValueError, i.seek, 0)
self.assertRaises(ValueError, i.seek, 0, 2)
#test truncate
i = self.init_emptyStringI()
i.truncate(0)
self.assertEqual(i.tell(), 0)
i.truncate(1)
self.assertEqual(i.tell(), 0)
i.close()
self.assertRaises(ValueError, i.truncate)
def test_cp8567(self):
for x in ["", "1", "12", "12345"]:
for i in [5, 6, 7, 2**8, 100, 2**16-1, 2**16, 2**16, 2**31-2, 2**31-1]:
cio = io.StringIO(x)
                # make sure it doesn't throw and it doesn't change seek position
cio.truncate(i)
self.assertEqual(cio.tell(), 0)
cio.close()
def test_i_o(self):
for t in [ self.call_close,
self.call_isatty,
self.call_read,
self.call_readline,
self.call_readline_n,
self.call_readlines,
self.call_readlines_n,
self.call_getvalue,
self.call_next,
self.call_reset,
self.call_seek_tell,
self.call_truncate,
self.call_flush ]:
i = self.init_StringI()
t(i)
o = self.init_StringO()
t(o)
def test_o(self):
for t in [ self.call_write,
self.call_writelines,
self.call_softspace ]:
o = self.init_StringO()
t(o)
def test_cp22017(self):
m = io.StringIO()
m.seek(2)
m.write("hello!")
self.assertEqual(m.getvalue(), '\x00\x00hello!')
m.seek(2)
self.assertEqual(m.getvalue(), '\x00\x00hello!')
# tests from <NAME>, cp34683
def test_read(self):
# test stringio is readable
with io.StringIO("hello world\r\n") as infile:
self.assertSequenceEqual(infile.readline(), "hello world\r\n")
def test_seekable(self):
# test stringio is seekable
with io.StringIO("hello") as infile:
infile.seek(0, 2)
infile.write(" world\r\n")
self.assertSequenceEqual(infile.getvalue(), "hello world\r\n")
def test_write(self):
# test stringio is writable
with io.StringIO() as output_file:
output_file.write("hello")
output_file.write(" world\n")
self.assertSequenceEqual(output_file.getvalue(), "hello world\n")
# test from cp26105
def test_redirect(self):
import sys
stdout_save = sys.stdout
capture = io.StringIO()
sys.stdout = capture
print("Testing")
sys.stdout = stdout_save
self.assertEqual(capture.getvalue(), "Testing\n")
run_test(__name__)
| 5,944 |
4,920 | {"hmacKey": {"hmacKeyString": "<KEY>", "size": 256}, "size": 256, "aesKeyString": "<KEY>", "mode": "CBC"} | 41 |
474 | package org.javacord.api.event.server.emoji;
/**
* A custom emoji delete event.
*/
public interface KnownCustomEmojiDeleteEvent extends KnownCustomEmojiEvent {
}
| 51 |
746 | # Copyright (c) OpenMMLab. All rights reserved.
from .module import C2, C, func
__all__ = ['func', 'C', 'C2']
| 42 |
343 | from django import forms
from django.utils.translation import ugettext_lazy as _
from mayan.apps.acls.models import AccessControlList
from mayan.apps.common.settings import (
setting_project_title, setting_project_url
)
from mayan.apps.views.forms import BackendDynamicForm
from .classes import MailerBackend
from .models import UserMailer
from .permissions import permission_user_mailer_use
from .settings import (
setting_attachment_body_template, setting_attachment_subject_template,
setting_document_link_body_template, setting_document_link_subject_template
)
from .validators import validate_email_multiple
class ObjectMailForm(forms.Form):
def __init__(self, *args, **kwargs):
as_attachment = kwargs.pop('as_attachment', False)
user = kwargs.pop('user', None)
super().__init__(*args, **kwargs)
if as_attachment:
self.fields[
'subject'
].initial = setting_attachment_subject_template.value
self.fields[
'body'
].initial = setting_attachment_body_template.value % {
'project_title': setting_project_title.value,
'project_website': setting_project_url.value
}
else:
self.fields[
'subject'
].initial = setting_document_link_subject_template.value
self.fields['body'].initial = setting_document_link_body_template.value % {
'project_title': setting_project_title.value,
'project_website': setting_project_url.value
}
queryset = AccessControlList.objects.restrict_queryset(
permission=permission_user_mailer_use,
queryset=UserMailer.objects.filter(enabled=True), user=user
)
self.fields['user_mailer'].queryset = queryset
try:
self.fields['user_mailer'].initial = queryset.get(default=True)
except UserMailer.DoesNotExist:
pass
email = forms.CharField(
help_text=_(
'Email address of the recipient. Can be multiple addresses '
'separated by comma or semicolon.'
), label=_('Email address'), validators=[validate_email_multiple]
)
subject = forms.CharField(label=_('Subject'), required=False)
body = forms.CharField(
label=_('Body'), widget=forms.widgets.Textarea(), required=False
)
user_mailer = forms.ModelChoiceField(
help_text=_(
'The email profile that will be used to send this email.'
), label=_('Mailing profile'), queryset=UserMailer.objects.none()
)
class UserMailerBackendSelectionForm(forms.Form):
backend = forms.ChoiceField(
choices=(), help_text=_('The driver to use when sending emails.'),
label=_('Backend')
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['backend'].choices = MailerBackend.get_choices()
class UserMailerDynamicForm(BackendDynamicForm):
class Meta:
fields = ('label', 'enabled')
model = UserMailer
class UserMailerTestForm(forms.Form):
email = forms.CharField(
help_text=_(
'Email address of the recipient. Can be multiple addresses '
'separated by comma or semicolon.'
), label=_('Email address'), validators=[validate_email_multiple]
)
| 1,402 |
8,570 | <gh_stars>1000+
{
"$schema": "http://json.schemastore.org/ide.host",
"order": 610,
"icon": "icon.png",
"disableHttpsSymbol": "NoHttps",
"symbolInfo": [
{
"id": "Hosted",
"isVisible": true,
"persistenceScope": "templateGroup"
},
{
"id": "PWA",
"isVisible": true,
"persistenceScope": "templateGroup"
},
{
"id": "UseProgramMain",
"isVisible": true,
"persistenceScope": "shared",
"persistenceScopeName": "Microsoft"
}
]
}
| 246 |
563 | package com.gentics.mesh.core.endpoint.admin.consistency;
import java.util.ArrayList;
import java.util.List;
import com.gentics.mesh.core.rest.admin.consistency.InconsistencyInfo;
import com.gentics.mesh.core.rest.admin.consistency.InconsistencySeverity;
import com.gentics.mesh.core.rest.admin.consistency.RepairAction;
/**
* POJO of consistency check result.
*/
public class ConsistencyCheckResult {
private static final int MAX_RESULTS = 200;
private long repairCount = 0;
private List<InconsistencyInfo> results = new ArrayList<>(MAX_RESULTS);
public long getRepairCount() {
return repairCount;
}
public List<InconsistencyInfo> getResults() {
return results;
}
/**
* Add inconsistency information to the result.
*
* @param msg
* @param uuid
* @param severity
*/
public void addInconsistency(String msg, String uuid, InconsistencySeverity severity) {
addInconsistency(new InconsistencyInfo().setDescription(msg).setElementUuid(uuid).setSeverity(severity));
}
/**
* Add inconsistency information to the result.
*
* @param info
*/
public void addInconsistency(InconsistencyInfo info) {
if (info.isRepaired()) {
repairCount++;
}
// Keep the list of results small
if (results.size() < MAX_RESULTS) {
results.add(info);
}
}
/**
* Add inconsistency information to the result.
*
* @param msg
* Inconsistency message
* @param uuid
* Uuid of the related element for which the inconsistency was reported
* @param severity
* Severity of the reported inconsistency
* @param repaired
* Was the inconsistency repaired
* @param action
* Is a repair action possible
*/
public void addInconsistency(String msg, String uuid, InconsistencySeverity severity, boolean repaired, RepairAction action) {
addInconsistency(
new InconsistencyInfo().setDescription(msg).setElementUuid(uuid).setSeverity(severity).setRepaired(repaired).setRepairAction(action));
}
/**
* Add the given results into this result.
*
* @param results
* @return Fluent API
*/
public ConsistencyCheckResult merge(ConsistencyCheckResult... results) {
for (ConsistencyCheckResult result : results) {
merge(result);
}
return this;
}
/**
* Merge the two results into this result.
*
* @param result
* @return Fluent API
*/
public ConsistencyCheckResult merge(ConsistencyCheckResult result) {
getResults().addAll(result.getResults());
repairCount += result.getRepairCount();
return this;
}
}
| 871 |
4,262 | <reponame>rikvb/camel<filename>components/camel-ftp/src/main/java/org/apache/camel/component/file/remote/FtpsOperations.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.file.remote;
import java.io.IOException;
import org.apache.camel.Exchange;
import org.apache.camel.component.file.GenericFileOperationFailedException;
import org.apache.camel.util.ObjectHelper;
import org.apache.commons.net.ftp.FTPClientConfig;
import org.apache.commons.net.ftp.FTPSClient;
/**
* FTP Secure (FTP over SSL/TLS) operations
*/
public class FtpsOperations extends FtpOperations {
public FtpsOperations(FTPSClient client, FTPClientConfig clientConfig) {
super(client, clientConfig);
}
@Override
public boolean connect(RemoteFileConfiguration configuration, Exchange exchange)
throws GenericFileOperationFailedException {
boolean answer = super.connect(configuration, exchange);
FtpsConfiguration config = (FtpsConfiguration) configuration;
if (answer) {
try {
String execProt = config.getExecProt();
Long execPbsz = config.getExecPbsz();
                // use default values for prot and pbsz, unless told not to do so
if (!config.isDisableSecureDataChannelDefaults()) {
if (ObjectHelper.isEmpty(execProt)) {
execProt = "P";
}
if (ObjectHelper.isEmpty(execPbsz)) {
execPbsz = 0L;
}
}
if (execPbsz != null) {
log.debug("FTPClient initializing with execPbsz={}", execPbsz);
getFtpClient().execPBSZ(execPbsz);
}
if (execProt != null) {
log.debug("FTPClient initializing with execProt={}", execProt);
getFtpClient().execPROT(execProt);
}
} catch (IOException e) {
throw new GenericFileOperationFailedException(
client.getReplyCode(), client.getReplyString(), e.getMessage(), e);
} finally {
if (exchange != null) {
// store client reply information after the operation
exchange.getIn().setHeader(FtpConstants.FTP_REPLY_CODE, client.getReplyCode());
exchange.getIn().setHeader(FtpConstants.FTP_REPLY_STRING, client.getReplyString());
}
}
}
return answer;
}
@Override
protected FTPSClient getFtpClient() {
return (FTPSClient) super.getFtpClient();
}
}
| 1,446 |
348 | {"nom":"Saulles","circ":"1ère circonscription","dpt":"Haute-Marne","inscrits":73,"abs":20,"votants":53,"blancs":4,"nuls":7,"exp":42,"res":[{"nuance":"LR","nom":"<NAME>","voix":26},{"nuance":"REM","nom":"<NAME>","voix":16}]} | 91 |
779 | // Based on: https://github.com/iovisor/bcc/blob/master/libbpf-tools/tcpconnect.c
#include "vmlinux.h"
#include "solo_types.h"
#include "bpf/bpf_helpers.h"
#include "bpf/bpf_core_read.h"
#include "bpf/bpf_tracing.h"
char __license[] SEC("license") = "Dual MIT/GPL";
struct dimensions_t {
ipv4_addr saddr;
ipv4_addr daddr;
} __attribute__((packed));
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 8192);
__type(key, u32);
__type(value, struct sock *);
__uint(map_flags, BPF_F_NO_PREALLOC);
} sockets SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 8192);
__type(key, struct dimensions_t);
__type(value, u64);
} events_hash SEC(".maps.counter");
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 1 << 24);
__type(value, struct dimensions_t);
} events_ring SEC(".maps.counter");
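// How the three maps work together: sockets stashes the struct sock * between
// the entry and return kprobes (keyed by thread id), events_hash counts
// connects per (saddr, daddr) pair, and events_ring streams each connect to
// user space through the ring buffer.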
static __always_inline int
enter_tcp_connect(struct pt_regs *ctx, struct sock *sk)
{
__u64 pid_tgid = bpf_get_current_pid_tgid();
__u32 tid = pid_tgid;
bpf_printk("enter called");
bpf_printk("enter: setting sk for tid: %u", tid);
bpf_map_update_elem(&sockets, &tid, &sk, 0);
return 0;
}
static __always_inline int
exit_tcp_connect(struct pt_regs *ctx, int ret)
{
__u64 pid_tgid = bpf_get_current_pid_tgid();
__u32 tid = pid_tgid;
struct sock **skpp;
struct sock *sk;
__u32 saddr;
__u32 daddr;
u64 val;
u64 *valp;
struct dimensions_t hash_key = {};
bpf_printk("exit: getting sk for tid: '%u', ret is: '%d'", tid, ret);
skpp = bpf_map_lookup_elem(&sockets, &tid);
if (!skpp) {
bpf_printk("exit: no pointer for tid, returning: %u", tid);
return 0;
}
sk = *skpp;
bpf_printk("exit: found sk for tid: %u", tid);
BPF_CORE_READ_INTO(&saddr, sk, __sk_common.skc_rcv_saddr);
BPF_CORE_READ_INTO(&daddr, sk, __sk_common.skc_daddr);
hash_key.saddr = saddr;
hash_key.daddr = daddr;
// Set Hash map
valp = bpf_map_lookup_elem(&events_hash, &hash_key);
if (!valp) {
bpf_printk("no entry for {saddr: %u, daddr: %u}", hash_key.saddr, hash_key.daddr);
val = 1;
}
else {
bpf_printk("found existing value '%llu' for {saddr: %u, daddr: %u}", *valp, hash_key.saddr, hash_key.daddr);
val = *valp + 1;
}
bpf_map_update_elem(&events_hash, &hash_key, &val, 0);
bpf_map_delete_elem(&sockets, &tid);
// Set Ringbuffer
struct dimensions_t *ring_val;
ring_val = bpf_ringbuf_reserve(&events_ring, sizeof(struct dimensions_t), 0);
if (!ring_val) {
return 0;
}
ring_val->saddr = saddr;
ring_val->daddr = daddr;
bpf_ringbuf_submit(ring_val, 0);
return 0;
}
SEC("kprobe/tcp_v4_connect")
int BPF_KPROBE(tcp_v4_connect, struct sock *sk)
{
return enter_tcp_connect(ctx, sk);
}
SEC("kretprobe/tcp_v4_connect")
int BPF_KRETPROBE(tcp_v4_connect_ret, int ret)
{
return exit_tcp_connect(ctx, ret);
}
| 1,252 |
580 | import pytest
from icevision.all import *
from icevision.models.torchvision import keypoint_rcnn
def test_keypoints_rcnn_show_results(ochuman_ds, monkeypatch):
monkeypatch.setattr(plt, "show", lambda: None)
train_ds, valid_ds = ochuman_ds
model = keypoint_rcnn.model(num_keypoints=19)
keypoint_rcnn.show_results(model=model, dataset=valid_ds, num_samples=1, ncols=1)
| 146 |
375 | #include "fzf.h"
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
// TODO(conni2461): UNICODE HEADER
#define UNICODE_MAXASCII 0x7f
/* Helpers */
#define free_alloc(obj) \
if (obj.allocated) { \
free(obj.data); \
}
#define gen_slice(name, type) \
typedef struct { \
type *data; \
size_t size; \
} name##_slice_t; \
static name##_slice_t slice_##name(type *input, size_t from, size_t to) { \
return (name##_slice_t){.data = input + from, .size = to - from}; \
} \
static name##_slice_t slice_##name##_right(type *input, size_t to) { \
return slice_##name(input, 0, to); \
}
#define gen_simple_slice(name, type) \
typedef struct { \
type *data; \
size_t size; \
} name##_slice_t; \
static name##_slice_t slice_##name(type *input, size_t from, size_t to) { \
return (name##_slice_t){.data = input + from, .size = to - from}; \
}
gen_slice(i16, int16_t);
gen_simple_slice(i32, int32_t);
gen_slice(str, const char);
#undef gen_slice
#undef gen_simple_slice
/* TODO(conni2461): additional types (utf8) */
typedef int32_t char_class;
typedef char byte;
typedef enum {
score_match = 16,
score_gap_start = -3,
score_gap_extention = -1,
bonus_boundary = score_match / 2,
bonus_non_word = score_match / 2,
bonus_camel_123 = bonus_boundary + score_gap_extention,
bonus_consecutive = -(score_gap_start + score_gap_extention),
bonus_first_char_multiplier = 2,
} score_t;
typedef enum {
char_non_word = 0,
char_lower,
char_upper,
char_letter,
char_number
} char_types;
typedef struct {
const char *data;
size_t size;
} fzf_string_t;
static int32_t index_byte(fzf_string_t *string, char b) {
for (size_t i = 0; i < string->size; i++) {
if (string->data[i] == b) {
return (int32_t)i;
}
}
return -1;
}
static size_t leading_whitespaces(fzf_string_t *str) {
size_t whitespaces = 0;
for (size_t i = 0; i < str->size; i++) {
if (!isspace((unsigned char)str->data[i])) {
break;
}
whitespaces++;
}
return whitespaces;
}
static size_t trailing_whitespaces(fzf_string_t *str) {
size_t whitespaces = 0;
  // Count from the end; compare i > 0 and index data[i - 1] so the unsigned
  // counter cannot underflow when the string is empty or all whitespace.
  for (size_t i = str->size; i > 0; i--) {
    if (!isspace((unsigned char)str->data[i - 1])) {
break;
}
whitespaces++;
}
return whitespaces;
}
static void copy_runes(fzf_string_t *src, fzf_i32_t *destination) {
for (size_t i = 0; i < src->size; i++) {
destination->data[i] = (int32_t)src->data[i];
}
}
static void copy_into_i16(i16_slice_t *src, fzf_i16_t *dest) {
for (size_t i = 0; i < src->size; i++) {
dest->data[i] = src->data[i];
}
}
// char* helpers
static char *trim_left(char *str, size_t *len, char trim) {
for (size_t i = 0; i < *len; i++) {
if (str[0] == trim) {
(*len)--;
str++;
} else {
break;
}
}
return str;
}
static bool has_prefix(const char *str, const char *prefix, size_t prefix_len) {
return strncmp(prefix, str, prefix_len) == 0;
}
static bool has_suffix(const char *str, size_t len, const char *suffix,
size_t suffix_len) {
return len >= suffix_len &&
strncmp(slice_str(str, len - suffix_len, len).data, suffix,
suffix_len) == 0;
}
// TODO(conni2461): REFACTOR
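// Replaces every occurrence of rep in orig with with, returning a newly
// malloc'd string the caller must free; returns NULL when orig/rep are NULL,
// rep is empty, or allocation fails (a NULL with is treated as "").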
static char *str_replace(char *orig, char *rep, char *with) {
char *result, *ins, *tmp;
size_t len_rep, len_with, len_front, count;
if (!orig || !rep) {
return NULL;
}
len_rep = strlen(rep);
if (len_rep == 0) {
return NULL;
}
if (!with) {
with = "";
}
len_with = strlen(with);
ins = orig;
for (count = 0; (tmp = strstr(ins, rep)); ++count) {
ins = tmp + len_rep;
}
tmp = result =
(char *)malloc(strlen(orig) + (len_with - len_rep) * count + 1);
if (!result) {
return NULL;
}
while (count--) {
ins = strstr(orig, rep);
len_front = (size_t)(ins - orig);
tmp = strncpy(tmp, orig, len_front) + len_front;
tmp = strcpy(tmp, with) + len_with;
orig += len_front + len_rep;
}
strcpy(tmp, orig);
return result;
}
// TODO(conni2461): REFACTOR
static char *str_tolower(char *str, size_t size) {
char *lower_str = (char *)malloc((size + 1) * sizeof(char));
for (size_t i = 0; i < size; i++) {
lower_str[i] = (char)tolower(str[i]);
}
lower_str[size] = '\0';
return lower_str;
}
static int16_t max16(int16_t a, int16_t b) {
return (a > b) ? a : b;
}
static size_t min64u(size_t a, size_t b) {
return (a < b) ? a : b;
}
static fzf_position_t *pos_array(bool with_pos, size_t len) {
if (with_pos) {
fzf_position_t *pos = (fzf_position_t *)malloc(sizeof(fzf_position_t));
pos->size = 0;
pos->cap = len;
pos->data = (uint32_t *)malloc(len * sizeof(uint32_t));
return pos;
}
return NULL;
}
static void resize_pos(fzf_position_t *pos, size_t add_len, size_t comp) {
if (pos->size + comp > pos->cap) {
pos->cap += add_len;
pos->data = (uint32_t *)realloc(pos->data, sizeof(uint32_t) * pos->cap);
}
}
static void append_pos(fzf_position_t *pos, size_t value) {
resize_pos(pos, pos->cap, 1);
pos->data[pos->size] = value;
pos->size++;
}
static void concat_pos(fzf_position_t *left, fzf_position_t *right) {
resize_pos(left, right->size, right->size);
memcpy(left->data + left->size, right->data, right->size * sizeof(uint32_t));
left->size += right->size;
}
static void insert_pos(fzf_position_t *pos, size_t start, size_t end) {
resize_pos(pos, end - start, end - start);
for (size_t k = start; k < end; k++) {
pos->data[pos->size] = k;
pos->size++;
}
}
static fzf_i16_t alloc16(size_t *offset, fzf_slab_t *slab, size_t size) {
if (slab != NULL && slab->I16.cap > *offset + size) {
i16_slice_t slice = slice_i16(slab->I16.data, *offset, (*offset) + size);
*offset = *offset + size;
return (fzf_i16_t){.data = slice.data,
.size = slice.size,
.cap = slice.size,
.allocated = false};
}
int16_t *data = (int16_t *)malloc(size * sizeof(int16_t));
return (fzf_i16_t){
.data = data, .size = size, .cap = size, .allocated = true};
}
static fzf_i32_t alloc32(size_t *offset, fzf_slab_t *slab, size_t size) {
if (slab != NULL && slab->I32.cap > *offset + size) {
i32_slice_t slice = slice_i32(slab->I32.data, *offset, (*offset) + size);
*offset = *offset + size;
return (fzf_i32_t){.data = slice.data,
.size = slice.size,
.cap = slice.size,
.allocated = false};
}
int32_t *data = (int32_t *)malloc(size * sizeof(int32_t));
return (fzf_i32_t){
.data = data, .size = size, .cap = size, .allocated = true};
}
static char_class char_class_of_ascii(char ch) {
if (ch >= 'a' && ch <= 'z') {
return char_lower;
} else if (ch >= 'A' && ch <= 'Z') {
return char_upper;
} else if (ch >= '0' && ch <= '9') {
return char_number;
}
return char_non_word;
}
// static char_class char_class_of_non_ascii(char ch) {
// return 0;
// }
static char_class char_class_of(char ch) {
return char_class_of_ascii(ch);
// if (ch <= 0x7f) {
// return char_class_of_ascii(ch);
// }
// return char_class_of_non_ascii(ch);
}
static int16_t bonus_for(char_class prev_class, char_class class) {
if (prev_class == char_non_word && class != char_non_word) {
return bonus_boundary;
} else if ((prev_class == char_lower && class == char_upper) ||
(prev_class != char_number && class == char_number)) {
return bonus_camel_123;
} else if (class == char_non_word) {
return bonus_non_word;
}
return 0;
}
static int16_t bonus_at(fzf_string_t *input, size_t idx) {
if (idx == 0) {
return bonus_boundary;
}
return bonus_for(char_class_of(input->data[idx - 1]),
char_class_of(input->data[idx]));
}
/* TODO(conni2461): maybe just not do this */
static char normalize_rune(char r) {
// TODO(conni2461)
/* if (r < 0x00C0 || r > 0x2184) { */
/* return r; */
/* } */
/* rune n = normalized[r]; */
/* if n > 0 { */
/* return n; */
/* } */
return r;
}
static int32_t try_skip(fzf_string_t *input, bool case_sensitive, byte b,
int32_t from) {
str_slice_t slice = slice_str(input->data, (size_t)from, input->size);
fzf_string_t byte_array = {.data = slice.data, .size = slice.size};
int32_t idx = index_byte(&byte_array, b);
if (idx == 0) {
return from;
}
if (!case_sensitive && b >= 'a' && b <= 'z') {
if (idx > 0) {
str_slice_t tmp = slice_str_right(byte_array.data, (size_t)idx);
byte_array.data = tmp.data;
byte_array.size = tmp.size;
}
int32_t uidx = index_byte(&byte_array, b - (byte)32);
if (uidx >= 0) {
idx = uidx;
}
}
if (idx < 0) {
return -1;
}
return from + idx;
}
static bool is_ascii(const char *runes, size_t size) {
// TODO(conni2461): future use
/* for (size_t i = 0; i < size; i++) { */
/* if (runes[i] >= 256) { */
/* return false; */
/* } */
/* } */
return true;
}
static int32_t ascii_fuzzy_index(fzf_string_t *input, const char *pattern,
size_t size, bool case_sensitive) {
if (!is_ascii(pattern, size)) {
return -1;
}
int32_t first_idx = 0, idx = 0;
for (size_t pidx = 0; pidx < size; pidx++) {
idx = try_skip(input, case_sensitive, pattern[pidx], idx);
if (idx < 0) {
return -1;
}
if (pidx == 0 && idx > 0) {
first_idx = idx - 1;
}
idx++;
}
return first_idx;
}
typedef struct {
int32_t score;
fzf_position_t *pos;
} score_pos_tuple_t;
static score_pos_tuple_t fzf_calculate_score(bool case_sensitive,
bool normalize, fzf_string_t *text,
fzf_string_t *pattern, size_t sidx,
size_t eidx, bool with_pos) {
const size_t len_pattern = pattern->size;
size_t pidx = 0;
int32_t score = 0, consecutive = 0;
bool in_gap = false;
int16_t first_bonus = 0;
fzf_position_t *pos = pos_array(with_pos, len_pattern);
int32_t prev_class = char_non_word;
if (sidx > 0) {
prev_class = char_class_of(text->data[sidx - 1]);
}
for (size_t idx = sidx; idx < eidx; idx++) {
char c = text->data[idx];
int32_t class = char_class_of(c);
if (!case_sensitive) {
/* TODO(conni2461): He does some unicode stuff here, investigate */
c = (char)tolower(c);
}
if (normalize) {
c = normalize_rune(c);
}
if (c == pattern->data[pidx]) {
if (with_pos) {
append_pos(pos, idx);
}
score += score_match;
int16_t bonus = bonus_for(prev_class, class);
if (consecutive == 0) {
first_bonus = bonus;
} else {
if (bonus == bonus_boundary) {
first_bonus = bonus;
}
bonus = max16(max16(bonus, first_bonus), bonus_consecutive);
}
if (pidx == 0) {
score += (int32_t)(bonus * bonus_first_char_multiplier);
} else {
score += (int32_t)bonus;
}
in_gap = false;
consecutive++;
pidx++;
} else {
if (in_gap) {
score += score_gap_extention;
} else {
score += score_gap_start;
}
in_gap = true;
consecutive = 0;
first_bonus = 0;
}
prev_class = class;
}
return (score_pos_tuple_t){score, pos};
}
static fzf_result_t __fuzzy_match_v1(bool case_sensitive, bool normalize,
fzf_string_t *text, fzf_string_t *pattern,
bool with_pos, fzf_slab_t *slab) {
const size_t len_pattern = pattern->size;
const size_t len_runes = text->size;
if (len_pattern == 0) {
return (fzf_result_t){0, 0, 0, NULL};
}
if (ascii_fuzzy_index(text, pattern->data, len_pattern, case_sensitive) < 0) {
return (fzf_result_t){-1, -1, 0, NULL};
}
int32_t pidx = 0;
int32_t sidx = -1, eidx = -1;
for (size_t idx = 0; idx < len_runes; idx++) {
char c = text->data[idx];
/* TODO(conni2461): Common pattern maybe a macro would be good here */
if (!case_sensitive) {
/* TODO(conni2461): He does some unicode stuff here, investigate */
c = (char)tolower(c);
}
if (normalize) {
c = normalize_rune(c);
}
if (c == pattern->data[pidx]) {
if (sidx < 0) {
sidx = (int32_t)idx;
}
pidx++;
if (pidx == len_pattern) {
eidx = (int32_t)idx + 1;
break;
}
}
}
if (sidx >= 0 && eidx >= 0) {
size_t start = (size_t)sidx, end = (size_t)eidx;
pidx--;
for (size_t idx = end - 1; idx >= start; idx--) {
char c = text->data[idx];
if (!case_sensitive) {
/* TODO(conni2461): He does some unicode stuff here, investigate */
c = (char)tolower(c);
}
if (c == pattern->data[pidx]) {
pidx--;
if (pidx < 0) {
start = idx;
break;
}
}
}
score_pos_tuple_t tuple = fzf_calculate_score(
case_sensitive, normalize, text, pattern, start, end, with_pos);
return (fzf_result_t){(int32_t)start, (int32_t)end, tuple.score, tuple.pos};
}
return (fzf_result_t){-1, -1, 0, NULL};
}
fzf_result_t fzf_fuzzy_match_v1(bool case_sensitive, bool normalize,
const char *input, const char *pattern,
bool with_pos, fzf_slab_t *slab) {
fzf_string_t input_wrap = {.data = input, .size = strlen(input)};
fzf_string_t pattern_wrap = {.data = pattern, .size = strlen(pattern)};
return __fuzzy_match_v1(case_sensitive, normalize, &input_wrap, &pattern_wrap,
with_pos, slab);
}
static fzf_result_t __fuzzy_match_v2(bool case_sensitive, bool normalize,
fzf_string_t *input, fzf_string_t *pattern,
bool with_pos, fzf_slab_t *slab) {
const size_t M = pattern->size;
const size_t N = input->size;
if (M == 0) {
return (fzf_result_t){0, 0, 0, pos_array(with_pos, M)};
}
if (slab != NULL && N * M > slab->I16.cap) {
return __fuzzy_match_v1(case_sensitive, normalize, input, pattern, with_pos,
slab);
}
size_t idx;
{
int32_t tmp_idx =
ascii_fuzzy_index(input, pattern->data, M, case_sensitive);
if (tmp_idx < 0) {
return (fzf_result_t){-1, -1, 0, NULL};
}
idx = (size_t)tmp_idx;
}
size_t offset16 = 0, offset32 = 0;
fzf_i16_t H0 = alloc16(&offset16, slab, N);
fzf_i16_t C0 = alloc16(&offset16, slab, N);
  // Bonus point for each position
fzf_i16_t B = alloc16(&offset16, slab, N);
// The first occurrence of each character in the pattern
fzf_i32_t F = alloc32(&offset32, slab, M);
// Rune array
fzf_i32_t T = alloc32(&offset32, slab, N);
copy_runes(input, &T); // input.CopyRunes(T)
// Phase 2. Calculate bonus for each point
int16_t max_score = 0;
size_t max_score_pos = 0;
size_t pidx = 0, last_idx = 0;
char pchar0 = pattern->data[0];
char pchar = pattern->data[0];
int16_t prevH0 = 0;
int32_t prev_class = char_non_word;
bool in_gap = false;
i32_slice_t Tsub = slice_i32(T.data, idx, T.size); // T[idx:];
i16_slice_t H0sub =
slice_i16_right(slice_i16(H0.data, idx, H0.size).data, Tsub.size);
i16_slice_t C0sub =
slice_i16_right(slice_i16(C0.data, idx, C0.size).data, Tsub.size);
i16_slice_t Bsub =
slice_i16_right(slice_i16(B.data, idx, B.size).data, Tsub.size);
for (size_t off = 0; off < Tsub.size; off++) {
char_class class;
char c = (char)Tsub.data[off];
class = char_class_of_ascii(c);
if (!case_sensitive && class == char_upper) {
/* TODO(conni2461): unicode support */
c = (char)tolower(c);
}
if (normalize) {
c = normalize_rune(c);
}
Tsub.data[off] = c;
int16_t bonus = bonus_for(prev_class, class);
Bsub.data[off] = bonus;
prev_class = class;
if (c == pchar) {
if (pidx < M) {
F.data[pidx] = (int32_t)(idx + off);
pidx++;
pchar = pattern->data[min64u(pidx, M - 1)];
}
last_idx = idx + off;
}
if (c == pchar0) {
int16_t score = score_match + bonus * bonus_first_char_multiplier;
H0sub.data[off] = score;
C0sub.data[off] = 1;
if (M == 1 && (score > max_score)) {
max_score = score;
max_score_pos = idx + off;
if (bonus == bonus_boundary) {
break;
}
}
in_gap = false;
} else {
if (in_gap) {
H0sub.data[off] = max16(prevH0 + score_gap_extention, 0);
} else {
H0sub.data[off] = max16(prevH0 + score_gap_start, 0);
}
C0sub.data[off] = 0;
in_gap = true;
}
prevH0 = H0sub.data[off];
}
if (pidx != M) {
free_alloc(T);
free_alloc(F);
free_alloc(B);
free_alloc(C0);
free_alloc(H0);
return (fzf_result_t){-1, -1, 0, NULL};
}
if (M == 1) {
free_alloc(T);
free_alloc(F);
free_alloc(B);
free_alloc(C0);
free_alloc(H0);
fzf_result_t res = {(int32_t)max_score_pos, (int32_t)max_score_pos + 1,
max_score, NULL};
if (!with_pos) {
return res;
}
fzf_position_t *pos = pos_array(with_pos, 1);
append_pos(pos, max_score_pos);
res.pos = pos;
return res;
}
size_t f0 = (size_t)F.data[0];
size_t width = last_idx - f0 + 1;
fzf_i16_t H = alloc16(&offset16, slab, width * M);
{
i16_slice_t H0_tmp_slice = slice_i16(H0.data, f0, last_idx + 1);
copy_into_i16(&H0_tmp_slice, &H);
}
fzf_i16_t C = alloc16(&offset16, slab, width * M);
{
i16_slice_t C0_tmp_slice = slice_i16(C0.data, f0, last_idx + 1);
copy_into_i16(&C0_tmp_slice, &C);
}
i32_slice_t Fsub = slice_i32(F.data, 1, F.size);
str_slice_t Psub =
slice_str_right(slice_str(pattern->data, 1, M).data, Fsub.size);
for (size_t off = 0; off < Fsub.size; off++) {
size_t f = (size_t)Fsub.data[off];
pchar = Psub.data[off];
pidx = off + 1;
size_t row = pidx * width;
in_gap = false;
Tsub = slice_i32(T.data, f, last_idx + 1);
Bsub = slice_i16_right(slice_i16(B.data, f, B.size).data, Tsub.size);
i16_slice_t Csub = slice_i16_right(
slice_i16(C.data, row + f - f0, C.size).data, Tsub.size);
i16_slice_t Cdiag = slice_i16_right(
slice_i16(C.data, row + f - f0 - 1 - width, C.size).data, Tsub.size);
i16_slice_t Hsub = slice_i16_right(
slice_i16(H.data, row + f - f0, H.size).data, Tsub.size);
i16_slice_t Hdiag = slice_i16_right(
slice_i16(H.data, row + f - f0 - 1 - width, H.size).data, Tsub.size);
i16_slice_t Hleft = slice_i16_right(
slice_i16(H.data, row + f - f0 - 1, H.size).data, Tsub.size);
Hleft.data[0] = 0;
for (size_t j = 0; j < Tsub.size; j++) {
char c = (char)Tsub.data[j];
size_t col = j + f;
int16_t s1 = 0, s2 = 0;
int16_t consecutive = 0;
if (in_gap) {
s2 = Hleft.data[j] + score_gap_extention;
} else {
s2 = Hleft.data[j] + score_gap_start;
}
if (pchar == c) {
s1 = Hdiag.data[j] + score_match;
int16_t b = Bsub.data[j];
consecutive = Cdiag.data[j] + 1;
if (b == bonus_boundary) {
consecutive = 1;
} else if (consecutive > 1) {
b = max16(b, max16(bonus_consecutive,
B.data[col - ((size_t)consecutive) + 1]));
}
if (s1 + b < s2) {
s1 += Bsub.data[j];
consecutive = 0;
} else {
s1 += b;
}
}
Csub.data[j] = consecutive;
in_gap = s1 < s2;
int16_t score = max16(max16(s1, s2), 0);
if (pidx == M - 1 && (score > max_score)) {
max_score = score;
max_score_pos = col;
}
Hsub.data[j] = score;
}
}
fzf_position_t *pos = pos_array(with_pos, M);
size_t j = max_score_pos;
if (with_pos) {
size_t i = M - 1;
bool prefer_match = true;
for (;;) {
size_t I = i * width;
size_t j0 = j - f0;
int16_t s = H.data[I + j0];
int16_t s1 = 0;
int16_t s2 = 0;
if (i > 0 && j >= F.data[i]) {
s1 = H.data[I - width + j0 - 1];
}
if (j > F.data[i]) {
s2 = H.data[I + j0 - 1];
}
if (s > s1 && (s > s2 || (s == s2 && prefer_match))) {
append_pos(pos, j);
if (i == 0) {
break;
}
i--;
}
prefer_match = C.data[I + j0] > 1 || (I + width + j0 + 1 < C.size &&
C.data[I + width + j0 + 1] > 0);
j--;
}
}
free_alloc(H);
free_alloc(C);
free_alloc(T);
free_alloc(F);
free_alloc(B);
free_alloc(C0);
free_alloc(H0);
return (fzf_result_t){(int32_t)j, (int32_t)max_score_pos + 1,
(int32_t)max_score, pos};
}
fzf_result_t fzf_fuzzy_match_v2(bool case_sensitive, bool normalize,
const char *input, const char *pattern,
bool with_pos, fzf_slab_t *slab) {
fzf_string_t input_wrap = {.data = input, .size = strlen(input)};
fzf_string_t pattern_wrap = {.data = pattern, .size = strlen(pattern)};
return __fuzzy_match_v2(case_sensitive, normalize, &input_wrap, &pattern_wrap,
with_pos, slab);
}
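/*
 * Illustrative sketch only (not part of the upstream API surface): a minimal
 * direct call to the v2 matcher above. It is guarded behind a hypothetical
 * FZF_USAGE_EXAMPLE define so it never compiles into normal builds, and the
 * haystack/needle strings are made-up values. Passing a NULL slab is valid;
 * alloc16/alloc32 then fall back to plain malloc instead of slab memory.
 */
#ifdef FZF_USAGE_EXAMPLE
#include <stdio.h>
static void fuzzy_match_v2_example(void) {
  /* with_pos = false, so no position array is allocated and nothing to free */
  fzf_result_t res =
      fzf_fuzzy_match_v2(false, false, "fzf_lib.c", "flc", false, NULL);
  if (res.start >= 0) {
    printf("matched [%d, %d) with score %d\n", (int)res.start, (int)res.end,
           (int)res.score);
  }
}
#endif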
static fzf_result_t __exact_match_naive(bool case_sensitive, bool normalize,
fzf_string_t *text,
fzf_string_t *pattern, bool with_pos,
fzf_slab_t *slab) {
const size_t len_pattern = pattern->size;
const size_t len_runes = text->size;
if (len_pattern == 0) {
return (fzf_result_t){0, 0, 0, NULL};
}
if (len_runes < len_pattern) {
return (fzf_result_t){-1, -1, 0, NULL};
}
if (ascii_fuzzy_index(text, pattern->data, len_pattern, case_sensitive) < 0) {
return (fzf_result_t){-1, -1, 0, NULL};
}
size_t pidx = 0;
int32_t best_pos = -1;
int16_t bonus = 0;
int16_t best_bonus = -1;
for (size_t idx = 0; idx < len_runes; idx++) {
size_t idx_ = idx;
char c = text->data[idx_];
if (!case_sensitive) {
/* TODO(conni2461): He does some unicode stuff here, investigate */
c = (char)tolower(c);
}
if (normalize) {
c = normalize_rune(c);
}
size_t pidx_ = pidx;
if (c == pattern->data[pidx_]) {
if (pidx_ == 0) {
bonus = bonus_at(text, idx_);
}
pidx++;
if (pidx == len_pattern) {
if (bonus > best_bonus) {
best_pos = (int32_t)idx;
best_bonus = bonus;
}
if (bonus == bonus_boundary) {
break;
}
idx -= pidx - 1;
pidx = 0;
bonus = 0;
}
} else {
idx -= pidx;
pidx = 0;
bonus = 0;
}
}
if (best_pos >= 0) {
size_t bp = (size_t)best_pos;
size_t sidx = bp - len_pattern + 1;
size_t eidx = bp + 1;
int32_t score = fzf_calculate_score(case_sensitive, normalize, text,
pattern, sidx, eidx, false)
.score;
return (fzf_result_t){(int32_t)sidx, (int32_t)eidx, score, NULL};
}
return (fzf_result_t){-1, -1, 0, NULL};
}
fzf_result_t fzf_exact_match_naive(bool case_sensitive, bool normalize,
const char *input, const char *pattern,
bool with_pos, fzf_slab_t *slab) {
fzf_string_t input_wrap = {.data = input, .size = strlen(input)};
fzf_string_t pattern_wrap = {.data = pattern, .size = strlen(pattern)};
return __exact_match_naive(case_sensitive, normalize, &input_wrap,
&pattern_wrap, with_pos, slab);
}
static fzf_result_t __prefix_match(bool case_sensitive, bool normalize,
fzf_string_t *text, fzf_string_t *pattern,
bool with_pos, fzf_slab_t *slab) {
const size_t len_pattern = pattern->size;
if (len_pattern == 0) {
return (fzf_result_t){0, 0, 0, NULL};
}
size_t trimmed_len = 0;
/* TODO(conni2461): i feel this is wrong */
if (!isspace((unsigned char)pattern->data[0])) {
trimmed_len = leading_whitespaces(text);
}
if (text->size - trimmed_len < len_pattern) {
return (fzf_result_t){-1, -1, 0, NULL};
}
for (size_t i = 0; i < len_pattern; i++) {
char c = text->data[trimmed_len + i];
if (!case_sensitive) {
c = (char)tolower(c);
}
if (normalize) {
c = normalize_rune(c);
}
if (c != pattern->data[i]) {
return (fzf_result_t){-1, -1, 0, NULL};
}
}
size_t start = trimmed_len;
size_t end = trimmed_len + len_pattern;
int32_t score = fzf_calculate_score(case_sensitive, normalize, text, pattern,
start, end, false)
.score;
return (fzf_result_t){(int32_t)start, (int32_t)end, score, NULL};
}
fzf_result_t fzf_prefix_match(bool case_sensitive, bool normalize,
const char *input, const char *pattern,
bool with_pos, fzf_slab_t *slab) {
fzf_string_t input_wrap = {.data = input, .size = strlen(input)};
fzf_string_t pattern_wrap = {.data = pattern, .size = strlen(pattern)};
return __prefix_match(case_sensitive, normalize, &input_wrap, &pattern_wrap,
with_pos, slab);
}
static fzf_result_t __suffix_match(bool case_sensitive, bool normalize,
fzf_string_t *text, fzf_string_t *pattern,
bool with_pos, fzf_slab_t *slab) {
const size_t len_runes = text->size;
size_t trimmed_len = len_runes;
const size_t len_pattern = pattern->size;
/* TODO(conni2461): i feel this is wrong */
if (len_pattern == 0 ||
!isspace((unsigned char)pattern->data[len_pattern - 1])) {
trimmed_len -= trailing_whitespaces(text);
}
if (len_pattern == 0) {
return (fzf_result_t){(int32_t)trimmed_len, (int32_t)trimmed_len, 0, NULL};
}
  /* trimmed_len and len_pattern are size_t, so guard before subtracting
     instead of checking the (always false) "diff < 0" afterwards */
  if (trimmed_len < len_pattern) {
    return (fzf_result_t){-1, -1, 0, NULL};
  }
  size_t diff = trimmed_len - len_pattern;
for (size_t idx = 0; idx < len_pattern; idx++) {
char c = text->data[idx + diff];
if (!case_sensitive) {
c = (char)tolower(c);
}
if (normalize) {
c = normalize_rune(c);
}
if (c != pattern->data[idx]) {
return (fzf_result_t){-1, -1, 0, NULL};
}
}
size_t start = trimmed_len - len_pattern;
size_t end = trimmed_len;
int32_t score = fzf_calculate_score(case_sensitive, normalize, text, pattern,
start, end, false)
.score;
return (fzf_result_t){(int32_t)start, (int32_t)end, score, NULL};
}
fzf_result_t fzf_suffix_match(bool case_sensitive, bool normalize,
const char *input, const char *pattern,
bool with_pos, fzf_slab_t *slab) {
fzf_string_t input_wrap = {.data = input, .size = strlen(input)};
fzf_string_t pattern_wrap = {.data = pattern, .size = strlen(pattern)};
return __suffix_match(case_sensitive, normalize, &input_wrap, &pattern_wrap,
with_pos, slab);
}
static fzf_result_t __equal_match(bool case_sensitive, bool normalize,
fzf_string_t *text, fzf_string_t *pattern,
bool withPos, fzf_slab_t *slab) {
const size_t len_pattern = pattern->size;
if (len_pattern == 0) {
return (fzf_result_t){-1, -1, 0, NULL};
}
size_t trimmed_len = leading_whitespaces(text);
size_t trimmed_end_len = trailing_whitespaces(text);
if ((text->size - trimmed_len - trimmed_end_len) != len_pattern) {
return (fzf_result_t){-1, -1, 0, NULL};
}
bool match = true;
if (normalize) {
// TODO(conni2461): to rune
for (size_t idx = 0; idx < len_pattern; idx++) {
char pchar = pattern->data[idx];
char c = text->data[trimmed_len + idx];
if (!case_sensitive) {
c = (char)tolower(c);
}
if (normalize_rune(c) != normalize_rune(pchar)) {
match = false;
break;
}
}
} else {
// TODO(conni2461): to rune
for (size_t idx = 0; idx < len_pattern; idx++) {
char pchar = pattern->data[idx];
char c = text->data[trimmed_len + idx];
if (!case_sensitive) {
c = (char)tolower(c);
}
if (c != pchar) {
match = false;
break;
}
}
}
if (match) {
return (fzf_result_t){
(int32_t)trimmed_len, ((int32_t)trimmed_len + (int32_t)len_pattern),
(score_match + bonus_boundary) * (int32_t)len_pattern +
(bonus_first_char_multiplier - 1) * bonus_boundary,
NULL};
}
return (fzf_result_t){-1, -1, 0, NULL};
}
fzf_result_t fzf_equal_match(bool case_sensitive, bool normalize,
const char *input, const char *pattern,
bool with_pos, fzf_slab_t *slab) {
fzf_string_t input_wrap = {.data = input, .size = strlen(input)};
fzf_string_t pattern_wrap = {.data = pattern, .size = strlen(pattern)};
return __equal_match(case_sensitive, normalize, &input_wrap, &pattern_wrap,
with_pos, slab);
}
static void append_set(fzf_term_set_t *set, fzf_term_t value) {
if (set->cap == 0) {
set->cap = 1;
set->ptr = (fzf_term_t *)malloc(sizeof(fzf_term_t));
} else if (set->size + 1 > set->cap) {
set->cap *= 2;
set->ptr = realloc(set->ptr, sizeof(fzf_term_t) * set->cap);
}
set->ptr[set->size] = value;
set->size++;
}
static void append_pattern(fzf_pattern_t *pattern, fzf_term_set_t *value) {
if (pattern->cap == 0) {
pattern->cap = 1;
pattern->ptr = (fzf_term_set_t **)malloc(sizeof(fzf_term_set_t *));
} else if (pattern->size + 1 > pattern->cap) {
pattern->cap *= 2;
pattern->ptr =
realloc(pattern->ptr, sizeof(fzf_term_set_t *) * pattern->cap);
}
pattern->ptr[pattern->size] = value;
pattern->size++;
}
static fzf_result_t fzf_call_alg(fzf_term_t *term, bool normalize,
fzf_string_t *input, bool with_pos,
fzf_slab_t *slab) {
switch (term->typ) {
case term_fuzzy:
return __fuzzy_match_v2(term->case_sensitive, normalize, input,
(fzf_string_t *)term->text, with_pos, slab);
case term_exact:
return __exact_match_naive(term->case_sensitive, normalize, input,
(fzf_string_t *)term->text, with_pos, slab);
case term_prefix:
return __prefix_match(term->case_sensitive, normalize, input,
(fzf_string_t *)term->text, with_pos, slab);
case term_suffix:
return __suffix_match(term->case_sensitive, normalize, input,
(fzf_string_t *)term->text, with_pos, slab);
case term_equal:
return __equal_match(term->case_sensitive, normalize, input,
(fzf_string_t *)term->text, with_pos, slab);
}
return __fuzzy_match_v2(term->case_sensitive, normalize, input,
(fzf_string_t *)term->text, with_pos, slab);
}
// TODO(conni2461): REFACTOR
/* assumption (maybe i change that later)
* - always v2 alg
 * - bool extended always true (that's the whole point of this, isn't it)
*/
fzf_pattern_t *fzf_parse_pattern(fzf_case_types case_mode, bool normalize,
char *pattern, bool fuzzy) {
size_t pat_len = strlen(pattern);
pattern = trim_left(pattern, &pat_len, ' ');
while (has_suffix(pattern, pat_len, " ", 1) &&
!has_suffix(pattern, pat_len, "\\ ", 2)) {
pattern[pat_len - 1] = 0;
pat_len--;
}
char *pattern_copy = str_replace(pattern, "\\ ", "\t");
const char *delim = " ";
char *ptr = strtok(pattern_copy, delim);
fzf_pattern_t *pat_obj = (fzf_pattern_t *)malloc(sizeof(fzf_pattern_t));
memset(pat_obj, 0, sizeof(*pat_obj));
fzf_term_set_t *set = (fzf_term_set_t *)malloc(sizeof(fzf_term_set_t));
memset(set, 0, sizeof(*set));
bool switch_set = false;
bool after_bar = false;
while (ptr != NULL) {
fzf_alg_types typ = term_fuzzy;
bool inv = false;
char *text = str_replace(ptr, "\t", " ");
size_t len = strlen(text);
char *og_str = text;
char *lower_text = str_tolower(text, len);
bool case_sensitive = case_mode == case_respect ||
(case_mode == case_smart && strcmp(text, lower_text));
if (!case_sensitive) {
free(text);
text = lower_text;
og_str = lower_text;
} else {
free(lower_text);
}
if (!fuzzy) {
typ = term_exact;
}
if (set->size > 0 && !after_bar && strcmp(text, "|") == 0) {
switch_set = false;
after_bar = true;
ptr = strtok(NULL, delim);
free(og_str);
continue;
}
after_bar = false;
if (has_prefix(text, "!", 1)) {
inv = true;
typ = term_exact;
text++;
len--;
}
if (strcmp(text, "$") != 0 && has_suffix(text, len, "$", 1)) {
typ = term_suffix;
text[len - 1] = 0;
len--;
}
if (has_prefix(text, "'", 1)) {
if (fuzzy && !inv) {
typ = term_exact;
text++;
len--;
} else {
typ = term_fuzzy;
text++;
len--;
}
} else if (has_prefix(text, "^", 1)) {
if (typ == term_suffix) {
typ = term_equal;
} else {
typ = term_prefix;
}
text++;
len--;
}
if (len > 0) {
if (switch_set) {
append_pattern(pat_obj, set);
set = (fzf_term_set_t *)malloc(sizeof(fzf_term_set_t));
set->cap = 0;
set->size = 0;
}
fzf_string_t *text_ptr = (fzf_string_t *)malloc(sizeof(fzf_string_t));
text_ptr->data = text;
text_ptr->size = len;
append_set(set, (fzf_term_t){.typ = typ,
.inv = inv,
.ptr = og_str,
.text = text_ptr,
.case_sensitive = case_sensitive});
switch_set = true;
} else {
free(og_str);
}
ptr = strtok(NULL, delim);
}
if (set->size > 0) {
append_pattern(pat_obj, set);
}
bool only = true;
for (size_t i = 0; i < pat_obj->size; i++) {
fzf_term_set_t *term_set = pat_obj->ptr[i];
if (term_set->size > 1) {
only = false;
break;
}
if (term_set->ptr[0].inv == false) {
only = false;
break;
}
}
pat_obj->only_inv = only;
free(pattern_copy);
return pat_obj;
}
void fzf_free_pattern(fzf_pattern_t *pattern) {
for (size_t i = 0; i < pattern->size; i++) {
fzf_term_set_t *term_set = pattern->ptr[i];
for (size_t j = 0; j < term_set->size; j++) {
fzf_term_t *term = &term_set->ptr[j];
free(term->ptr);
free(term->text);
}
free(term_set->ptr);
free(term_set);
}
free(pattern->ptr);
free(pattern);
}
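/*
 * Illustrative sketch only, guarded behind a hypothetical FZF_USAGE_EXAMPLE
 * define: how a raw prompt string is split into term sets by
 * fzf_parse_pattern above. The prompt below is a made-up example of the fzf
 * search syntax; it parses into four term sets:
 *   exact("wild"), prefix("music"), suffix(".mp3") OR suffix(".wav"),
 *   and inverted exact("fire").
 */
#ifdef FZF_USAGE_EXAMPLE
static void parse_pattern_example(void) {
  /* the prompt must be writable: parsing trims and rewrites it in place */
  char prompt[] = "'wild ^music .mp3$ | .wav$ !fire";
  /* case_smart: case sensitive only when the prompt contains uppercase */
  fzf_pattern_t *pat = fzf_parse_pattern(case_smart, false, prompt, true);
  /* ... hand pat to fzf_get_score / fzf_get_positions for each candidate ... */
  fzf_free_pattern(pat);
}
#endif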
int32_t fzf_get_score(const char *text, fzf_pattern_t *pattern,
fzf_slab_t *slab) {
fzf_string_t input = {.data = text, .size = strlen(text)};
if (pattern->only_inv) {
int final = 0;
for (size_t i = 0; i < pattern->size; i++) {
fzf_term_set_t *term_set = pattern->ptr[i];
fzf_term_t *term = &term_set->ptr[0];
final += fzf_call_alg(term, false, &input, false, slab).score;
}
return (final > 0) ? 0 : 1;
}
int32_t total_score = 0;
for (size_t i = 0; i < pattern->size; i++) {
fzf_term_set_t *term_set = pattern->ptr[i];
int32_t current_score = 0;
bool matched = false;
for (size_t j = 0; j < term_set->size; j++) {
fzf_term_t *term = &term_set->ptr[j];
fzf_result_t res = fzf_call_alg(term, false, &input, false, slab);
if (res.start >= 0) {
if (term->inv) {
continue;
}
current_score = res.score;
matched = true;
} else if (term->inv) {
current_score = 0;
matched = true;
}
}
if (matched) {
total_score += current_score;
} else {
total_score = 0;
break;
}
}
return total_score;
}
fzf_position_t *fzf_get_positions(const char *text, fzf_pattern_t *pattern,
fzf_slab_t *slab) {
fzf_string_t input = {.data = text, .size = strlen(text)};
fzf_position_t *all_pos = pos_array(true, 1);
for (size_t i = 0; i < pattern->size; i++) {
fzf_term_set_t *term_set = pattern->ptr[i];
fzf_result_t current_res = (fzf_result_t){0, 0, 0, NULL};
bool matched = false;
for (size_t j = 0; j < term_set->size; j++) {
fzf_term_t *term = &term_set->ptr[j];
fzf_result_t res = fzf_call_alg(term, false, &input, true, slab);
if (res.start >= 0) {
if (term->inv) {
fzf_free_positions(res.pos);
continue;
}
current_res = res;
matched = true;
} else if (term->inv) {
matched = true;
}
}
if (matched) {
if (current_res.pos) {
concat_pos(all_pos, current_res.pos);
fzf_free_positions(current_res.pos);
} else {
int32_t diff = (current_res.end - current_res.start);
if (diff > 0) {
insert_pos(all_pos, (size_t)current_res.start,
(size_t)current_res.end);
}
}
} else {
free(all_pos->data);
memset(all_pos, 0, sizeof(*all_pos));
break;
}
}
return all_pos;
}
void fzf_free_positions(fzf_position_t *pos) {
if (pos) {
if (pos->data) {
free(pos->data);
}
free(pos);
}
}
fzf_slab_t *fzf_make_slab(size_t size_16, size_t size_32) {
fzf_slab_t *slab = (fzf_slab_t *)malloc(sizeof(fzf_slab_t));
memset(slab, 0, sizeof(*slab));
slab->I16.data = (int16_t *)malloc(size_16 * sizeof(int16_t));
memset(slab->I16.data, 0, size_16 * sizeof(*slab->I16.data));
slab->I16.cap = size_16;
slab->I16.size = 0;
slab->I16.allocated = true;
slab->I32.data = (int32_t *)malloc(size_32 * sizeof(int32_t));
memset(slab->I32.data, 0, size_32 * sizeof(*slab->I32.data));
slab->I32.cap = size_32;
slab->I32.size = 0;
slab->I32.allocated = true;
return slab;
}
fzf_slab_t *fzf_make_default_slab(void) {
return fzf_make_slab(100 * 1024, 2048);
}
void fzf_free_slab(fzf_slab_t *slab) {
if (slab) {
free(slab->I16.data);
free(slab->I32.data);
free(slab);
}
}
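/*
 * End-to-end illustrative sketch of the public API defined in this file,
 * guarded behind a hypothetical FZF_USAGE_EXAMPLE define; the prompt and the
 * candidate strings are made-up values. The slab exists so the matchers can
 * reuse one block of scratch memory across calls instead of re-allocating,
 * and the parsed pattern is likewise built once and reused per candidate.
 */
#ifdef FZF_USAGE_EXAMPLE
#include <stdio.h>
static void fzf_api_example(void) {
  fzf_slab_t *slab = fzf_make_default_slab();
  char prompt[] = "src | lua$";
  fzf_pattern_t *pattern = fzf_parse_pattern(case_smart, false, prompt, true);
  const char *candidates[] = {"src/fzf.c", "lua/fzf_lib.lua", "README.md"};
  for (size_t i = 0; i < 3; i++) {
    int32_t score = fzf_get_score(candidates[i], pattern, slab);
    if (score > 0) {
      fzf_position_t *pos = fzf_get_positions(candidates[i], pattern, slab);
      printf("%s -> score %d (%lu matched chars)\n", candidates[i], (int)score,
             pos ? (unsigned long)pos->size : 0UL);
      fzf_free_positions(pos);
    }
  }
  fzf_free_pattern(pattern);
  fzf_free_slab(slab);
}
#endif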
| 20,167 |
462 | <reponame>matvaibhav/pensieve
{
"appDesc": {
"description": "App description.",
"message": "Crie e edite apresentações"
},
"appName": {
"description": "App name.",
"message": "Google Apresentações"
}
}
| 111 |
713 | package org.infinispan.remoting.transport.jgroups;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Properties;
import org.infinispan.commons.util.StringPropertyReplacer;
import org.jgroups.JChannel;
import org.jgroups.conf.ProtocolConfiguration;
import org.jgroups.conf.XmlConfigurator;
/**
* A JGroups {@link JGroupsChannelConfigurator} which loads configuration from an XML file supplied as an {@link InputStream}
*
* @author <NAME> <<EMAIL>>
* @since 10.0
**/
public class FileJGroupsChannelConfigurator extends AbstractJGroupsChannelConfigurator {
private final String name;
private final String path;
private final Properties properties;
private final List<ProtocolConfiguration> stack;
public FileJGroupsChannelConfigurator(String name, String path, InputStream is, Properties properties) throws IOException {
this.name = name;
this.path = path;
this.stack = XmlConfigurator.getInstance(is).getProtocolStack();
this.properties = properties;
}
@Override
public String getProtocolStackString() {
return stack.toString();
}
@Override
public List<ProtocolConfiguration> getProtocolStack() {
this.stack.forEach(c -> StringPropertyReplacer.replaceProperties(c.getProperties(), properties));
return stack;
}
public String getName() {
return name;
}
@Override
public JChannel createChannel(String name) throws Exception {
return applySocketFactory(new JChannel(this));
}
public String getPath() {
return path;
}
}
| 515 |
14,668 | <gh_stars>1000+
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef IOS_WEB_VIEW_INTERNAL_CWV_USER_CONTENT_CONTROLLER_INTERNAL_H_
#define IOS_WEB_VIEW_INTERNAL_CWV_USER_CONTENT_CONTROLLER_INTERNAL_H_
#import "ios/web_view/public/cwv_user_content_controller.h"
NS_ASSUME_NONNULL_BEGIN
@class CWVWebViewConfiguration;
@interface CWVUserContentController ()
- (nonnull instancetype)initWithConfiguration:
(nonnull __weak CWVWebViewConfiguration*)configuration;
@end
NS_ASSUME_NONNULL_END
#endif // IOS_WEB_VIEW_INTERNAL_CWV_USER_CONTENT_CONTROLLER_INTERNAL_H_
| 267 |
365 | <reponame>mstute/py-setproctitle
/*-------------------------------------------------------------------------
*
* spt_status.h
*
* Declarations for spt_status.c
*
*-------------------------------------------------------------------------
*/
#ifndef SPT_STATUS_H
#define SPT_STATUS_H
#include "c.h"
HIDDEN extern bool update_process_title;
HIDDEN extern char **save_ps_display_args(int argc, char **argv);
HIDDEN extern void init_ps_display(const char *initial_str);
HIDDEN extern void set_ps_display(const char *activity, bool force);
HIDDEN extern const char *get_ps_display(size_t *displen);
HIDDEN extern void set_thread_title(const char *title);
HIDDEN extern void get_thread_title(char *title);
#endif /* SPT_STATUS_H */
| 242 |
426 | <gh_stars>100-1000
/* Code generated by IfcQuery EXPRESS generator, www.ifcquery.com */
#include <map>
#include "ifcpp/model/BasicTypes.h"
#include "ifcpp/model/BuildingException.h"
#include "ifcpp/reader/ReaderUtil.h"
#include "ifcpp/IFC4/include/IfcAmountOfSubstanceMeasure.h"
#include "ifcpp/IFC4/include/IfcAreaMeasure.h"
#include "ifcpp/IFC4/include/IfcComplexNumber.h"
#include "ifcpp/IFC4/include/IfcContextDependentMeasure.h"
#include "ifcpp/IFC4/include/IfcCountMeasure.h"
#include "ifcpp/IFC4/include/IfcDescriptiveMeasure.h"
#include "ifcpp/IFC4/include/IfcElectricCurrentMeasure.h"
#include "ifcpp/IFC4/include/IfcLengthMeasure.h"
#include "ifcpp/IFC4/include/IfcLuminousIntensityMeasure.h"
#include "ifcpp/IFC4/include/IfcMassMeasure.h"
#include "ifcpp/IFC4/include/IfcNonNegativeLengthMeasure.h"
#include "ifcpp/IFC4/include/IfcNormalisedRatioMeasure.h"
#include "ifcpp/IFC4/include/IfcNumericMeasure.h"
#include "ifcpp/IFC4/include/IfcParameterValue.h"
#include "ifcpp/IFC4/include/IfcPlaneAngleMeasure.h"
#include "ifcpp/IFC4/include/IfcPositiveLengthMeasure.h"
#include "ifcpp/IFC4/include/IfcPositivePlaneAngleMeasure.h"
#include "ifcpp/IFC4/include/IfcPositiveRatioMeasure.h"
#include "ifcpp/IFC4/include/IfcRatioMeasure.h"
#include "ifcpp/IFC4/include/IfcSolidAngleMeasure.h"
#include "ifcpp/IFC4/include/IfcThermodynamicTemperatureMeasure.h"
#include "ifcpp/IFC4/include/IfcTimeMeasure.h"
#include "ifcpp/IFC4/include/IfcVolumeMeasure.h"
#include "ifcpp/IFC4/include/IfcValue.h"
#include "ifcpp/IFC4/include/IfcMeasureValue.h"
// TYPE IfcMeasureValue = SELECT (IfcAmountOfSubstanceMeasure ,IfcAreaMeasure ,IfcComplexNumber ,IfcContextDependentMeasure ,IfcCountMeasure ,IfcDescriptiveMeasure ,IfcElectricCurrentMeasure ,IfcLengthMeasure ,IfcLuminousIntensityMeasure ,IfcMassMeasure ,IfcNonNegativeLengthMeasure ,IfcNormalisedRatioMeasure ,IfcNumericMeasure ,IfcParameterValue ,IfcPlaneAngleMeasure ,IfcPositiveLengthMeasure ,IfcPositivePlaneAngleMeasure ,IfcPositiveRatioMeasure ,IfcRatioMeasure ,IfcSolidAngleMeasure ,IfcThermodynamicTemperatureMeasure ,IfcTimeMeasure ,IfcVolumeMeasure);
shared_ptr<IfcMeasureValue> IfcMeasureValue::createObjectFromSTEP( const std::wstring& arg, const std::map<int,shared_ptr<BuildingEntity> >& map )
{
if( arg.empty() ){ return shared_ptr<IfcMeasureValue>(); }
if( arg.compare(L"$")==0 )
{
return shared_ptr<IfcMeasureValue>();
}
if( arg.compare(L"*")==0 )
{
return shared_ptr<IfcMeasureValue>();
}
shared_ptr<IfcMeasureValue> result_object;
readSelectType( arg, result_object, map );
return result_object;
}
| 1,033 |
3,765 | /*
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.plsql.ast;
import net.sourceforge.pmd.annotation.InternalApi;
public class ASTPackageBody extends net.sourceforge.pmd.lang.plsql.ast.AbstractPLSQLNode implements OracleObject {
@Deprecated
@InternalApi
public ASTPackageBody(int id) {
super(id);
}
@Deprecated
@InternalApi
public ASTPackageBody(PLSQLParser p, int id) {
super(p, id);
}
@Override
public Object jjtAccept(PLSQLParserVisitor visitor, Object data) {
return visitor.visit(this, data);
}
/**
* Gets the name of the Oracle Object.
*
* @return a String representing the name of the Oracle Object
*/
@Override
public String getObjectName() {
return this.getImage();
}
}
| 330 |
307 | <gh_stars>100-1000
//
//
#include "DebugWindow.h"
#include "globalincs/alphacolors.h"
namespace {
uint32_t get_debug_display() {
// If there are two or more monitors then we would like to display the window on the second monitor but it shouldn't
// be shown on the same monitor that the game is running on
	// If there is only one then we'll just display it on that one
auto numDisplays = SDL_GetNumVideoDisplays();
if (numDisplays < 1) {
// Some kind of error
return 0;
}
if (numDisplays == 1) {
// We only have one display
return 0;
}
auto mainDisplay = os_config_read_uint("Video", "Display", 0);
if (mainDisplay == 1) {
// Game is on the second monitor => use the primary screen
return 0;
} else {
// Use the secondary screen
return 1;
}
}
}
namespace osapi {
DebugWindow::DebugWindow() {
os::ViewPortProperties attrs;
uint32_t display = get_debug_display();
attrs.display = display;
SDL_Rect size;
SDL_GetDisplayBounds(display, &size);
// Make the window a bit smaller so that it doesn't take up the whole screen
attrs.width = (uint32_t) (size.w / 1.3f);
attrs.height = (uint32_t) (size.h / 1.3f);
attrs.title = "FreeSpace Open - Debug Window";
attrs.flags.set(os::ViewPortFlags::Resizeable); // Make this window resizeable
auto debugView = gr_create_viewport(attrs);
if (debugView) {
debug_view = os::addViewport(std::move(debugView));
debug_sdl_window = debug_view->toSDLWindow();
if (debug_sdl_window != nullptr) {
os::events::addEventListener(SDL_KEYUP,
os::events::DEFAULT_LISTENER_WEIGHT - 5,
[this](const SDL_Event& e) { return this->debug_key_handler(e); });
}
}
if (debug_view->toSDLWindow() != nullptr && os::getSDLMainWindow() != nullptr) {
SDL_RaiseWindow(os::getSDLMainWindow());
}
}
DebugWindow::~DebugWindow() {
}
void DebugWindow::doFrame(float) {
if (!debug_view) {
// Failed to create debug window, nothing to do here
return;
}
gr_use_viewport(debug_view);
gr_clear();
font::set_font(font::FONT1);
if (max_category_width != 0) {
// Draw a line to separate the category from the text
auto x_pos = max_category_width + 14;
gr_set_color_fast(&Color_grey);
gr_line(x_pos, 0, x_pos, gr_screen.max_h, GR_RESIZE_NONE);
}
// Print the saved debug log lines from the bottom up
float current_y = i2fl(gr_screen.max_h - 10);
// If the current view offset is at the end of the list then we also need to display the line that is currently being constructed
if (log_view_offset == lines.size() - 1) {
if (!current_line.empty()) {
LineInfo info;
info.text = current_line;
info.category = current_category;
current_y = print_line(current_y, info);
}
}
// Now print all the stored lines
// Backwards iteration of the vector comes from here: http://stackoverflow.com/a/4206815
for (auto i = log_view_offset + 1; i-- > 0;) {
auto& line = lines[i];
current_y = print_line(current_y, line);
if (current_y < 0.f) {
// End to iteration if the rendered string would be invisible
break;
}
}
// Don't run scripting here
gr_flip(false);
gr_use_viewport(os::getMainViewport());
}
float DebugWindow::print_line(float bottom_y, const LineInfo& line) {
int category_width;
gr_get_string_size(&category_width, nullptr, line.category.c_str());
// Wrap the string to make sure everything can be read
SCP_vector<const char*> split_lines;
SCP_vector<int> line_lengths;
	// Subtract 40 so that we can have margins on both sides
// Make sure that the width doesn't go too low or else split_str will not be able to fit enough characters in one line
auto max_w = std::max(40, gr_screen.max_w - max_category_width - 40);
split_str(line.text.c_str(), max_w, line_lengths, split_lines);
auto text_height = split_lines.size() * font::get_current_font()->getHeight();
float y_pos = bottom_y - text_height;
float cat_x_pos = max_category_width - category_width + 10.f;
// Give each category a unique color. We do this by hashing the string and using that to construct the RGB values
auto hash = std::hash<SCP_string>()(line.category);
gr_set_color((int) (hash & 0xFF), (int) ((hash & 0xFF00) >> 8), (int) ((hash & 0xFF0000) >> 16));
gr_string(cat_x_pos, y_pos, line.category.c_str(), GR_RESIZE_NONE);
gr_set_color_fast(&Color_white);
for (size_t i = 0; i < split_lines.size(); ++i) {
gr_string(max_category_width + 18.f, y_pos, split_lines[i], GR_RESIZE_NONE, line_lengths[i]);
y_pos += font::get_current_font()->getHeight();
}
return bottom_y - text_height;
}
bool DebugWindow::debug_key_handler(const SDL_Event& evt) {
if (!os::events::isWindowEvent(evt, debug_sdl_window)) {
// Event belongs to another window
return false;
}
ptrdiff_t diff = 0;
switch(evt.key.keysym.sym) {
case SDLK_DOWN:
diff = 1;
break;
case SDLK_UP:
diff = -1;
break;
case SDLK_PAGEDOWN:
diff = 60; // It's not actually a page but this should still work
break;
case SDLK_PAGEUP:
diff = -60;
break;
case SDLK_END:
log_view_offset = lines.size() - 1;
return true;
case SDLK_HOME:
log_view_offset = 0;
return true;
default:
return true;
}
auto prev = log_view_offset;
log_view_offset += diff;
if (diff < 0 && log_view_offset > prev) {
// We had an overflow of the counter
log_view_offset = 0;
}
log_view_offset = std::min(log_view_offset, lines.size() - 1);
return true;
}
void DebugWindow::addDebugMessage(const char* category, const char* text) {
if (!debug_view) {
// Failed to create debug window, nothing to do here
return;
}
if (category != current_category) {
// There is a new category so we need to write the old string to our log
// Find finished lines and add them to our vector
split_current_and_add_to_log(category);
if (!current_line.empty()) {
// Write the last line to our log
LineInfo info;
info.text = current_line;
info.category = category;
addToLog(std::move(info));
current_line.clear();
}
}
current_category = category;
current_line += text;
split_current_and_add_to_log(category);
}
void DebugWindow::split_current_and_add_to_log(const SCP_string& category) {
size_t pos;
while ((pos = current_line.find('\n')) != SCP_string::npos) {
LineInfo info;
info.text = current_line.substr(0, pos);
info.category = category;
addToLog(std::move(info));
// Since pos refers to a valid position in the string, the expression pos + 1 is always valid since substr
// allows values up to str.size()
current_line = current_line.substr(pos + 1);
}
}
void DebugWindow::addToLog(LineInfo&& line) {
int cat_width;
gr_get_string_size(&cat_width, nullptr, line.category.c_str());
max_category_width = std::max(cat_width, max_category_width);
if (log_view_offset == lines.size() - 1) {
// We are currently showing the last line so we need to follow new entries
++log_view_offset;
}
lines.push_back(std::move(line));
}
}
| 2,548 |
852 | <gh_stars>100-1000
#ifndef PhysicsTools_Heppy_CMGMuonCleanerBySegmentsAlgo_h
#define PhysicsTools_Heppy_CMGMuonCleanerBySegmentsAlgo_h
#include "DataFormats/PatCandidates/interface/Muon.h"
#include "CommonTools/Utils/interface/StringCutObjectSelector.h"
#include <vector>
namespace heppy {
class CMGMuonCleanerBySegmentsAlgo {
public:
CMGMuonCleanerBySegmentsAlgo(double sharedFraction = 0.499,
const std::string &preselection = "track.isNonnull",
const std::string &passthrough = "isGlobalMuon && numberOfMatches >= 2")
: sharedFraction_(sharedFraction), preselection_(preselection, true), passthrough_(passthrough, true) {}
~CMGMuonCleanerBySegmentsAlgo();
/// Return a vector of boolean marking the muons to be considered clean
std::vector<bool> clean(const std::vector<pat::Muon> &muons) const;
private:
/// Fraction of shared segments
double sharedFraction_;
/// Preselection cut
StringCutObjectSelector<pat::Muon> preselection_;
/// Always-accept cut
StringCutObjectSelector<pat::Muon> passthrough_;
/// Methods
bool isSameMuon(const pat::Muon &mu1, const pat::Muon &mu2) const;
bool isBetterMuon(const pat::Muon &mu1, bool mu1PF, const pat::Muon &mu2, bool mu2PF) const;
};
} // namespace heppy
#endif
| 525 |
315 | <reponame>Unthrottled/GrepConsole<filename>src/krasa/grepconsole/plugin/GrepConsoleApplicationComponent.java<gh_stars>100-1000
package krasa.grepconsole.plugin;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.components.ApplicationComponent;
import com.intellij.openapi.components.PersistentStateComponent;
import com.intellij.openapi.components.State;
import com.intellij.openapi.components.Storage;
import krasa.grepconsole.Cloner;
import krasa.grepconsole.model.GrepExpressionGroup;
import krasa.grepconsole.model.GrepExpressionItem;
import krasa.grepconsole.model.Profile;
import krasa.grepconsole.model.TailSettings;
import krasa.grepconsole.tail.remotecall.RemoteCallService;
import org.jetbrains.annotations.NotNull;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
@State(name = "GrepConsole", storages = {@Storage("GrepConsole.xml")})
public class GrepConsoleApplicationComponent
implements ApplicationComponent,
PersistentStateComponent<PluginState> {
	// volatile so the double-checked locking in getCachedFoldingItems() publishes the cache safely
	protected volatile List<GrepExpressionItem> foldingsCache;
private PluginState settings;
protected int cachedMaxLengthToMatch = Integer.MAX_VALUE;
private Integer maxProcessingTimeAsInt;
public GrepConsoleApplicationComponent() {
}
public static GrepConsoleApplicationComponent getInstance() {
return ApplicationManager.getApplication().getComponent(GrepConsoleApplicationComponent.class);
}
public Integer getCachedMaxProcessingTimeAsInt() {
return maxProcessingTimeAsInt;
}
public int getCachedMaxLengthToMatch() {
return cachedMaxLengthToMatch;
}
public List<GrepExpressionItem> getCachedFoldingItems() {
if (foldingsCache == null) {
synchronized (this) {
if (foldingsCache == null) {
initFoldingCache();
}
}
}
return foldingsCache;
}
void initFoldingCache() {
List<GrepExpressionItem> list = new ArrayList<>();
Profile profile = getInstance().getState().getDefaultProfile();
maxProcessingTimeAsInt = profile.getMaxProcessingTimeAsInt();
if (profile.isEnableMaxLengthLimit()) {
cachedMaxLengthToMatch = profile.getMaxLengthToMatchAsInt();
} else {
cachedMaxLengthToMatch = Integer.MAX_VALUE;
}
List<GrepExpressionItem> grepExpressionItems = profile.getAllGrepExpressionItems();
for (GrepExpressionItem grepExpressionItem : grepExpressionItems) {
boolean enableFoldings = profile.isEnableFoldings();
boolean enabled = grepExpressionItem.isEnabled();
boolean fold = grepExpressionItem.isFold();
if (enableFoldings && enabled && fold) {
list.add(grepExpressionItem);
}
}
foldingsCache = list;
}
@Override
public void initComponent() {
final TailSettings tailSettings = getState().getTailSettings();
if (tailSettings.isEnabled()) {
RemoteCallService.getInstance().rebind(tailSettings);
}
}
@Override
public void disposeComponent() {
// TODO: insert component disposal logic here
}
@Override
@NotNull
public String getComponentName() {
return "GrepConsole";
}
@Override
@NotNull
public PluginState getState() {
if (settings == null) {
settings = new PluginState();
settings.setProfiles(DefaultState.createDefault());
}
return settings;
}
@Override
public void loadState(PluginState state) {
this.settings = state;
migrate();
}
protected void migrate() {
if (settings.getVersion() < 1) {
List<Profile> profiles = settings.getProfiles();
for (Profile profile : profiles) {
for (GrepExpressionGroup grepExpressionGroup : profile.getGrepExpressionGroups()) {
for (Iterator<GrepExpressionItem> iterator = grepExpressionGroup.getGrepExpressionItems().iterator(); iterator.hasNext(); ) {
GrepExpressionItem grepExpressionItem = iterator.next();
if (grepExpressionItem.isInputFilter()) {
GrepExpressionItem newItem = Cloner.deepClone(grepExpressionItem);
newItem.action(GrepExpressionItem.ACTION_REMOVE);
GrepExpressionGroup group = profile.getOrCreateInputFilterGroup(grepExpressionGroup.getName());
group.add(newItem);
iterator.remove();
}
}
}
}
settings.setVersion(1);
}
}
}
| 1,403 |
2,151 | <reponame>zipated/src
//
// Copyright 2018 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// vk_internal_shaders.cpp:
// Pre-generated shader library for the ANGLE Vulkan back-end.
#include "libANGLE/renderer/vulkan/vk_internal_shaders.h"
#include "libANGLE/renderer/vulkan/RendererVk.h"
namespace rx
{
namespace vk
{
ShaderLibrary::ShaderLibrary()
{
}
ShaderLibrary::~ShaderLibrary()
{
}
void ShaderLibrary::destroy(VkDevice device)
{
for (ShaderAndSerial &shader : mShaders)
{
shader.get().destroy(device);
}
}
Error ShaderLibrary::getShader(RendererVk *renderer,
InternalShaderID shaderID,
const ShaderAndSerial **shaderOut)
{
ShaderAndSerial &shader = mShaders[shaderID];
*shaderOut = &shader;
if (shader.get().valid())
{
return NoError();
}
const priv::ShaderBlob &shaderCode = priv::GetInternalShaderBlob(shaderID);
// Create shader lazily. Access will need to be locked for multi-threading.
VkShaderModuleCreateInfo createInfo;
createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
createInfo.pNext = nullptr;
createInfo.flags = 0;
createInfo.codeSize = shaderCode.codeSize;
createInfo.pCode = shaderCode.code;
ANGLE_TRY(shader.get().init(renderer->getDevice(), createInfo));
shader.updateSerial(renderer->issueShaderSerial());
return NoError();
}
} // namespace vk
} // namespace rx
| 648 |
314 | //
// UIView+ZJHelperKitUIkit.h
// ZJUIKit
//
// Created by dzj on 2018/1/17.
// Copyright © 2018年 kapokcloud. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface UIView (ZJHelperKit)
#pragma mark - Convenience frame api
/**
* view.frame.origin.x
*/
@property (nonatomic, assign) CGFloat zj_originX;
/**
* view.frame.origin.y
*/
@property (nonatomic, assign) CGFloat zj_originY;
/**
* view.frame.origin
*/
@property (nonatomic, assign) CGPoint zj_origin;
/**
* view.center.x
*/
@property (nonatomic, assign) CGFloat zj_centerX;
/**
* view.center.y
*/
@property (nonatomic, assign) CGFloat zj_centerY;
/**
* view.center
*/
@property (nonatomic, assign) CGPoint zj_center;
/**
* view.frame.size.width
*/
@property (nonatomic, assign) CGFloat zj_width;
/**
* view.frame.size.height
*/
@property (nonatomic, assign) CGFloat zj_height;
/**
* view.frame.size
*/
@property (nonatomic, assign) CGSize zj_size;
/**
* view.frame.size.height + view.frame.origin.y
*/
@property (nonatomic, assign) CGFloat zj_bottomY;
/**
* view.frame.size.width + view.frame.origin.x
*/
@property (nonatomic, assign) CGFloat zj_rightX;
@end
| 457 |
1,706 | <filename>app/src/main/java/com/lguipeng/notes/view/BetterFab.java
package com.lguipeng.notes.view;
import android.content.Context;
import android.support.design.widget.FloatingActionButton;
import android.util.AttributeSet;
/**
* Created by lgp on 2015/8/2.
*/
public class BetterFab extends FloatingActionButton{
private boolean forceHide = false;
public BetterFab(Context context, AttributeSet attrs) {
super(context, attrs);
}
public BetterFab(Context context, AttributeSet attrs, int defStyleAttr) {
super(context, attrs, defStyleAttr);
}
public BetterFab(Context context) {
super(context);
}
public boolean isForceHide() {
return forceHide;
}
public void setForceHide(boolean forceHide) {
this.forceHide = forceHide;
if (!forceHide) {
setVisibility(VISIBLE);
}else {
setVisibility(GONE);
}
}
    // if hidden, disable the animation
public boolean canAnimation(){
return !isForceHide();
}
}
| 407 |
450 | /*
* Copyright (C) 2008-2018 <NAME> <<EMAIL>>
* Copyright (C) 2008 <NAME> <<EMAIL>>
*
* Licence: wxWindows Library Licence, Version 3.1
*/
#ifndef __FAKE_SAMPLER_H__
#define __FAKE_SAMPLER_H__
#include <glib-object.h>
#include <gum/prof/gumsampler.h>
G_BEGIN_DECLS
#define GUM_TYPE_FAKE_SAMPLER (gum_fake_sampler_get_type ())
G_DECLARE_FINAL_TYPE (GumFakeSampler, gum_fake_sampler, GUM, FAKE_SAMPLER,
GObject)
GumSampler * gum_fake_sampler_new (void);
void gum_fake_sampler_advance (GumFakeSampler * self, GumSample delta);
G_END_DECLS
#endif
| 243 |
31,928 | <reponame>matt-mercer/localstack<gh_stars>1000+
import json
import time
def handler(event, context):
result = {"executionStart": time.time(), "event": event}
time.sleep(5)
    # Just print the event that was passed to the lambda
print(json.dumps(result))
return result
| 98 |
3,428 | <reponame>mhmdaminraeisi/stdlib
/**
* @license Apache-2.0
*
* Copyright (c) 2021 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "stdlib/ndarray/base/unary/c_c.h"
#include "stdlib/ndarray/base/unary/typedefs.h"
#include "stdlib/ndarray/base/unary/macros.h"
#include "stdlib/ndarray/base/unary/dispatch_object.h"
#include "stdlib/ndarray/base/unary/dispatch.h"
#include "stdlib/ndarray/ctor.h"
#include <stdint.h>
#include <complex.h>
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a zero-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a zero-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 0;
*
* // Define the array shapes:
* int64_t shape[] = {};
*
* // Define the strides:
* int64_t sx[] = { 0 };
* int64_t sy[] = { 0 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_0d( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_0d( struct ndarray *arrays[], void *fcn ) {
float complex v;
int8_t status = stdlib_ndarray_iget_complex64( arrays[ 0 ], 0, &v );
if ( status != 0 ) {
return -1;
}
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
status = stdlib_ndarray_iset_complex64( arrays[ 1 ], 0, f( v ) );
if ( status != 0 ) {
return -1;
}
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a one-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a one-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 1;
*
* // Define the array shapes:
* int64_t shape[] = { 2 };
*
* // Define the strides:
* int64_t sx[] = { 8 };
* int64_t sy[] = { 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_1d( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_1d( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
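	// Iterate over each element of the one-dimensional input ndarray, apply the callback `f`, and assign the result to the corresponding element of the output ndarray: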
STDLIB_NDARRAY_UNARY_1D_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a two-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a two-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 2;
*
* // Define the array shapes:
* int64_t shape[] = { 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 16, 8 };
* int64_t sy[] = { 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_2d( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_2d( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
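	// Iterate over each element of the two-dimensional input ndarray, apply the callback `f`, and assign the result to the corresponding element of the output ndarray: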
STDLIB_NDARRAY_UNARY_2D_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a two-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a two-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 2;
*
* // Define the array shapes:
* int64_t shape[] = { 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 16, 8 };
* int64_t sy[] = { 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_2d_blocked( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_2d_blocked( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
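	// Iterate over the two-dimensional input ndarray in cache-friendly blocks, applying the callback `f` and assigning results to the corresponding elements of the output ndarray: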
STDLIB_NDARRAY_UNARY_2D_BLOCKED_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a three-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a three-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 3;
*
* // Define the array shapes:
* int64_t shape[] = { 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 16, 8 };
* int64_t sy[] = { 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_3d( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_3d( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
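	// Iterate over each element of the three-dimensional input ndarray, apply the callback `f`, and assign the result to the corresponding element of the output ndarray: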
STDLIB_NDARRAY_UNARY_3D_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a three-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a three-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 3;
*
* // Define the array shapes:
* int64_t shape[] = { 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 16, 8 };
* int64_t sy[] = { 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_3d_blocked( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_3d_blocked( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
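	// Iterate over the three-dimensional input ndarray in cache-friendly blocks, applying the callback `f` and assigning results to the corresponding elements of the output ndarray: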
STDLIB_NDARRAY_UNARY_3D_BLOCKED_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a four-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a four-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 4;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_4d( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_4d( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
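	// Iterate over each element of the four-dimensional input ndarray, apply the callback `f`, and assign the result to the corresponding element of the output ndarray: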
STDLIB_NDARRAY_UNARY_4D_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a four-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a four-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 4;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_4d_blocked( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_4d_blocked( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
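	// Iterate over the four-dimensional input ndarray in cache-friendly blocks, applying the callback `f` and assigning results to the corresponding elements of the output ndarray: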
STDLIB_NDARRAY_UNARY_4D_BLOCKED_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a five-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a five-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 5;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_5d( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_5d( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
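	// Iterate over each element of the five-dimensional input ndarray, apply the callback `f`, and assign the result to the corresponding element of the output ndarray: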
STDLIB_NDARRAY_UNARY_5D_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a five-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a five-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 5;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_5d_blocked( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_5d_blocked( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
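	// Iterate over the five-dimensional input ndarray in cache-friendly blocks, applying the callback `f` and assigning results to the corresponding elements of the output ndarray: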
STDLIB_NDARRAY_UNARY_5D_BLOCKED_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a six-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a six-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 6;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_6d( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_6d( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
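	// Iterate over each element of the six-dimensional input ndarray, apply the callback `f`, and assign the result to the corresponding element of the output ndarray: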
STDLIB_NDARRAY_UNARY_6D_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a six-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a six-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 6;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_6d_blocked( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_6d_blocked( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
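	// Iterate over the six-dimensional input ndarray in cache-friendly blocks, applying the callback `f` and assigning results to the corresponding elements of the output ndarray: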
STDLIB_NDARRAY_UNARY_6D_BLOCKED_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a seven-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a seven-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 7;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 32, 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_7d( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_7d( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
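	// Iterate over each element of the seven-dimensional input ndarray, apply the callback `f`, and assign the result to the corresponding element of the output ndarray: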
STDLIB_NDARRAY_UNARY_7D_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a seven-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a seven-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 7;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 32, 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_7d_blocked( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_7d_blocked( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
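	// Iterate over the seven-dimensional input ndarray in cache-friendly blocks, applying the callback `f` and assigning results to the corresponding elements of the output ndarray: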
STDLIB_NDARRAY_UNARY_7D_BLOCKED_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to an eight-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in an eight-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 8;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 32, 32, 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_8d( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_8d( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
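	// Iterate over each element of the eight-dimensional input ndarray, apply the callback `f`, and assign the result to the corresponding element of the output ndarray: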
STDLIB_NDARRAY_UNARY_8D_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to an eight-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in an eight-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 8;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 32, 32, 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_8d_blocked( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_8d_blocked( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
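	// Iterate over the eight-dimensional input ndarray in cache-friendly blocks, applying the callback `f` and assigning results to the corresponding elements of the output ndarray: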
STDLIB_NDARRAY_UNARY_8D_BLOCKED_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a nine-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a nine-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 9;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 1, 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 32, 32, 32, 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_9d( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_9d( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
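	// Iterate over each element of the nine-dimensional input ndarray, apply the callback `f`, and assign the result to the corresponding element of the output ndarray: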
STDLIB_NDARRAY_UNARY_9D_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a nine-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a nine-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 9;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 1, 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 32, 32, 32, 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_9d_blocked( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_9d_blocked( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
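	// Iterate over the nine-dimensional input ndarray in cache-friendly blocks, applying the callback `f` and assigning results to the corresponding elements of the output ndarray: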
STDLIB_NDARRAY_UNARY_9D_BLOCKED_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a ten-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a ten-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 10;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 1, 1, 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 32, 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 32, 32, 32, 32, 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_10d( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_10d( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
STDLIB_NDARRAY_UNARY_10D_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to a ten-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in a ten-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 10;
*
* // Define the array shapes:
* int64_t shape[] = { 1, 1, 1, 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 32, 32, 32, 16, 8 };
* int64_t sy[] = { 32, 32, 32, 32, 32, 32, 32, 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_10d_blocked( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_10d_blocked( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
STDLIB_NDARRAY_UNARY_10D_BLOCKED_LOOP_CLBK( float complex, float complex )
return 0;
}
/**
* Applies a unary callback accepting and returning single-precision complex floating-point numbers to an n-dimensional single-precision complex floating-point number input ndarray and assigns results to elements in an n-dimensional single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 3;
*
* // Define the array shapes:
* int64_t shape[] = { 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 16, 8 };
* int64_t sy[] = { 32, 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c_nd( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c_nd( struct ndarray *arrays[], void *fcn ) {
typedef float complex func_type( const float complex x );
func_type *f = (func_type *)fcn;
STDLIB_NDARRAY_UNARY_ND_LOOP_CLBK( float complex, float complex )
return 0;
}
// Define a list of unary ndarray functions:
static ndarrayUnaryFcn functions[] = {
stdlib_ndarray_c_c_0d,
stdlib_ndarray_c_c_1d,
stdlib_ndarray_c_c_2d,
stdlib_ndarray_c_c_3d,
stdlib_ndarray_c_c_4d,
stdlib_ndarray_c_c_5d,
stdlib_ndarray_c_c_6d,
stdlib_ndarray_c_c_7d,
stdlib_ndarray_c_c_8d,
stdlib_ndarray_c_c_9d,
stdlib_ndarray_c_c_10d,
stdlib_ndarray_c_c_nd
};
// Define a list of unary ndarray functions implementing loop blocking...
static ndarrayUnaryFcn blocked_functions[] = {
stdlib_ndarray_c_c_2d_blocked,
stdlib_ndarray_c_c_3d_blocked,
stdlib_ndarray_c_c_4d_blocked,
stdlib_ndarray_c_c_5d_blocked,
stdlib_ndarray_c_c_6d_blocked,
stdlib_ndarray_c_c_7d_blocked,
stdlib_ndarray_c_c_8d_blocked,
stdlib_ndarray_c_c_9d_blocked,
stdlib_ndarray_c_c_10d_blocked
};
// Create a unary function dispatch object:
static const struct ndarrayUnaryDispatchObject obj = {
// Array containing unary ndarray functions:
functions,
// Number of unary ndarray functions:
12,
// Array containing unary ndarray functions using loop blocking:
blocked_functions,
// Number of unary ndarray functions using loop blocking:
9
};
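/*
 * Note on dispatch (an assumption, since `stdlib_ndarray_unary_dispatch` is defined
 * elsewhere): `functions` is expected to be indexed by input dimensionality, with
 * entries for 0 through 10 dimensions followed by the n-dimensional fallback, while
 * the `blocked_functions` (2 through 10 dimensions) are expected to be selected when
 * loop blocking is advantageous for the given shapes and strides.
 */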
/**
 * Applies a unary callback accepting and returning single-precision complex floating-point numbers to a single-precision complex floating-point number input ndarray and assigns results to elements in a single-precision complex floating-point number output ndarray.
*
* ## Notes
*
 * - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose first element is a pointer to an input ndarray and whose last element is a pointer to an output ndarray
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/unary/c_c.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
* #include <complex.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE dtype = STDLIB_NDARRAY_COMPLEX64;
*
* // Create underlying byte arrays:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
* uint8_t ybuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 2;
*
* // Define the array shapes:
* int64_t shape[] = { 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 16, 8 };
* int64_t sy[] = { 16, 8 };
*
* // Define the offsets:
* int64_t ox = 0;
* int64_t oy = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an input ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( dtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an output ndarray:
* struct ndarray *y = stdlib_ndarray_allocate( dtype, ybuf, ndims, shape, sy, oy, order, imode, nsubmodes, submodes );
* if ( y == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing the ndarrays:
* struct ndarray *arrays[] = { x, y };
*
* // Define a callback:
* float complex scale( const float complex x ) {
* float re = crealf( x );
* float im = cimagf( x );
* return ( re+10.0f ) + ( im+10.0f )*I;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_c_c( arrays, (void *)scale );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
* stdlib_ndarray_free( y );
*/
int8_t stdlib_ndarray_c_c( struct ndarray *arrays[], void *fcn ) {
return stdlib_ndarray_unary_dispatch( &obj, arrays, fcn );
}
| 31,882 |
1,318 | <filename>ClangLib/usr/lib/clang/13.0.0/include/riscv_vector.h
/*===---- riscv_vector.h - RISC-V V-extension RVVIntrinsics -------------------===
*
*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*===-----------------------------------------------------------------------===
*/
#ifndef __RISCV_VECTOR_H
#define __RISCV_VECTOR_H
#include <stdint.h>
#include <stddef.h>
#ifndef __riscv_vector
#error "Vector intrinsics require the vector extension."
#endif
#ifdef __cplusplus
extern "C" {
#endif
#define vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
#define vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)
#define vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)
#define vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0)
#define vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1)
#define vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2)
#define vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3)
#define vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
#define vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7)
#define vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0)
#define vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1)
#define vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2)
#define vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3)
#define vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)
#define vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0)
#define vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1)
#define vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
#define vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)
#define vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0)
#define vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
#define vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
#define vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
#define vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
#define vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
#define vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
#define vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0)
#define vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1)
#define vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2)
#define vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3)
#define vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
#define vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7)
#define vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0)
#define vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1)
#define vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2)
#define vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3)
#define vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)
#define vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0)
#define vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1)
#define vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
#define vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)
#define vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0)
#define vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
#define vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
#define vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
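/*
 * Usage sketch (not part of the generated header): vsetvl_e<SEW>m<LMUL>(avl)
 * requests a vector configuration for up to `avl` elements at the given element
 * width (SEW) and register grouping (LMUL) and returns the element count `vl`
 * granted for the next pass; vsetvlmax_*() returns the maximum `vl` for that
 * configuration. A strip-mined loop re-requests `vl` on every pass; `n` below is
 * a hypothetical buffer length:
 *
 *     for (size_t i = 0; i < n; ) {
 *         size_t vl = vsetvl_e32m1(n - i);  // elements processed this pass
 *         // ... load, compute, and store vl elements starting at index i ...
 *         i += vl;
 *     }
 */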
typedef __rvv_bool64_t vbool64_t;
typedef __rvv_bool32_t vbool32_t;
typedef __rvv_bool16_t vbool16_t;
typedef __rvv_bool8_t vbool8_t;
typedef __rvv_bool4_t vbool4_t;
typedef __rvv_bool2_t vbool2_t;
typedef __rvv_bool1_t vbool1_t;
typedef __rvv_int8mf8_t vint8mf8_t;
typedef __rvv_uint8mf8_t vuint8mf8_t;
typedef __rvv_int8mf4_t vint8mf4_t;
typedef __rvv_uint8mf4_t vuint8mf4_t;
typedef __rvv_int8mf2_t vint8mf2_t;
typedef __rvv_uint8mf2_t vuint8mf2_t;
typedef __rvv_int8m1_t vint8m1_t;
typedef __rvv_uint8m1_t vuint8m1_t;
typedef __rvv_int8m2_t vint8m2_t;
typedef __rvv_uint8m2_t vuint8m2_t;
typedef __rvv_int8m4_t vint8m4_t;
typedef __rvv_uint8m4_t vuint8m4_t;
typedef __rvv_int8m8_t vint8m8_t;
typedef __rvv_uint8m8_t vuint8m8_t;
typedef __rvv_int16mf4_t vint16mf4_t;
typedef __rvv_uint16mf4_t vuint16mf4_t;
typedef __rvv_int16mf2_t vint16mf2_t;
typedef __rvv_uint16mf2_t vuint16mf2_t;
typedef __rvv_int16m1_t vint16m1_t;
typedef __rvv_uint16m1_t vuint16m1_t;
typedef __rvv_int16m2_t vint16m2_t;
typedef __rvv_uint16m2_t vuint16m2_t;
typedef __rvv_int16m4_t vint16m4_t;
typedef __rvv_uint16m4_t vuint16m4_t;
typedef __rvv_int16m8_t vint16m8_t;
typedef __rvv_uint16m8_t vuint16m8_t;
typedef __rvv_int32mf2_t vint32mf2_t;
typedef __rvv_uint32mf2_t vuint32mf2_t;
typedef __rvv_int32m1_t vint32m1_t;
typedef __rvv_uint32m1_t vuint32m1_t;
typedef __rvv_int32m2_t vint32m2_t;
typedef __rvv_uint32m2_t vuint32m2_t;
typedef __rvv_int32m4_t vint32m4_t;
typedef __rvv_uint32m4_t vuint32m4_t;
typedef __rvv_int32m8_t vint32m8_t;
typedef __rvv_uint32m8_t vuint32m8_t;
typedef __rvv_int64m1_t vint64m1_t;
typedef __rvv_uint64m1_t vuint64m1_t;
typedef __rvv_int64m2_t vint64m2_t;
typedef __rvv_uint64m2_t vuint64m2_t;
typedef __rvv_int64m4_t vint64m4_t;
typedef __rvv_uint64m4_t vuint64m4_t;
typedef __rvv_int64m8_t vint64m8_t;
typedef __rvv_uint64m8_t vuint64m8_t;
#if defined(__riscv_zfh)
typedef __rvv_float16mf4_t vfloat16mf4_t;
typedef __rvv_float16mf2_t vfloat16mf2_t;
typedef __rvv_float16m1_t vfloat16m1_t;
typedef __rvv_float16m2_t vfloat16m2_t;
typedef __rvv_float16m4_t vfloat16m4_t;
typedef __rvv_float16m8_t vfloat16m8_t;
#endif
#if defined(__riscv_f)
typedef __rvv_float32mf2_t vfloat32mf2_t;
typedef __rvv_float32m1_t vfloat32m1_t;
typedef __rvv_float32m2_t vfloat32m2_t;
typedef __rvv_float32m4_t vfloat32m4_t;
typedef __rvv_float32m8_t vfloat32m8_t;
#endif
#if defined(__riscv_d)
typedef __rvv_float64m1_t vfloat64m1_t;
typedef __rvv_float64m2_t vfloat64m2_t;
typedef __rvv_float64m4_t vfloat64m4_t;
typedef __rvv_float64m8_t vfloat64m8_t;
#endif
#define vadd_vv_i8m1(op0, op1, op2) \
__builtin_rvv_vadd_vv_i8m1((vint8m1_t)(op0), (vint8m1_t)(op1), (size_t)(op2))
#define vadd_vv_i8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i8m1_m((vint8m1_t)(op0), (vint8m1_t)(op1), (vint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vv_i8m2(op0, op1, op2) \
__builtin_rvv_vadd_vv_i8m2((vint8m2_t)(op0), (vint8m2_t)(op1), (size_t)(op2))
#define vadd_vv_i8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i8m2_m((vint8m2_t)(op0), (vint8m2_t)(op1), (vint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vadd_vv_i8m4(op0, op1, op2) \
__builtin_rvv_vadd_vv_i8m4((vint8m4_t)(op0), (vint8m4_t)(op1), (size_t)(op2))
#define vadd_vv_i8m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i8m4_m((vint8m4_t)(op0), (vint8m4_t)(op1), (vint8m4_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vadd_vv_i8m8(op0, op1, op2) \
__builtin_rvv_vadd_vv_i8m8((vint8m8_t)(op0), (vint8m8_t)(op1), (size_t)(op2))
#define vadd_vv_i8m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i8m8_m((vint8m8_t)(op0), (vint8m8_t)(op1), (vint8m8_t)(op2), (vbool1_t)(op3), (size_t)(op4))
#define vadd_vv_i8mf2(op0, op1, op2) \
__builtin_rvv_vadd_vv_i8mf2((vint8mf2_t)(op0), (vint8mf2_t)(op1), (size_t)(op2))
#define vadd_vv_i8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i8mf2_m((vint8mf2_t)(op0), (vint8mf2_t)(op1), (vint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vv_i8mf4(op0, op1, op2) \
__builtin_rvv_vadd_vv_i8mf4((vint8mf4_t)(op0), (vint8mf4_t)(op1), (size_t)(op2))
#define vadd_vv_i8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i8mf4_m((vint8mf4_t)(op0), (vint8mf4_t)(op1), (vint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vv_i8mf8(op0, op1, op2) \
__builtin_rvv_vadd_vv_i8mf8((vint8mf8_t)(op0), (vint8mf8_t)(op1), (size_t)(op2))
#define vadd_vv_i8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i8mf8_m((vint8mf8_t)(op0), (vint8mf8_t)(op1), (vint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vv_i16m1(op0, op1, op2) \
__builtin_rvv_vadd_vv_i16m1((vint16m1_t)(op0), (vint16m1_t)(op1), (size_t)(op2))
#define vadd_vv_i16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i16m1_m((vint16m1_t)(op0), (vint16m1_t)(op1), (vint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vv_i16m2(op0, op1, op2) \
__builtin_rvv_vadd_vv_i16m2((vint16m2_t)(op0), (vint16m2_t)(op1), (size_t)(op2))
#define vadd_vv_i16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i16m2_m((vint16m2_t)(op0), (vint16m2_t)(op1), (vint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vv_i16m4(op0, op1, op2) \
__builtin_rvv_vadd_vv_i16m4((vint16m4_t)(op0), (vint16m4_t)(op1), (size_t)(op2))
#define vadd_vv_i16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i16m4_m((vint16m4_t)(op0), (vint16m4_t)(op1), (vint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vadd_vv_i16m8(op0, op1, op2) \
__builtin_rvv_vadd_vv_i16m8((vint16m8_t)(op0), (vint16m8_t)(op1), (size_t)(op2))
#define vadd_vv_i16m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i16m8_m((vint16m8_t)(op0), (vint16m8_t)(op1), (vint16m8_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vadd_vv_i16mf2(op0, op1, op2) \
__builtin_rvv_vadd_vv_i16mf2((vint16mf2_t)(op0), (vint16mf2_t)(op1), (size_t)(op2))
#define vadd_vv_i16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i16mf2_m((vint16mf2_t)(op0), (vint16mf2_t)(op1), (vint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vv_i16mf4(op0, op1, op2) \
__builtin_rvv_vadd_vv_i16mf4((vint16mf4_t)(op0), (vint16mf4_t)(op1), (size_t)(op2))
#define vadd_vv_i16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i16mf4_m((vint16mf4_t)(op0), (vint16mf4_t)(op1), (vint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vv_i32m1(op0, op1, op2) \
__builtin_rvv_vadd_vv_i32m1((vint32m1_t)(op0), (vint32m1_t)(op1), (size_t)(op2))
#define vadd_vv_i32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i32m1_m((vint32m1_t)(op0), (vint32m1_t)(op1), (vint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vv_i32m2(op0, op1, op2) \
__builtin_rvv_vadd_vv_i32m2((vint32m2_t)(op0), (vint32m2_t)(op1), (size_t)(op2))
#define vadd_vv_i32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i32m2_m((vint32m2_t)(op0), (vint32m2_t)(op1), (vint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vv_i32m4(op0, op1, op2) \
__builtin_rvv_vadd_vv_i32m4((vint32m4_t)(op0), (vint32m4_t)(op1), (size_t)(op2))
#define vadd_vv_i32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i32m4_m((vint32m4_t)(op0), (vint32m4_t)(op1), (vint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vv_i32m8(op0, op1, op2) \
__builtin_rvv_vadd_vv_i32m8((vint32m8_t)(op0), (vint32m8_t)(op1), (size_t)(op2))
#define vadd_vv_i32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i32m8_m((vint32m8_t)(op0), (vint32m8_t)(op1), (vint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vadd_vv_i32mf2(op0, op1, op2) \
__builtin_rvv_vadd_vv_i32mf2((vint32mf2_t)(op0), (vint32mf2_t)(op1), (size_t)(op2))
#define vadd_vv_i32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i32mf2_m((vint32mf2_t)(op0), (vint32mf2_t)(op1), (vint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vv_i64m1(op0, op1, op2) \
__builtin_rvv_vadd_vv_i64m1((vint64m1_t)(op0), (vint64m1_t)(op1), (size_t)(op2))
#define vadd_vv_i64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i64m1_m((vint64m1_t)(op0), (vint64m1_t)(op1), (vint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vv_i64m2(op0, op1, op2) \
__builtin_rvv_vadd_vv_i64m2((vint64m2_t)(op0), (vint64m2_t)(op1), (size_t)(op2))
#define vadd_vv_i64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i64m2_m((vint64m2_t)(op0), (vint64m2_t)(op1), (vint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vv_i64m4(op0, op1, op2) \
__builtin_rvv_vadd_vv_i64m4((vint64m4_t)(op0), (vint64m4_t)(op1), (size_t)(op2))
#define vadd_vv_i64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i64m4_m((vint64m4_t)(op0), (vint64m4_t)(op1), (vint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vv_i64m8(op0, op1, op2) \
__builtin_rvv_vadd_vv_i64m8((vint64m8_t)(op0), (vint64m8_t)(op1), (size_t)(op2))
#define vadd_vv_i64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_i64m8_m((vint64m8_t)(op0), (vint64m8_t)(op1), (vint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vle8_v_i8m1(op0, op1) \
__builtin_rvv_vle8_v_i8m1((const int8_t *)(op0), (size_t)(op1))
#define vle8_v_i8m1_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_i8m1_m((vint8m1_t)(op0), (const int8_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vle8_v_i8m2(op0, op1) \
__builtin_rvv_vle8_v_i8m2((const int8_t *)(op0), (size_t)(op1))
#define vle8_v_i8m2_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_i8m2_m((vint8m2_t)(op0), (const int8_t *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vle8_v_i8m4(op0, op1) \
__builtin_rvv_vle8_v_i8m4((const int8_t *)(op0), (size_t)(op1))
#define vle8_v_i8m4_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_i8m4_m((vint8m4_t)(op0), (const int8_t *)(op1), (vbool2_t)(op2), (size_t)(op3))
#define vle8_v_i8m8(op0, op1) \
__builtin_rvv_vle8_v_i8m8((const int8_t *)(op0), (size_t)(op1))
#define vle8_v_i8m8_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_i8m8_m((vint8m8_t)(op0), (const int8_t *)(op1), (vbool1_t)(op2), (size_t)(op3))
#define vle8_v_i8mf2(op0, op1) \
__builtin_rvv_vle8_v_i8mf2((const int8_t *)(op0), (size_t)(op1))
#define vle8_v_i8mf2_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_i8mf2_m((vint8mf2_t)(op0), (const int8_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vle8_v_i8mf4(op0, op1) \
__builtin_rvv_vle8_v_i8mf4((const int8_t *)(op0), (size_t)(op1))
#define vle8_v_i8mf4_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_i8mf4_m((vint8mf4_t)(op0), (const int8_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vle8_v_i8mf8(op0, op1) \
__builtin_rvv_vle8_v_i8mf8((const int8_t *)(op0), (size_t)(op1))
#define vle8_v_i8mf8_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_i8mf8_m((vint8mf8_t)(op0), (const int8_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vloxei64_v_u64m1(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u64m1((const uint64_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vloxei64_v_u64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u64m1_m((vuint64m1_t)(op0), (const uint64_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei64_v_u64m2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u64m2((const uint64_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vloxei64_v_u64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u64m2_m((vuint64m2_t)(op0), (const uint64_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei64_v_u64m4(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u64m4((const uint64_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vloxei64_v_u64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u64m4_m((vuint64m4_t)(op0), (const uint64_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei64_v_u64m8(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u64m8((const uint64_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vloxei64_v_u64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u64m8_m((vuint64m8_t)(op0), (const uint64_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vx_i8m1(op0, op1, op2) \
__builtin_rvv_vadd_vx_i8m1((vint8m1_t)(op0), (int8_t)(op1), (size_t)(op2))
#define vadd_vx_i8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i8m1_m((vint8m1_t)(op0), (vint8m1_t)(op1), (int8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vx_i8m2(op0, op1, op2) \
__builtin_rvv_vadd_vx_i8m2((vint8m2_t)(op0), (int8_t)(op1), (size_t)(op2))
#define vadd_vx_i8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i8m2_m((vint8m2_t)(op0), (vint8m2_t)(op1), (int8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vadd_vx_i8m4(op0, op1, op2) \
__builtin_rvv_vadd_vx_i8m4((vint8m4_t)(op0), (int8_t)(op1), (size_t)(op2))
#define vadd_vx_i8m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i8m4_m((vint8m4_t)(op0), (vint8m4_t)(op1), (int8_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vadd_vx_i8m8(op0, op1, op2) \
__builtin_rvv_vadd_vx_i8m8((vint8m8_t)(op0), (int8_t)(op1), (size_t)(op2))
#define vadd_vx_i8m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i8m8_m((vint8m8_t)(op0), (vint8m8_t)(op1), (int8_t)(op2), (vbool1_t)(op3), (size_t)(op4))
#define vadd_vx_i8mf2(op0, op1, op2) \
__builtin_rvv_vadd_vx_i8mf2((vint8mf2_t)(op0), (int8_t)(op1), (size_t)(op2))
#define vadd_vx_i8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i8mf2_m((vint8mf2_t)(op0), (vint8mf2_t)(op1), (int8_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vx_i8mf4(op0, op1, op2) \
__builtin_rvv_vadd_vx_i8mf4((vint8mf4_t)(op0), (int8_t)(op1), (size_t)(op2))
#define vadd_vx_i8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i8mf4_m((vint8mf4_t)(op0), (vint8mf4_t)(op1), (int8_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vx_i8mf8(op0, op1, op2) \
__builtin_rvv_vadd_vx_i8mf8((vint8mf8_t)(op0), (int8_t)(op1), (size_t)(op2))
#define vadd_vx_i8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i8mf8_m((vint8mf8_t)(op0), (vint8mf8_t)(op1), (int8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vx_i16m1(op0, op1, op2) \
__builtin_rvv_vadd_vx_i16m1((vint16m1_t)(op0), (int16_t)(op1), (size_t)(op2))
#define vadd_vx_i16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i16m1_m((vint16m1_t)(op0), (vint16m1_t)(op1), (int16_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vx_i16m2(op0, op1, op2) \
__builtin_rvv_vadd_vx_i16m2((vint16m2_t)(op0), (int16_t)(op1), (size_t)(op2))
#define vadd_vx_i16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i16m2_m((vint16m2_t)(op0), (vint16m2_t)(op1), (int16_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vx_i16m4(op0, op1, op2) \
__builtin_rvv_vadd_vx_i16m4((vint16m4_t)(op0), (int16_t)(op1), (size_t)(op2))
#define vadd_vx_i16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i16m4_m((vint16m4_t)(op0), (vint16m4_t)(op1), (int16_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vadd_vx_i16m8(op0, op1, op2) \
__builtin_rvv_vadd_vx_i16m8((vint16m8_t)(op0), (int16_t)(op1), (size_t)(op2))
#define vadd_vx_i16m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i16m8_m((vint16m8_t)(op0), (vint16m8_t)(op1), (int16_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vadd_vx_i16mf2(op0, op1, op2) \
__builtin_rvv_vadd_vx_i16mf2((vint16mf2_t)(op0), (int16_t)(op1), (size_t)(op2))
#define vadd_vx_i16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i16mf2_m((vint16mf2_t)(op0), (vint16mf2_t)(op1), (int16_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vx_i16mf4(op0, op1, op2) \
__builtin_rvv_vadd_vx_i16mf4((vint16mf4_t)(op0), (int16_t)(op1), (size_t)(op2))
#define vadd_vx_i16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i16mf4_m((vint16mf4_t)(op0), (vint16mf4_t)(op1), (int16_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vx_i32m1(op0, op1, op2) \
__builtin_rvv_vadd_vx_i32m1((vint32m1_t)(op0), (int32_t)(op1), (size_t)(op2))
#define vadd_vx_i32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i32m1_m((vint32m1_t)(op0), (vint32m1_t)(op1), (int32_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vx_i32m2(op0, op1, op2) \
__builtin_rvv_vadd_vx_i32m2((vint32m2_t)(op0), (int32_t)(op1), (size_t)(op2))
#define vadd_vx_i32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i32m2_m((vint32m2_t)(op0), (vint32m2_t)(op1), (int32_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vx_i32m4(op0, op1, op2) \
__builtin_rvv_vadd_vx_i32m4((vint32m4_t)(op0), (int32_t)(op1), (size_t)(op2))
#define vadd_vx_i32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i32m4_m((vint32m4_t)(op0), (vint32m4_t)(op1), (int32_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vx_i32m8(op0, op1, op2) \
__builtin_rvv_vadd_vx_i32m8((vint32m8_t)(op0), (int32_t)(op1), (size_t)(op2))
#define vadd_vx_i32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i32m8_m((vint32m8_t)(op0), (vint32m8_t)(op1), (int32_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vadd_vx_i32mf2(op0, op1, op2) \
__builtin_rvv_vadd_vx_i32mf2((vint32mf2_t)(op0), (int32_t)(op1), (size_t)(op2))
#define vadd_vx_i32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i32mf2_m((vint32mf2_t)(op0), (vint32mf2_t)(op1), (int32_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vx_i64m1(op0, op1, op2) \
__builtin_rvv_vadd_vx_i64m1((vint64m1_t)(op0), (int64_t)(op1), (size_t)(op2))
#define vadd_vx_i64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i64m1_m((vint64m1_t)(op0), (vint64m1_t)(op1), (int64_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vx_i64m2(op0, op1, op2) \
__builtin_rvv_vadd_vx_i64m2((vint64m2_t)(op0), (int64_t)(op1), (size_t)(op2))
#define vadd_vx_i64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i64m2_m((vint64m2_t)(op0), (vint64m2_t)(op1), (int64_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vx_i64m4(op0, op1, op2) \
__builtin_rvv_vadd_vx_i64m4((vint64m4_t)(op0), (int64_t)(op1), (size_t)(op2))
#define vadd_vx_i64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i64m4_m((vint64m4_t)(op0), (vint64m4_t)(op1), (int64_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vx_i64m8(op0, op1, op2) \
__builtin_rvv_vadd_vx_i64m8((vint64m8_t)(op0), (int64_t)(op1), (size_t)(op2))
#define vadd_vx_i64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_i64m8_m((vint64m8_t)(op0), (vint64m8_t)(op1), (int64_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vse8_v_i8m1(op1, op0, op2) \
__builtin_rvv_vse8_v_i8m1((vint8m1_t)(op0), (int8_t *)(op1), (size_t)(op2))
#define vse8_v_i8m1_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_i8m1_m((vint8m1_t)(op0), (int8_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vse8_v_i8m2(op1, op0, op2) \
__builtin_rvv_vse8_v_i8m2((vint8m2_t)(op0), (int8_t *)(op1), (size_t)(op2))
#define vse8_v_i8m2_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_i8m2_m((vint8m2_t)(op0), (int8_t *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vse8_v_i8m4(op1, op0, op2) \
__builtin_rvv_vse8_v_i8m4((vint8m4_t)(op0), (int8_t *)(op1), (size_t)(op2))
#define vse8_v_i8m4_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_i8m4_m((vint8m4_t)(op0), (int8_t *)(op1), (vbool2_t)(op2), (size_t)(op3))
#define vse8_v_i8m8(op1, op0, op2) \
__builtin_rvv_vse8_v_i8m8((vint8m8_t)(op0), (int8_t *)(op1), (size_t)(op2))
#define vse8_v_i8m8_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_i8m8_m((vint8m8_t)(op0), (int8_t *)(op1), (vbool1_t)(op2), (size_t)(op3))
#define vse8_v_i8mf2(op1, op0, op2) \
__builtin_rvv_vse8_v_i8mf2((vint8mf2_t)(op0), (int8_t *)(op1), (size_t)(op2))
#define vse8_v_i8mf2_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_i8mf2_m((vint8mf2_t)(op0), (int8_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vse8_v_i8mf4(op1, op0, op2) \
__builtin_rvv_vse8_v_i8mf4((vint8mf4_t)(op0), (int8_t *)(op1), (size_t)(op2))
#define vse8_v_i8mf4_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_i8mf4_m((vint8mf4_t)(op0), (int8_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vse8_v_i8mf8(op1, op0, op2) \
__builtin_rvv_vse8_v_i8mf8((vint8mf8_t)(op0), (int8_t *)(op1), (size_t)(op2))
#define vse8_v_i8mf8_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_i8mf8_m((vint8mf8_t)(op0), (int8_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
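/*
 * Worked example (a minimal sketch, not part of the generated header): compute
 * dst[i] = a[i] + b[i] + 1 over int8_t buffers with the e8/m1 intrinsics defined
 * above, exercising both the vector-vector and vector-scalar add forms. The
 * function name, buffer names, and length `n` are hypothetical.
 *
 *     void add_i8(int8_t *dst, const int8_t *a, const int8_t *b, size_t n) {
 *         while (n > 0) {
 *             size_t vl = vsetvl_e8m1(n);            // elements handled this pass
 *             vint8m1_t va = vle8_v_i8m1(a, vl);     // unit-stride loads
 *             vint8m1_t vb = vle8_v_i8m1(b, vl);
 *             vint8m1_t vc = vadd_vv_i8m1(va, vb, vl);
 *             vc = vadd_vx_i8m1(vc, 1, vl);          // add the scalar bias
 *             vse8_v_i8m1(dst, vc, vl);              // unit-stride store
 *             a += vl; b += vl; dst += vl; n -= vl;
 *         }
 *     }
 */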
#define vadd_vv_u8m1(op0, op1, op2) \
__builtin_rvv_vadd_vv_u8m1((vuint8m1_t)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vadd_vv_u8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u8m1_m((vuint8m1_t)(op0), (vuint8m1_t)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vv_u8m2(op0, op1, op2) \
__builtin_rvv_vadd_vv_u8m2((vuint8m2_t)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vadd_vv_u8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u8m2_m((vuint8m2_t)(op0), (vuint8m2_t)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vadd_vv_u8m4(op0, op1, op2) \
__builtin_rvv_vadd_vv_u8m4((vuint8m4_t)(op0), (vuint8m4_t)(op1), (size_t)(op2))
#define vadd_vv_u8m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u8m4_m((vuint8m4_t)(op0), (vuint8m4_t)(op1), (vuint8m4_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vadd_vv_u8m8(op0, op1, op2) \
__builtin_rvv_vadd_vv_u8m8((vuint8m8_t)(op0), (vuint8m8_t)(op1), (size_t)(op2))
#define vadd_vv_u8m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u8m8_m((vuint8m8_t)(op0), (vuint8m8_t)(op1), (vuint8m8_t)(op2), (vbool1_t)(op3), (size_t)(op4))
#define vadd_vv_u8mf2(op0, op1, op2) \
__builtin_rvv_vadd_vv_u8mf2((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vadd_vv_u8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u8mf2_m((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vv_u8mf4(op0, op1, op2) \
__builtin_rvv_vadd_vv_u8mf4((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vadd_vv_u8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u8mf4_m((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vv_u8mf8(op0, op1, op2) \
__builtin_rvv_vadd_vv_u8mf8((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vadd_vv_u8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u8mf8_m((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vv_u16m1(op0, op1, op2) \
__builtin_rvv_vadd_vv_u16m1((vuint16m1_t)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vadd_vv_u16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u16m1_m((vuint16m1_t)(op0), (vuint16m1_t)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vv_u16m2(op0, op1, op2) \
__builtin_rvv_vadd_vv_u16m2((vuint16m2_t)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vadd_vv_u16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u16m2_m((vuint16m2_t)(op0), (vuint16m2_t)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vv_u16m4(op0, op1, op2) \
__builtin_rvv_vadd_vv_u16m4((vuint16m4_t)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vadd_vv_u16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u16m4_m((vuint16m4_t)(op0), (vuint16m4_t)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vadd_vv_u16m8(op0, op1, op2) \
__builtin_rvv_vadd_vv_u16m8((vuint16m8_t)(op0), (vuint16m8_t)(op1), (size_t)(op2))
#define vadd_vv_u16m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u16m8_m((vuint16m8_t)(op0), (vuint16m8_t)(op1), (vuint16m8_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vadd_vv_u16mf2(op0, op1, op2) \
__builtin_rvv_vadd_vv_u16mf2((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vadd_vv_u16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u16mf2_m((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vv_u16mf4(op0, op1, op2) \
__builtin_rvv_vadd_vv_u16mf4((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vadd_vv_u16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u16mf4_m((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vv_u32m1(op0, op1, op2) \
__builtin_rvv_vadd_vv_u32m1((vuint32m1_t)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vadd_vv_u32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u32m1_m((vuint32m1_t)(op0), (vuint32m1_t)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vv_u32m2(op0, op1, op2) \
__builtin_rvv_vadd_vv_u32m2((vuint32m2_t)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vadd_vv_u32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u32m2_m((vuint32m2_t)(op0), (vuint32m2_t)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vv_u32m4(op0, op1, op2) \
__builtin_rvv_vadd_vv_u32m4((vuint32m4_t)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vadd_vv_u32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u32m4_m((vuint32m4_t)(op0), (vuint32m4_t)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vv_u32m8(op0, op1, op2) \
__builtin_rvv_vadd_vv_u32m8((vuint32m8_t)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vadd_vv_u32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u32m8_m((vuint32m8_t)(op0), (vuint32m8_t)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vadd_vv_u32mf2(op0, op1, op2) \
__builtin_rvv_vadd_vv_u32mf2((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vadd_vv_u32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u32mf2_m((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vv_u64m1(op0, op1, op2) \
__builtin_rvv_vadd_vv_u64m1((vuint64m1_t)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vadd_vv_u64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u64m1_m((vuint64m1_t)(op0), (vuint64m1_t)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vv_u64m2(op0, op1, op2) \
__builtin_rvv_vadd_vv_u64m2((vuint64m2_t)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vadd_vv_u64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u64m2_m((vuint64m2_t)(op0), (vuint64m2_t)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vv_u64m4(op0, op1, op2) \
__builtin_rvv_vadd_vv_u64m4((vuint64m4_t)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vadd_vv_u64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u64m4_m((vuint64m4_t)(op0), (vuint64m4_t)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vv_u64m8(op0, op1, op2) \
__builtin_rvv_vadd_vv_u64m8((vuint64m8_t)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vadd_vv_u64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vv_u64m8_m((vuint64m8_t)(op0), (vuint64m8_t)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vx_u8m1(op0, op1, op2) \
__builtin_rvv_vadd_vx_u8m1((vuint8m1_t)(op0), (uint8_t)(op1), (size_t)(op2))
#define vadd_vx_u8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u8m1_m((vuint8m1_t)(op0), (vuint8m1_t)(op1), (uint8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vx_u8m2(op0, op1, op2) \
__builtin_rvv_vadd_vx_u8m2((vuint8m2_t)(op0), (uint8_t)(op1), (size_t)(op2))
#define vadd_vx_u8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u8m2_m((vuint8m2_t)(op0), (vuint8m2_t)(op1), (uint8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vadd_vx_u8m4(op0, op1, op2) \
__builtin_rvv_vadd_vx_u8m4((vuint8m4_t)(op0), (uint8_t)(op1), (size_t)(op2))
#define vadd_vx_u8m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u8m4_m((vuint8m4_t)(op0), (vuint8m4_t)(op1), (uint8_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vadd_vx_u8m8(op0, op1, op2) \
__builtin_rvv_vadd_vx_u8m8((vuint8m8_t)(op0), (uint8_t)(op1), (size_t)(op2))
#define vadd_vx_u8m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u8m8_m((vuint8m8_t)(op0), (vuint8m8_t)(op1), (uint8_t)(op2), (vbool1_t)(op3), (size_t)(op4))
#define vadd_vx_u8mf2(op0, op1, op2) \
__builtin_rvv_vadd_vx_u8mf2((vuint8mf2_t)(op0), (uint8_t)(op1), (size_t)(op2))
#define vadd_vx_u8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u8mf2_m((vuint8mf2_t)(op0), (vuint8mf2_t)(op1), (uint8_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vx_u8mf4(op0, op1, op2) \
__builtin_rvv_vadd_vx_u8mf4((vuint8mf4_t)(op0), (uint8_t)(op1), (size_t)(op2))
#define vadd_vx_u8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u8mf4_m((vuint8mf4_t)(op0), (vuint8mf4_t)(op1), (uint8_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vx_u8mf8(op0, op1, op2) \
__builtin_rvv_vadd_vx_u8mf8((vuint8mf8_t)(op0), (uint8_t)(op1), (size_t)(op2))
#define vadd_vx_u8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u8mf8_m((vuint8mf8_t)(op0), (vuint8mf8_t)(op1), (uint8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vx_u16m1(op0, op1, op2) \
__builtin_rvv_vadd_vx_u16m1((vuint16m1_t)(op0), (uint16_t)(op1), (size_t)(op2))
#define vadd_vx_u16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u16m1_m((vuint16m1_t)(op0), (vuint16m1_t)(op1), (uint16_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vx_u16m2(op0, op1, op2) \
__builtin_rvv_vadd_vx_u16m2((vuint16m2_t)(op0), (uint16_t)(op1), (size_t)(op2))
#define vadd_vx_u16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u16m2_m((vuint16m2_t)(op0), (vuint16m2_t)(op1), (uint16_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vx_u16m4(op0, op1, op2) \
__builtin_rvv_vadd_vx_u16m4((vuint16m4_t)(op0), (uint16_t)(op1), (size_t)(op2))
#define vadd_vx_u16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u16m4_m((vuint16m4_t)(op0), (vuint16m4_t)(op1), (uint16_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vadd_vx_u16m8(op0, op1, op2) \
__builtin_rvv_vadd_vx_u16m8((vuint16m8_t)(op0), (uint16_t)(op1), (size_t)(op2))
#define vadd_vx_u16m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u16m8_m((vuint16m8_t)(op0), (vuint16m8_t)(op1), (uint16_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vadd_vx_u16mf2(op0, op1, op2) \
__builtin_rvv_vadd_vx_u16mf2((vuint16mf2_t)(op0), (uint16_t)(op1), (size_t)(op2))
#define vadd_vx_u16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u16mf2_m((vuint16mf2_t)(op0), (vuint16mf2_t)(op1), (uint16_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vx_u16mf4(op0, op1, op2) \
__builtin_rvv_vadd_vx_u16mf4((vuint16mf4_t)(op0), (uint16_t)(op1), (size_t)(op2))
#define vadd_vx_u16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u16mf4_m((vuint16mf4_t)(op0), (vuint16mf4_t)(op1), (uint16_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vx_u32m1(op0, op1, op2) \
__builtin_rvv_vadd_vx_u32m1((vuint32m1_t)(op0), (uint32_t)(op1), (size_t)(op2))
#define vadd_vx_u32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u32m1_m((vuint32m1_t)(op0), (vuint32m1_t)(op1), (uint32_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vx_u32m2(op0, op1, op2) \
__builtin_rvv_vadd_vx_u32m2((vuint32m2_t)(op0), (uint32_t)(op1), (size_t)(op2))
#define vadd_vx_u32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u32m2_m((vuint32m2_t)(op0), (vuint32m2_t)(op1), (uint32_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vx_u32m4(op0, op1, op2) \
__builtin_rvv_vadd_vx_u32m4((vuint32m4_t)(op0), (uint32_t)(op1), (size_t)(op2))
#define vadd_vx_u32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u32m4_m((vuint32m4_t)(op0), (vuint32m4_t)(op1), (uint32_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vadd_vx_u32m8(op0, op1, op2) \
__builtin_rvv_vadd_vx_u32m8((vuint32m8_t)(op0), (uint32_t)(op1), (size_t)(op2))
#define vadd_vx_u32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u32m8_m((vuint32m8_t)(op0), (vuint32m8_t)(op1), (uint32_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vadd_vx_u32mf2(op0, op1, op2) \
__builtin_rvv_vadd_vx_u32mf2((vuint32mf2_t)(op0), (uint32_t)(op1), (size_t)(op2))
#define vadd_vx_u32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u32mf2_m((vuint32mf2_t)(op0), (vuint32mf2_t)(op1), (uint32_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vx_u64m1(op0, op1, op2) \
__builtin_rvv_vadd_vx_u64m1((vuint64m1_t)(op0), (uint64_t)(op1), (size_t)(op2))
#define vadd_vx_u64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u64m1_m((vuint64m1_t)(op0), (vuint64m1_t)(op1), (uint64_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vadd_vx_u64m2(op0, op1, op2) \
__builtin_rvv_vadd_vx_u64m2((vuint64m2_t)(op0), (uint64_t)(op1), (size_t)(op2))
#define vadd_vx_u64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u64m2_m((vuint64m2_t)(op0), (vuint64m2_t)(op1), (uint64_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vadd_vx_u64m4(op0, op1, op2) \
__builtin_rvv_vadd_vx_u64m4((vuint64m4_t)(op0), (uint64_t)(op1), (size_t)(op2))
#define vadd_vx_u64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u64m4_m((vuint64m4_t)(op0), (vuint64m4_t)(op1), (uint64_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vadd_vx_u64m8(op0, op1, op2) \
__builtin_rvv_vadd_vx_u64m8((vuint64m8_t)(op0), (uint64_t)(op1), (size_t)(op2))
#define vadd_vx_u64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vadd_vx_u64m8_m((vuint64m8_t)(op0), (vuint64m8_t)(op1), (uint64_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vse8_v_u8m1(op1, op0, op2) \
__builtin_rvv_vse8_v_u8m1((vuint8m1_t)(op0), (uint8_t *)(op1), (size_t)(op2))
#define vse8_v_u8m1_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_u8m1_m((vuint8m1_t)(op0), (uint8_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vse8_v_u8m2(op1, op0, op2) \
__builtin_rvv_vse8_v_u8m2((vuint8m2_t)(op0), (uint8_t *)(op1), (size_t)(op2))
#define vse8_v_u8m2_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_u8m2_m((vuint8m2_t)(op0), (uint8_t *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vse8_v_u8m4(op1, op0, op2) \
__builtin_rvv_vse8_v_u8m4((vuint8m4_t)(op0), (uint8_t *)(op1), (size_t)(op2))
#define vse8_v_u8m4_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_u8m4_m((vuint8m4_t)(op0), (uint8_t *)(op1), (vbool2_t)(op2), (size_t)(op3))
#define vse8_v_u8m8(op1, op0, op2) \
__builtin_rvv_vse8_v_u8m8((vuint8m8_t)(op0), (uint8_t *)(op1), (size_t)(op2))
#define vse8_v_u8m8_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_u8m8_m((vuint8m8_t)(op0), (uint8_t *)(op1), (vbool1_t)(op2), (size_t)(op3))
#define vse8_v_u8mf2(op1, op0, op2) \
__builtin_rvv_vse8_v_u8mf2((vuint8mf2_t)(op0), (uint8_t *)(op1), (size_t)(op2))
#define vse8_v_u8mf2_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_u8mf2_m((vuint8mf2_t)(op0), (uint8_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vse8_v_u8mf4(op1, op0, op2) \
__builtin_rvv_vse8_v_u8mf4((vuint8mf4_t)(op0), (uint8_t *)(op1), (size_t)(op2))
#define vse8_v_u8mf4_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_u8mf4_m((vuint8mf4_t)(op0), (uint8_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vse8_v_u8mf8(op1, op0, op2) \
__builtin_rvv_vse8_v_u8mf8((vuint8mf8_t)(op0), (uint8_t *)(op1), (size_t)(op2))
#define vse8_v_u8mf8_m(op2, op1, op0, op3) \
__builtin_rvv_vse8_v_u8mf8_m((vuint8mf8_t)(op0), (uint8_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vle16_v_i16m1(op0, op1) \
__builtin_rvv_vle16_v_i16m1((const int16_t *)(op0), (size_t)(op1))
#define vle16_v_i16m1_m(op2, op0, op1, op3) \
__builtin_rvv_vle16_v_i16m1_m((vint16m1_t)(op0), (const int16_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vle16_v_i16m2(op0, op1) \
__builtin_rvv_vle16_v_i16m2((const int16_t *)(op0), (size_t)(op1))
#define vle16_v_i16m2_m(op2, op0, op1, op3) \
__builtin_rvv_vle16_v_i16m2_m((vint16m2_t)(op0), (const int16_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vle16_v_i16m4(op0, op1) \
__builtin_rvv_vle16_v_i16m4((const int16_t *)(op0), (size_t)(op1))
#define vle16_v_i16m4_m(op2, op0, op1, op3) \
__builtin_rvv_vle16_v_i16m4_m((vint16m4_t)(op0), (const int16_t *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vle16_v_i16m8(op0, op1) \
__builtin_rvv_vle16_v_i16m8((const int16_t *)(op0), (size_t)(op1))
#define vle16_v_i16m8_m(op2, op0, op1, op3) \
__builtin_rvv_vle16_v_i16m8_m((vint16m8_t)(op0), (const int16_t *)(op1), (vbool2_t)(op2), (size_t)(op3))
#define vle16_v_i16mf2(op0, op1) \
__builtin_rvv_vle16_v_i16mf2((const int16_t *)(op0), (size_t)(op1))
#define vle16_v_i16mf2_m(op2, op0, op1, op3) \
__builtin_rvv_vle16_v_i16mf2_m((vint16mf2_t)(op0), (const int16_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vle16_v_i16mf4(op0, op1) \
__builtin_rvv_vle16_v_i16mf4((const int16_t *)(op0), (size_t)(op1))
#define vle16_v_i16mf4_m(op2, op0, op1, op3) \
__builtin_rvv_vle16_v_i16mf4_m((vint16mf4_t)(op0), (const int16_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vle16_v_u16m1(op0, op1) \
__builtin_rvv_vle16_v_u16m1((const uint16_t *)(op0), (size_t)(op1))
#define vle16_v_u16m1_m(op2, op0, op1, op3) \
__builtin_rvv_vle16_v_u16m1_m((vuint16m1_t)(op0), (const uint16_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vle16_v_u16m2(op0, op1) \
__builtin_rvv_vle16_v_u16m2((const uint16_t *)(op0), (size_t)(op1))
#define vle16_v_u16m2_m(op2, op0, op1, op3) \
__builtin_rvv_vle16_v_u16m2_m((vuint16m2_t)(op0), (const uint16_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vle16_v_u16m4(op0, op1) \
__builtin_rvv_vle16_v_u16m4((const uint16_t *)(op0), (size_t)(op1))
#define vle16_v_u16m4_m(op2, op0, op1, op3) \
__builtin_rvv_vle16_v_u16m4_m((vuint16m4_t)(op0), (const uint16_t *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vle16_v_u16m8(op0, op1) \
__builtin_rvv_vle16_v_u16m8((const uint16_t *)(op0), (size_t)(op1))
#define vle16_v_u16m8_m(op2, op0, op1, op3) \
__builtin_rvv_vle16_v_u16m8_m((vuint16m8_t)(op0), (const uint16_t *)(op1), (vbool2_t)(op2), (size_t)(op3))
#define vle16_v_u16mf2(op0, op1) \
__builtin_rvv_vle16_v_u16mf2((const uint16_t *)(op0), (size_t)(op1))
#define vle16_v_u16mf2_m(op2, op0, op1, op3) \
__builtin_rvv_vle16_v_u16mf2_m((vuint16mf2_t)(op0), (const uint16_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vle16_v_u16mf4(op0, op1) \
__builtin_rvv_vle16_v_u16mf4((const uint16_t *)(op0), (size_t)(op1))
#define vle16_v_u16mf4_m(op2, op0, op1, op3) \
__builtin_rvv_vle16_v_u16mf4_m((vuint16mf4_t)(op0), (const uint16_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vle32_v_i32m1(op0, op1) \
__builtin_rvv_vle32_v_i32m1((const int32_t *)(op0), (size_t)(op1))
#define vle32_v_i32m1_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_i32m1_m((vint32m1_t)(op0), (const int32_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vle32_v_i32m2(op0, op1) \
__builtin_rvv_vle32_v_i32m2((const int32_t *)(op0), (size_t)(op1))
#define vle32_v_i32m2_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_i32m2_m((vint32m2_t)(op0), (const int32_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vle32_v_i32m4(op0, op1) \
__builtin_rvv_vle32_v_i32m4((const int32_t *)(op0), (size_t)(op1))
#define vle32_v_i32m4_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_i32m4_m((vint32m4_t)(op0), (const int32_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vle32_v_i32m8(op0, op1) \
__builtin_rvv_vle32_v_i32m8((const int32_t *)(op0), (size_t)(op1))
#define vle32_v_i32m8_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_i32m8_m((vint32m8_t)(op0), (const int32_t *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vle32_v_i32mf2(op0, op1) \
__builtin_rvv_vle32_v_i32mf2((const int32_t *)(op0), (size_t)(op1))
#define vle32_v_i32mf2_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_i32mf2_m((vint32mf2_t)(op0), (const int32_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vle32_v_u32m1(op0, op1) \
__builtin_rvv_vle32_v_u32m1((const uint32_t *)(op0), (size_t)(op1))
#define vle32_v_u32m1_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_u32m1_m((vuint32m1_t)(op0), (const uint32_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vle32_v_u32m2(op0, op1) \
__builtin_rvv_vle32_v_u32m2((const uint32_t *)(op0), (size_t)(op1))
#define vle32_v_u32m2_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_u32m2_m((vuint32m2_t)(op0), (const uint32_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vle32_v_u32m4(op0, op1) \
__builtin_rvv_vle32_v_u32m4((const uint32_t *)(op0), (size_t)(op1))
#define vle32_v_u32m4_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_u32m4_m((vuint32m4_t)(op0), (const uint32_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vle32_v_u32m8(op0, op1) \
__builtin_rvv_vle32_v_u32m8((const uint32_t *)(op0), (size_t)(op1))
#define vle32_v_u32m8_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_u32m8_m((vuint32m8_t)(op0), (const uint32_t *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vle32_v_u32mf2(op0, op1) \
__builtin_rvv_vle32_v_u32mf2((const uint32_t *)(op0), (size_t)(op1))
#define vle32_v_u32mf2_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_u32mf2_m((vuint32mf2_t)(op0), (const uint32_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vle64_v_i64m1(op0, op1) \
__builtin_rvv_vle64_v_i64m1((const int64_t *)(op0), (size_t)(op1))
#define vle64_v_i64m1_m(op2, op0, op1, op3) \
__builtin_rvv_vle64_v_i64m1_m((vint64m1_t)(op0), (const int64_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vle64_v_i64m2(op0, op1) \
__builtin_rvv_vle64_v_i64m2((const int64_t *)(op0), (size_t)(op1))
#define vle64_v_i64m2_m(op2, op0, op1, op3) \
__builtin_rvv_vle64_v_i64m2_m((vint64m2_t)(op0), (const int64_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vle64_v_i64m4(op0, op1) \
__builtin_rvv_vle64_v_i64m4((const int64_t *)(op0), (size_t)(op1))
#define vle64_v_i64m4_m(op2, op0, op1, op3) \
__builtin_rvv_vle64_v_i64m4_m((vint64m4_t)(op0), (const int64_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vle64_v_i64m8(op0, op1) \
__builtin_rvv_vle64_v_i64m8((const int64_t *)(op0), (size_t)(op1))
#define vle64_v_i64m8_m(op2, op0, op1, op3) \
__builtin_rvv_vle64_v_i64m8_m((vint64m8_t)(op0), (const int64_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vle64_v_u64m1(op0, op1) \
__builtin_rvv_vle64_v_u64m1((const uint64_t *)(op0), (size_t)(op1))
#define vle64_v_u64m1_m(op2, op0, op1, op3) \
__builtin_rvv_vle64_v_u64m1_m((vuint64m1_t)(op0), (const uint64_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vle64_v_u64m2(op0, op1) \
__builtin_rvv_vle64_v_u64m2((const uint64_t *)(op0), (size_t)(op1))
#define vle64_v_u64m2_m(op2, op0, op1, op3) \
__builtin_rvv_vle64_v_u64m2_m((vuint64m2_t)(op0), (const uint64_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vle64_v_u64m4(op0, op1) \
__builtin_rvv_vle64_v_u64m4((const uint64_t *)(op0), (size_t)(op1))
#define vle64_v_u64m4_m(op2, op0, op1, op3) \
__builtin_rvv_vle64_v_u64m4_m((vuint64m4_t)(op0), (const uint64_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vle64_v_u64m8(op0, op1) \
__builtin_rvv_vle64_v_u64m8((const uint64_t *)(op0), (size_t)(op1))
#define vle64_v_u64m8_m(op2, op0, op1, op3) \
__builtin_rvv_vle64_v_u64m8_m((vuint64m8_t)(op0), (const uint64_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vle8_v_u8m1(op0, op1) \
__builtin_rvv_vle8_v_u8m1((const uint8_t *)(op0), (size_t)(op1))
#define vle8_v_u8m1_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_u8m1_m((vuint8m1_t)(op0), (const uint8_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vle8_v_u8m2(op0, op1) \
__builtin_rvv_vle8_v_u8m2((const uint8_t *)(op0), (size_t)(op1))
#define vle8_v_u8m2_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_u8m2_m((vuint8m2_t)(op0), (const uint8_t *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vle8_v_u8m4(op0, op1) \
__builtin_rvv_vle8_v_u8m4((const uint8_t *)(op0), (size_t)(op1))
#define vle8_v_u8m4_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_u8m4_m((vuint8m4_t)(op0), (const uint8_t *)(op1), (vbool2_t)(op2), (size_t)(op3))
#define vle8_v_u8m8(op0, op1) \
__builtin_rvv_vle8_v_u8m8((const uint8_t *)(op0), (size_t)(op1))
#define vle8_v_u8m8_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_u8m8_m((vuint8m8_t)(op0), (const uint8_t *)(op1), (vbool1_t)(op2), (size_t)(op3))
#define vle8_v_u8mf2(op0, op1) \
__builtin_rvv_vle8_v_u8mf2((const uint8_t *)(op0), (size_t)(op1))
#define vle8_v_u8mf2_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_u8mf2_m((vuint8mf2_t)(op0), (const uint8_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vle8_v_u8mf4(op0, op1) \
__builtin_rvv_vle8_v_u8mf4((const uint8_t *)(op0), (size_t)(op1))
#define vle8_v_u8mf4_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_u8mf4_m((vuint8mf4_t)(op0), (const uint8_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vle8_v_u8mf8(op0, op1) \
__builtin_rvv_vle8_v_u8mf8((const uint8_t *)(op0), (size_t)(op1))
#define vle8_v_u8mf8_m(op2, op0, op1, op3) \
__builtin_rvv_vle8_v_u8mf8_m((vuint8mf8_t)(op0), (const uint8_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
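/* Unit-stride stores (vse16/vse32/vse64 below). The user-facing argument order
 * is (base, value, vl) for the plain form and (mask, base, value, vl) for the
 * masked _m form; each macro reorders these into the builtin's
 * (value, base[, mask], vl) order. A minimal usage sketch, assuming the
 * vsetvl_e16m1 and vle16_v_i16m1 intrinsics defined elsewhere in this header
 * and a target compiled with the V extension enabled:
 *
 *   static inline void copy_i16(int16_t *dst, const int16_t *src, size_t n) {
 *     while (n > 0) {
 *       size_t vl = vsetvl_e16m1(n);              // elements this iteration
 *       vint16m1_t v = vle16_v_i16m1(src, vl);    // unit-stride load
 *       vse16_v_i16m1(dst, v, vl);                // unit-stride store
 *       src += vl; dst += vl; n -= vl;
 *     }
 *   }
 */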
#define vse16_v_i16m1(op1, op0, op2) \
__builtin_rvv_vse16_v_i16m1((vint16m1_t)(op0), (int16_t *)(op1), (size_t)(op2))
#define vse16_v_i16m1_m(op2, op1, op0, op3) \
__builtin_rvv_vse16_v_i16m1_m((vint16m1_t)(op0), (int16_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vse16_v_i16m2(op1, op0, op2) \
__builtin_rvv_vse16_v_i16m2((vint16m2_t)(op0), (int16_t *)(op1), (size_t)(op2))
#define vse16_v_i16m2_m(op2, op1, op0, op3) \
__builtin_rvv_vse16_v_i16m2_m((vint16m2_t)(op0), (int16_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vse16_v_i16m4(op1, op0, op2) \
__builtin_rvv_vse16_v_i16m4((vint16m4_t)(op0), (int16_t *)(op1), (size_t)(op2))
#define vse16_v_i16m4_m(op2, op1, op0, op3) \
__builtin_rvv_vse16_v_i16m4_m((vint16m4_t)(op0), (int16_t *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vse16_v_i16m8(op1, op0, op2) \
__builtin_rvv_vse16_v_i16m8((vint16m8_t)(op0), (int16_t *)(op1), (size_t)(op2))
#define vse16_v_i16m8_m(op2, op1, op0, op3) \
__builtin_rvv_vse16_v_i16m8_m((vint16m8_t)(op0), (int16_t *)(op1), (vbool2_t)(op2), (size_t)(op3))
#define vse16_v_i16mf2(op1, op0, op2) \
__builtin_rvv_vse16_v_i16mf2((vint16mf2_t)(op0), (int16_t *)(op1), (size_t)(op2))
#define vse16_v_i16mf2_m(op2, op1, op0, op3) \
__builtin_rvv_vse16_v_i16mf2_m((vint16mf2_t)(op0), (int16_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vse16_v_i16mf4(op1, op0, op2) \
__builtin_rvv_vse16_v_i16mf4((vint16mf4_t)(op0), (int16_t *)(op1), (size_t)(op2))
#define vse16_v_i16mf4_m(op2, op1, op0, op3) \
__builtin_rvv_vse16_v_i16mf4_m((vint16mf4_t)(op0), (int16_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vse16_v_u16m1(op1, op0, op2) \
__builtin_rvv_vse16_v_u16m1((vuint16m1_t)(op0), (uint16_t *)(op1), (size_t)(op2))
#define vse16_v_u16m1_m(op2, op1, op0, op3) \
__builtin_rvv_vse16_v_u16m1_m((vuint16m1_t)(op0), (uint16_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vse16_v_u16m2(op1, op0, op2) \
__builtin_rvv_vse16_v_u16m2((vuint16m2_t)(op0), (uint16_t *)(op1), (size_t)(op2))
#define vse16_v_u16m2_m(op2, op1, op0, op3) \
__builtin_rvv_vse16_v_u16m2_m((vuint16m2_t)(op0), (uint16_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vse16_v_u16m4(op1, op0, op2) \
__builtin_rvv_vse16_v_u16m4((vuint16m4_t)(op0), (uint16_t *)(op1), (size_t)(op2))
#define vse16_v_u16m4_m(op2, op1, op0, op3) \
__builtin_rvv_vse16_v_u16m4_m((vuint16m4_t)(op0), (uint16_t *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vse16_v_u16m8(op1, op0, op2) \
__builtin_rvv_vse16_v_u16m8((vuint16m8_t)(op0), (uint16_t *)(op1), (size_t)(op2))
#define vse16_v_u16m8_m(op2, op1, op0, op3) \
__builtin_rvv_vse16_v_u16m8_m((vuint16m8_t)(op0), (uint16_t *)(op1), (vbool2_t)(op2), (size_t)(op3))
#define vse16_v_u16mf2(op1, op0, op2) \
__builtin_rvv_vse16_v_u16mf2((vuint16mf2_t)(op0), (uint16_t *)(op1), (size_t)(op2))
#define vse16_v_u16mf2_m(op2, op1, op0, op3) \
__builtin_rvv_vse16_v_u16mf2_m((vuint16mf2_t)(op0), (uint16_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vse16_v_u16mf4(op1, op0, op2) \
__builtin_rvv_vse16_v_u16mf4((vuint16mf4_t)(op0), (uint16_t *)(op1), (size_t)(op2))
#define vse16_v_u16mf4_m(op2, op1, op0, op3) \
__builtin_rvv_vse16_v_u16mf4_m((vuint16mf4_t)(op0), (uint16_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vse32_v_i32m1(op1, op0, op2) \
__builtin_rvv_vse32_v_i32m1((vint32m1_t)(op0), (int32_t *)(op1), (size_t)(op2))
#define vse32_v_i32m1_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_i32m1_m((vint32m1_t)(op0), (int32_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vse32_v_i32m2(op1, op0, op2) \
__builtin_rvv_vse32_v_i32m2((vint32m2_t)(op0), (int32_t *)(op1), (size_t)(op2))
#define vse32_v_i32m2_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_i32m2_m((vint32m2_t)(op0), (int32_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vse32_v_i32m4(op1, op0, op2) \
__builtin_rvv_vse32_v_i32m4((vint32m4_t)(op0), (int32_t *)(op1), (size_t)(op2))
#define vse32_v_i32m4_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_i32m4_m((vint32m4_t)(op0), (int32_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vse32_v_i32m8(op1, op0, op2) \
__builtin_rvv_vse32_v_i32m8((vint32m8_t)(op0), (int32_t *)(op1), (size_t)(op2))
#define vse32_v_i32m8_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_i32m8_m((vint32m8_t)(op0), (int32_t *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vse32_v_i32mf2(op1, op0, op2) \
__builtin_rvv_vse32_v_i32mf2((vint32mf2_t)(op0), (int32_t *)(op1), (size_t)(op2))
#define vse32_v_i32mf2_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_i32mf2_m((vint32mf2_t)(op0), (int32_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vse32_v_u32m1(op1, op0, op2) \
__builtin_rvv_vse32_v_u32m1((vuint32m1_t)(op0), (uint32_t *)(op1), (size_t)(op2))
#define vse32_v_u32m1_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_u32m1_m((vuint32m1_t)(op0), (uint32_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vse32_v_u32m2(op1, op0, op2) \
__builtin_rvv_vse32_v_u32m2((vuint32m2_t)(op0), (uint32_t *)(op1), (size_t)(op2))
#define vse32_v_u32m2_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_u32m2_m((vuint32m2_t)(op0), (uint32_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vse32_v_u32m4(op1, op0, op2) \
__builtin_rvv_vse32_v_u32m4((vuint32m4_t)(op0), (uint32_t *)(op1), (size_t)(op2))
#define vse32_v_u32m4_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_u32m4_m((vuint32m4_t)(op0), (uint32_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vse32_v_u32m8(op1, op0, op2) \
__builtin_rvv_vse32_v_u32m8((vuint32m8_t)(op0), (uint32_t *)(op1), (size_t)(op2))
#define vse32_v_u32m8_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_u32m8_m((vuint32m8_t)(op0), (uint32_t *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vse32_v_u32mf2(op1, op0, op2) \
__builtin_rvv_vse32_v_u32mf2((vuint32mf2_t)(op0), (uint32_t *)(op1), (size_t)(op2))
#define vse32_v_u32mf2_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_u32mf2_m((vuint32mf2_t)(op0), (uint32_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vse64_v_i64m1(op1, op0, op2) \
__builtin_rvv_vse64_v_i64m1((vint64m1_t)(op0), (int64_t *)(op1), (size_t)(op2))
#define vse64_v_i64m1_m(op2, op1, op0, op3) \
__builtin_rvv_vse64_v_i64m1_m((vint64m1_t)(op0), (int64_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vse64_v_i64m2(op1, op0, op2) \
__builtin_rvv_vse64_v_i64m2((vint64m2_t)(op0), (int64_t *)(op1), (size_t)(op2))
#define vse64_v_i64m2_m(op2, op1, op0, op3) \
__builtin_rvv_vse64_v_i64m2_m((vint64m2_t)(op0), (int64_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vse64_v_i64m4(op1, op0, op2) \
__builtin_rvv_vse64_v_i64m4((vint64m4_t)(op0), (int64_t *)(op1), (size_t)(op2))
#define vse64_v_i64m4_m(op2, op1, op0, op3) \
__builtin_rvv_vse64_v_i64m4_m((vint64m4_t)(op0), (int64_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vse64_v_i64m8(op1, op0, op2) \
__builtin_rvv_vse64_v_i64m8((vint64m8_t)(op0), (int64_t *)(op1), (size_t)(op2))
#define vse64_v_i64m8_m(op2, op1, op0, op3) \
__builtin_rvv_vse64_v_i64m8_m((vint64m8_t)(op0), (int64_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vse64_v_u64m1(op1, op0, op2) \
__builtin_rvv_vse64_v_u64m1((vuint64m1_t)(op0), (uint64_t *)(op1), (size_t)(op2))
#define vse64_v_u64m1_m(op2, op1, op0, op3) \
__builtin_rvv_vse64_v_u64m1_m((vuint64m1_t)(op0), (uint64_t *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vse64_v_u64m2(op1, op0, op2) \
__builtin_rvv_vse64_v_u64m2((vuint64m2_t)(op0), (uint64_t *)(op1), (size_t)(op2))
#define vse64_v_u64m2_m(op2, op1, op0, op3) \
__builtin_rvv_vse64_v_u64m2_m((vuint64m2_t)(op0), (uint64_t *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vse64_v_u64m4(op1, op0, op2) \
__builtin_rvv_vse64_v_u64m4((vuint64m4_t)(op0), (uint64_t *)(op1), (size_t)(op2))
#define vse64_v_u64m4_m(op2, op1, op0, op3) \
__builtin_rvv_vse64_v_u64m4_m((vuint64m4_t)(op0), (uint64_t *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vse64_v_u64m8(op1, op0, op2) \
__builtin_rvv_vse64_v_u64m8((vuint64m8_t)(op0), (uint64_t *)(op1), (size_t)(op2))
#define vse64_v_u64m8_m(op2, op1, op0, op3) \
__builtin_rvv_vse64_v_u64m8_m((vuint64m8_t)(op0), (uint64_t *)(op1), (vbool8_t)(op2), (size_t)(op3))
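/* Unordered indexed (gather) loads, vluxeiN_*. The N in vluxeiN is the width
 * of the unsigned index elements, so the index vector type scales with it
 * (e.g. vluxei16_v_i8m1 takes a vuint16m2_t index for a vint8m1_t result).
 * The user-facing argument order is (base, index, vl); the masked _m form is
 * (mask, maskedoff, base, index, vl), reordered by the macro into the
 * builtin's (maskedoff, base, index, mask, vl) order. */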
#define vluxei8_v_i8m1(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i8m1((const int8_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vluxei8_v_i8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i8m1_m((vint8m1_t)(op0), (const int8_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei8_v_i8m2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i8m2((const int8_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vluxei8_v_i8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i8m2_m((vint8m2_t)(op0), (const int8_t *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei8_v_i8m4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i8m4((const int8_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2))
#define vluxei8_v_i8m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i8m4_m((vint8m4_t)(op0), (const int8_t *)(op1), (vuint8m4_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vluxei8_v_i8m8(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i8m8((const int8_t *)(op0), (vuint8m8_t)(op1), (size_t)(op2))
#define vluxei8_v_i8m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i8m8_m((vint8m8_t)(op0), (const int8_t *)(op1), (vuint8m8_t)(op2), (vbool1_t)(op3), (size_t)(op4))
#define vluxei8_v_i8mf2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i8mf2((const int8_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vluxei8_v_i8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i8mf2_m((vint8mf2_t)(op0), (const int8_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei8_v_i8mf4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i8mf4((const int8_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vluxei8_v_i8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i8mf4_m((vint8mf4_t)(op0), (const int8_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei8_v_i8mf8(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i8mf8((const int8_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vluxei8_v_i8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i8mf8_m((vint8mf8_t)(op0), (const int8_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei16_v_i8m1(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i8m1((const int8_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vluxei16_v_i8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i8m1_m((vint8m1_t)(op0), (const int8_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei16_v_i8m2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i8m2((const int8_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vluxei16_v_i8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i8m2_m((vint8m2_t)(op0), (const int8_t *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei16_v_i8m4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i8m4((const int8_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2))
#define vluxei16_v_i8m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i8m4_m((vint8m4_t)(op0), (const int8_t *)(op1), (vuint16m8_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vluxei16_v_i8mf2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i8mf2((const int8_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vluxei16_v_i8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i8mf2_m((vint8mf2_t)(op0), (const int8_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei16_v_i8mf4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i8mf4((const int8_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vluxei16_v_i8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i8mf4_m((vint8mf4_t)(op0), (const int8_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei16_v_i8mf8(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i8mf8((const int8_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vluxei16_v_i8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i8mf8_m((vint8mf8_t)(op0), (const int8_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei16_v_u8m1(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u8m1((const uint8_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vluxei16_v_u8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u8m1_m((vuint8m1_t)(op0), (const uint8_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei16_v_u8m2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u8m2((const uint8_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vluxei16_v_u8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u8m2_m((vuint8m2_t)(op0), (const uint8_t *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei16_v_u8m4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u8m4((const uint8_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2))
#define vluxei16_v_u8m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u8m4_m((vuint8m4_t)(op0), (const uint8_t *)(op1), (vuint16m8_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vluxei16_v_u8mf2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u8mf2((const uint8_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vluxei16_v_u8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u8mf2_m((vuint8mf2_t)(op0), (const uint8_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei16_v_u8mf4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u8mf4((const uint8_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vluxei16_v_u8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u8mf4_m((vuint8mf4_t)(op0), (const uint8_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei16_v_u8mf8(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u8mf8((const uint8_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vluxei16_v_u8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u8mf8_m((vuint8mf8_t)(op0), (const uint8_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei32_v_i8m1(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i8m1((const int8_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vluxei32_v_i8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i8m1_m((vint8m1_t)(op0), (const int8_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei32_v_i8m2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i8m2((const int8_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vluxei32_v_i8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i8m2_m((vint8m2_t)(op0), (const int8_t *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei32_v_i8mf2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i8mf2((const int8_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vluxei32_v_i8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i8mf2_m((vint8mf2_t)(op0), (const int8_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei32_v_i8mf4(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i8mf4((const int8_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vluxei32_v_i8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i8mf4_m((vint8mf4_t)(op0), (const int8_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei32_v_i8mf8(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i8mf8((const int8_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vluxei32_v_i8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i8mf8_m((vint8mf8_t)(op0), (const int8_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei32_v_u8m1(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u8m1((const uint8_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vluxei32_v_u8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u8m1_m((vuint8m1_t)(op0), (const uint8_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei32_v_u8m2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u8m2((const uint8_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vluxei32_v_u8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u8m2_m((vuint8m2_t)(op0), (const uint8_t *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei32_v_u8mf2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u8mf2((const uint8_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vluxei32_v_u8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u8mf2_m((vuint8mf2_t)(op0), (const uint8_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei32_v_u8mf4(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u8mf4((const uint8_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vluxei32_v_u8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u8mf4_m((vuint8mf4_t)(op0), (const uint8_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei32_v_u8mf8(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u8mf8((const uint8_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vluxei32_v_u8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u8mf8_m((vuint8mf8_t)(op0), (const uint8_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei64_v_i8m1(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i8m1((const int8_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vluxei64_v_i8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i8m1_m((vint8m1_t)(op0), (const int8_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei64_v_i8mf2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i8mf2((const int8_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vluxei64_v_i8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i8mf2_m((vint8mf2_t)(op0), (const int8_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei64_v_i8mf4(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i8mf4((const int8_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vluxei64_v_i8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i8mf4_m((vint8mf4_t)(op0), (const int8_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei64_v_i8mf8(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i8mf8((const int8_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vluxei64_v_i8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i8mf8_m((vint8mf8_t)(op0), (const int8_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei64_v_u8m1(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u8m1((const uint8_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vluxei64_v_u8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u8m1_m((vuint8m1_t)(op0), (const uint8_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei64_v_u8mf2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u8mf2((const uint8_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vluxei64_v_u8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u8mf2_m((vuint8mf2_t)(op0), (const uint8_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei64_v_u8mf4(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u8mf4((const uint8_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vluxei64_v_u8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u8mf4_m((vuint8mf4_t)(op0), (const uint8_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei64_v_u8mf8(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u8mf8((const uint8_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vluxei64_v_u8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u8mf8_m((vuint8mf8_t)(op0), (const uint8_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei8_v_i16m1(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i16m1((const int16_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vluxei8_v_i16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i16m1_m((vint16m1_t)(op0), (const int16_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei8_v_i16m2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i16m2((const int16_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vluxei8_v_i16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i16m2_m((vint16m2_t)(op0), (const int16_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei8_v_i16m4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i16m4((const int16_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vluxei8_v_i16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i16m4_m((vint16m4_t)(op0), (const int16_t *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei8_v_i16m8(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i16m8((const int16_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2))
#define vluxei8_v_i16m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i16m8_m((vint16m8_t)(op0), (const int16_t *)(op1), (vuint8m4_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vluxei8_v_i16mf2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i16mf2((const int16_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vluxei8_v_i16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i16mf2_m((vint16mf2_t)(op0), (const int16_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei8_v_i16mf4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i16mf4((const int16_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vluxei8_v_i16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i16mf4_m((vint16mf4_t)(op0), (const int16_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei8_v_u16m1(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u16m1((const uint16_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vluxei8_v_u16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u16m1_m((vuint16m1_t)(op0), (const uint16_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei8_v_u16m2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u16m2((const uint16_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vluxei8_v_u16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u16m2_m((vuint16m2_t)(op0), (const uint16_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei8_v_u16m4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u16m4((const uint16_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vluxei8_v_u16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u16m4_m((vuint16m4_t)(op0), (const uint16_t *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei8_v_u16m8(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u16m8((const uint16_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2))
#define vluxei8_v_u16m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u16m8_m((vuint16m8_t)(op0), (const uint16_t *)(op1), (vuint8m4_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vluxei8_v_u16mf2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u16mf2((const uint16_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vluxei8_v_u16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u16mf2_m((vuint16mf2_t)(op0), (const uint16_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei8_v_u16mf4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u16mf4((const uint16_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vluxei8_v_u16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u16mf4_m((vuint16mf4_t)(op0), (const uint16_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei16_v_i16m1(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i16m1((const int16_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vluxei16_v_i16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i16m1_m((vint16m1_t)(op0), (const int16_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei16_v_i16m2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i16m2((const int16_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vluxei16_v_i16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i16m2_m((vint16m2_t)(op0), (const int16_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei16_v_i16m4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i16m4((const int16_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vluxei16_v_i16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i16m4_m((vint16m4_t)(op0), (const int16_t *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei16_v_i16m8(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i16m8((const int16_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2))
#define vluxei16_v_i16m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i16m8_m((vint16m8_t)(op0), (const int16_t *)(op1), (vuint16m8_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vluxei16_v_i16mf2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i16mf2((const int16_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vluxei16_v_i16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i16mf2_m((vint16mf2_t)(op0), (const int16_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei16_v_i16mf4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i16mf4((const int16_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vluxei16_v_i16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i16mf4_m((vint16mf4_t)(op0), (const int16_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei16_v_u16m1(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u16m1((const uint16_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vluxei16_v_u16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u16m1_m((vuint16m1_t)(op0), (const uint16_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei16_v_u16m2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u16m2((const uint16_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vluxei16_v_u16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u16m2_m((vuint16m2_t)(op0), (const uint16_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei16_v_u16m4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u16m4((const uint16_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vluxei16_v_u16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u16m4_m((vuint16m4_t)(op0), (const uint16_t *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei16_v_u16m8(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u16m8((const uint16_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2))
#define vluxei16_v_u16m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u16m8_m((vuint16m8_t)(op0), (const uint16_t *)(op1), (vuint16m8_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vluxei16_v_u16mf2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u16mf2((const uint16_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vluxei16_v_u16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u16mf2_m((vuint16mf2_t)(op0), (const uint16_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei16_v_u16mf4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u16mf4((const uint16_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vluxei16_v_u16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u16mf4_m((vuint16mf4_t)(op0), (const uint16_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei8_v_u8m1(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u8m1((const uint8_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vluxei8_v_u8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u8m1_m((vuint8m1_t)(op0), (const uint8_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei8_v_u8m2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u8m2((const uint8_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vluxei8_v_u8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u8m2_m((vuint8m2_t)(op0), (const uint8_t *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei8_v_u8m4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u8m4((const uint8_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2))
#define vluxei8_v_u8m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u8m4_m((vuint8m4_t)(op0), (const uint8_t *)(op1), (vuint8m4_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vluxei8_v_u8m8(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u8m8((const uint8_t *)(op0), (vuint8m8_t)(op1), (size_t)(op2))
#define vluxei8_v_u8m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u8m8_m((vuint8m8_t)(op0), (const uint8_t *)(op1), (vuint8m8_t)(op2), (vbool1_t)(op3), (size_t)(op4))
#define vluxei8_v_u8mf2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u8mf2((const uint8_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vluxei8_v_u8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u8mf2_m((vuint8mf2_t)(op0), (const uint8_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei8_v_u8mf4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u8mf4((const uint8_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vluxei8_v_u8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u8mf4_m((vuint8mf4_t)(op0), (const uint8_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei8_v_u8mf8(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u8mf8((const uint8_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vluxei8_v_u8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u8mf8_m((vuint8mf8_t)(op0), (const uint8_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei32_v_i16m1(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i16m1((const int16_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vluxei32_v_i16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i16m1_m((vint16m1_t)(op0), (const int16_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei32_v_i16m2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i16m2((const int16_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vluxei32_v_i16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i16m2_m((vint16m2_t)(op0), (const int16_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei32_v_i16m4(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i16m4((const int16_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vluxei32_v_i16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i16m4_m((vint16m4_t)(op0), (const int16_t *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei32_v_i16mf2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i16mf2((const int16_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vluxei32_v_i16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i16mf2_m((vint16mf2_t)(op0), (const int16_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei32_v_i16mf4(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i16mf4((const int16_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vluxei32_v_i16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i16mf4_m((vint16mf4_t)(op0), (const int16_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei32_v_u16m1(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u16m1((const uint16_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vluxei32_v_u16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u16m1_m((vuint16m1_t)(op0), (const uint16_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei32_v_u16m2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u16m2((const uint16_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vluxei32_v_u16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u16m2_m((vuint16m2_t)(op0), (const uint16_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei32_v_u16m4(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u16m4((const uint16_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vluxei32_v_u16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u16m4_m((vuint16m4_t)(op0), (const uint16_t *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei32_v_u16mf2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u16mf2((const uint16_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vluxei32_v_u16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u16mf2_m((vuint16mf2_t)(op0), (const uint16_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei32_v_u16mf4(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u16mf4((const uint16_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vluxei32_v_u16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u16mf4_m((vuint16mf4_t)(op0), (const uint16_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei64_v_i16m1(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i16m1((const int16_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vluxei64_v_i16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i16m1_m((vint16m1_t)(op0), (const int16_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei64_v_i16m2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i16m2((const int16_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vluxei64_v_i16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i16m2_m((vint16m2_t)(op0), (const int16_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei64_v_i16mf2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i16mf2((const int16_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vluxei64_v_i16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i16mf2_m((vint16mf2_t)(op0), (const int16_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei64_v_i16mf4(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i16mf4((const int16_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vluxei64_v_i16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i16mf4_m((vint16mf4_t)(op0), (const int16_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei64_v_u16m1(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u16m1((const uint16_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vluxei64_v_u16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u16m1_m((vuint16m1_t)(op0), (const uint16_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei64_v_u16m2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u16m2((const uint16_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vluxei64_v_u16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u16m2_m((vuint16m2_t)(op0), (const uint16_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei64_v_u16mf2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u16mf2((const uint16_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vluxei64_v_u16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u16mf2_m((vuint16mf2_t)(op0), (const uint16_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei64_v_u16mf4(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u16mf4((const uint16_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vluxei64_v_u16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u16mf4_m((vuint16mf4_t)(op0), (const uint16_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei8_v_i32m1(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i32m1((const int32_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vluxei8_v_i32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i32m1_m((vint32m1_t)(op0), (const int32_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei8_v_i32m2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i32m2((const int32_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vluxei8_v_i32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i32m2_m((vint32m2_t)(op0), (const int32_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei8_v_i32m4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i32m4((const int32_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vluxei8_v_i32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i32m4_m((vint32m4_t)(op0), (const int32_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei8_v_i32m8(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i32m8((const int32_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vluxei8_v_i32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i32m8_m((vint32m8_t)(op0), (const int32_t *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei8_v_i32mf2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i32mf2((const int32_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vluxei8_v_i32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i32mf2_m((vint32mf2_t)(op0), (const int32_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei8_v_u32m1(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u32m1((const uint32_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vluxei8_v_u32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u32m1_m((vuint32m1_t)(op0), (const uint32_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei8_v_u32m2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u32m2((const uint32_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vluxei8_v_u32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u32m2_m((vuint32m2_t)(op0), (const uint32_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei8_v_u32m4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u32m4((const uint32_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vluxei8_v_u32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u32m4_m((vuint32m4_t)(op0), (const uint32_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei8_v_u32m8(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u32m8((const uint32_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vluxei8_v_u32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u32m8_m((vuint32m8_t)(op0), (const uint32_t *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei8_v_u32mf2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u32mf2((const uint32_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vluxei8_v_u32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u32mf2_m((vuint32mf2_t)(op0), (const uint32_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei16_v_i32m1(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i32m1((const int32_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vluxei16_v_i32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i32m1_m((vint32m1_t)(op0), (const int32_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei16_v_i32m2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i32m2((const int32_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vluxei16_v_i32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i32m2_m((vint32m2_t)(op0), (const int32_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei16_v_i32m4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i32m4((const int32_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vluxei16_v_i32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i32m4_m((vint32m4_t)(op0), (const int32_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei16_v_i32m8(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i32m8((const int32_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vluxei16_v_i32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i32m8_m((vint32m8_t)(op0), (const int32_t *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei16_v_i32mf2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i32mf2((const int32_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vluxei16_v_i32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i32mf2_m((vint32mf2_t)(op0), (const int32_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei16_v_u32m1(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u32m1((const uint32_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vluxei16_v_u32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u32m1_m((vuint32m1_t)(op0), (const uint32_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei16_v_u32m2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u32m2((const uint32_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vluxei16_v_u32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u32m2_m((vuint32m2_t)(op0), (const uint32_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei16_v_u32m4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u32m4((const uint32_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vluxei16_v_u32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u32m4_m((vuint32m4_t)(op0), (const uint32_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei16_v_u32m8(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u32m8((const uint32_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vluxei16_v_u32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u32m8_m((vuint32m8_t)(op0), (const uint32_t *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei16_v_u32mf2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u32mf2((const uint32_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vluxei16_v_u32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u32mf2_m((vuint32mf2_t)(op0), (const uint32_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei32_v_i32m1(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i32m1((const int32_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vluxei32_v_i32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i32m1_m((vint32m1_t)(op0), (const int32_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei32_v_i32m2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i32m2((const int32_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vluxei32_v_i32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i32m2_m((vint32m2_t)(op0), (const int32_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei32_v_i32m4(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i32m4((const int32_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vluxei32_v_i32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i32m4_m((vint32m4_t)(op0), (const int32_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei32_v_i32m8(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i32m8((const int32_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vluxei32_v_i32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i32m8_m((vint32m8_t)(op0), (const int32_t *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei32_v_i32mf2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i32mf2((const int32_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vluxei32_v_i32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i32mf2_m((vint32mf2_t)(op0), (const int32_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei32_v_u32m1(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u32m1((const uint32_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vluxei32_v_u32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u32m1_m((vuint32m1_t)(op0), (const uint32_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei32_v_u32m2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u32m2((const uint32_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vluxei32_v_u32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u32m2_m((vuint32m2_t)(op0), (const uint32_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei32_v_u32m4(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u32m4((const uint32_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vluxei32_v_u32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u32m4_m((vuint32m4_t)(op0), (const uint32_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei32_v_u32m8(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u32m8((const uint32_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vluxei32_v_u32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u32m8_m((vuint32m8_t)(op0), (const uint32_t *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei32_v_u32mf2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u32mf2((const uint32_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vluxei32_v_u32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u32mf2_m((vuint32mf2_t)(op0), (const uint32_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei64_v_i32m1(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i32m1((const int32_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vluxei64_v_i32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i32m1_m((vint32m1_t)(op0), (const int32_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei64_v_i32m2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i32m2((const int32_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vluxei64_v_i32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i32m2_m((vint32m2_t)(op0), (const int32_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei64_v_i32m4(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i32m4((const int32_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vluxei64_v_i32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i32m4_m((vint32m4_t)(op0), (const int32_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei64_v_i32mf2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i32mf2((const int32_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vluxei64_v_i32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i32mf2_m((vint32mf2_t)(op0), (const int32_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei64_v_u32m1(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u32m1((const uint32_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vluxei64_v_u32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u32m1_m((vuint32m1_t)(op0), (const uint32_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei64_v_u32m2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u32m2((const uint32_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vluxei64_v_u32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u32m2_m((vuint32m2_t)(op0), (const uint32_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei64_v_u32m4(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u32m4((const uint32_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vluxei64_v_u32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u32m4_m((vuint32m4_t)(op0), (const uint32_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei64_v_u32mf2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u32mf2((const uint32_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vluxei64_v_u32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u32mf2_m((vuint32mf2_t)(op0), (const uint32_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei8_v_i64m1(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i64m1((const int64_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vluxei8_v_i64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i64m1_m((vint64m1_t)(op0), (const int64_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei8_v_i64m2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i64m2((const int64_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vluxei8_v_i64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i64m2_m((vint64m2_t)(op0), (const int64_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei8_v_i64m4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i64m4((const int64_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vluxei8_v_i64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i64m4_m((vint64m4_t)(op0), (const int64_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei8_v_i64m8(op0, op1, op2) \
__builtin_rvv_vluxei8_v_i64m8((const int64_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vluxei8_v_i64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_i64m8_m((vint64m8_t)(op0), (const int64_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei8_v_u64m1(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u64m1((const uint64_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vluxei8_v_u64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u64m1_m((vuint64m1_t)(op0), (const uint64_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei8_v_u64m2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u64m2((const uint64_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vluxei8_v_u64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u64m2_m((vuint64m2_t)(op0), (const uint64_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei8_v_u64m4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u64m4((const uint64_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vluxei8_v_u64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u64m4_m((vuint64m4_t)(op0), (const uint64_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei8_v_u64m8(op0, op1, op2) \
__builtin_rvv_vluxei8_v_u64m8((const uint64_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vluxei8_v_u64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_u64m8_m((vuint64m8_t)(op0), (const uint64_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei16_v_i64m1(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i64m1((const int64_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vluxei16_v_i64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i64m1_m((vint64m1_t)(op0), (const int64_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei16_v_i64m2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i64m2((const int64_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vluxei16_v_i64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i64m2_m((vint64m2_t)(op0), (const int64_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei16_v_i64m4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i64m4((const int64_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vluxei16_v_i64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i64m4_m((vint64m4_t)(op0), (const int64_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei16_v_i64m8(op0, op1, op2) \
__builtin_rvv_vluxei16_v_i64m8((const int64_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vluxei16_v_i64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_i64m8_m((vint64m8_t)(op0), (const int64_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei16_v_u64m1(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u64m1((const uint64_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vluxei16_v_u64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u64m1_m((vuint64m1_t)(op0), (const uint64_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei16_v_u64m2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u64m2((const uint64_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vluxei16_v_u64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u64m2_m((vuint64m2_t)(op0), (const uint64_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei16_v_u64m4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u64m4((const uint64_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vluxei16_v_u64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u64m4_m((vuint64m4_t)(op0), (const uint64_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei16_v_u64m8(op0, op1, op2) \
__builtin_rvv_vluxei16_v_u64m8((const uint64_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vluxei16_v_u64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_u64m8_m((vuint64m8_t)(op0), (const uint64_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei32_v_i64m1(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i64m1((const int64_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vluxei32_v_i64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i64m1_m((vint64m1_t)(op0), (const int64_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei32_v_i64m2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i64m2((const int64_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vluxei32_v_i64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i64m2_m((vint64m2_t)(op0), (const int64_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei32_v_i64m4(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i64m4((const int64_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vluxei32_v_i64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i64m4_m((vint64m4_t)(op0), (const int64_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei32_v_i64m8(op0, op1, op2) \
__builtin_rvv_vluxei32_v_i64m8((const int64_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vluxei32_v_i64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_i64m8_m((vint64m8_t)(op0), (const int64_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei32_v_u64m1(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u64m1((const uint64_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vluxei32_v_u64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u64m1_m((vuint64m1_t)(op0), (const uint64_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei32_v_u64m2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u64m2((const uint64_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vluxei32_v_u64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u64m2_m((vuint64m2_t)(op0), (const uint64_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei32_v_u64m4(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u64m4((const uint64_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vluxei32_v_u64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u64m4_m((vuint64m4_t)(op0), (const uint64_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei32_v_u64m8(op0, op1, op2) \
__builtin_rvv_vluxei32_v_u64m8((const uint64_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vluxei32_v_u64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_u64m8_m((vuint64m8_t)(op0), (const uint64_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei64_v_i64m1(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i64m1((const int64_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vluxei64_v_i64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i64m1_m((vint64m1_t)(op0), (const int64_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei64_v_i64m2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i64m2((const int64_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vluxei64_v_i64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i64m2_m((vint64m2_t)(op0), (const int64_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei64_v_i64m4(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i64m4((const int64_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vluxei64_v_i64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i64m4_m((vint64m4_t)(op0), (const int64_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei64_v_i64m8(op0, op1, op2) \
__builtin_rvv_vluxei64_v_i64m8((const int64_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vluxei64_v_i64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_i64m8_m((vint64m8_t)(op0), (const int64_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei64_v_u64m1(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u64m1((const uint64_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vluxei64_v_u64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u64m1_m((vuint64m1_t)(op0), (const uint64_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei64_v_u64m2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u64m2((const uint64_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vluxei64_v_u64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u64m2_m((vuint64m2_t)(op0), (const uint64_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei64_v_u64m4(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u64m4((const uint64_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vluxei64_v_u64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u64m4_m((vuint64m4_t)(op0), (const uint64_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei64_v_u64m8(op0, op1, op2) \
__builtin_rvv_vluxei64_v_u64m8((const uint64_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vluxei64_v_u64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_u64m8_m((vuint64m8_t)(op0), (const uint64_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
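/*
 * Illustrative sketch only (not part of the generated macro set above): shows
 * how the vluxei* wrappers are typically called. The function name and its
 * parameters are hypothetical; the sketch assumes the vector types used in the
 * casts above (vint64m1_t, vuint32mf2_t) are declared earlier in this header
 * and that the target is compiled with the V extension. The index vector holds
 * byte offsets from the base pointer, and `vl` is the active vector length in
 * elements.
 */
static inline vint64m1_t
rvv_example_gather_unordered_i64(const int64_t *base, vuint32mf2_t byte_offsets,
                                 size_t vl) {
  /* Unordered indexed (gather) load of int64_t elements through 32-bit byte offsets. */
  return vluxei32_v_i64m1(base, byte_offsets, vl);
}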
#define vloxei8_v_i8m1(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i8m1((const int8_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vloxei8_v_i8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i8m1_m((vint8m1_t)(op0), (const int8_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei8_v_i8m2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i8m2((const int8_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vloxei8_v_i8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i8m2_m((vint8m2_t)(op0), (const int8_t *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei8_v_i8m4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i8m4((const int8_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2))
#define vloxei8_v_i8m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i8m4_m((vint8m4_t)(op0), (const int8_t *)(op1), (vuint8m4_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vloxei8_v_i8m8(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i8m8((const int8_t *)(op0), (vuint8m8_t)(op1), (size_t)(op2))
#define vloxei8_v_i8m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i8m8_m((vint8m8_t)(op0), (const int8_t *)(op1), (vuint8m8_t)(op2), (vbool1_t)(op3), (size_t)(op4))
#define vloxei8_v_i8mf2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i8mf2((const int8_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vloxei8_v_i8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i8mf2_m((vint8mf2_t)(op0), (const int8_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei8_v_i8mf4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i8mf4((const int8_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vloxei8_v_i8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i8mf4_m((vint8mf4_t)(op0), (const int8_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei8_v_i8mf8(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i8mf8((const int8_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vloxei8_v_i8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i8mf8_m((vint8mf8_t)(op0), (const int8_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei8_v_u8m1(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u8m1((const uint8_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vloxei8_v_u8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u8m1_m((vuint8m1_t)(op0), (const uint8_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei8_v_u8m2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u8m2((const uint8_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vloxei8_v_u8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u8m2_m((vuint8m2_t)(op0), (const uint8_t *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei8_v_u8m4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u8m4((const uint8_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2))
#define vloxei8_v_u8m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u8m4_m((vuint8m4_t)(op0), (const uint8_t *)(op1), (vuint8m4_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vloxei8_v_u8m8(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u8m8((const uint8_t *)(op0), (vuint8m8_t)(op1), (size_t)(op2))
#define vloxei8_v_u8m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u8m8_m((vuint8m8_t)(op0), (const uint8_t *)(op1), (vuint8m8_t)(op2), (vbool1_t)(op3), (size_t)(op4))
#define vloxei8_v_u8mf2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u8mf2((const uint8_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vloxei8_v_u8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u8mf2_m((vuint8mf2_t)(op0), (const uint8_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei8_v_u8mf4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u8mf4((const uint8_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vloxei8_v_u8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u8mf4_m((vuint8mf4_t)(op0), (const uint8_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei8_v_u8mf8(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u8mf8((const uint8_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vloxei8_v_u8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u8mf8_m((vuint8mf8_t)(op0), (const uint8_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei16_v_i8m1(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i8m1((const int8_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vloxei16_v_i8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i8m1_m((vint8m1_t)(op0), (const int8_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei16_v_i8m2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i8m2((const int8_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vloxei16_v_i8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i8m2_m((vint8m2_t)(op0), (const int8_t *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei16_v_i8m4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i8m4((const int8_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2))
#define vloxei16_v_i8m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i8m4_m((vint8m4_t)(op0), (const int8_t *)(op1), (vuint16m8_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vloxei16_v_i8mf2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i8mf2((const int8_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vloxei16_v_i8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i8mf2_m((vint8mf2_t)(op0), (const int8_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei16_v_i8mf4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i8mf4((const int8_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vloxei16_v_i8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i8mf4_m((vint8mf4_t)(op0), (const int8_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei16_v_i8mf8(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i8mf8((const int8_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vloxei16_v_i8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i8mf8_m((vint8mf8_t)(op0), (const int8_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei16_v_u8m1(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u8m1((const uint8_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vloxei16_v_u8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u8m1_m((vuint8m1_t)(op0), (const uint8_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei16_v_u8m2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u8m2((const uint8_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vloxei16_v_u8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u8m2_m((vuint8m2_t)(op0), (const uint8_t *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei16_v_u8m4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u8m4((const uint8_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2))
#define vloxei16_v_u8m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u8m4_m((vuint8m4_t)(op0), (const uint8_t *)(op1), (vuint16m8_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vloxei16_v_u8mf2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u8mf2((const uint8_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vloxei16_v_u8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u8mf2_m((vuint8mf2_t)(op0), (const uint8_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei16_v_u8mf4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u8mf4((const uint8_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vloxei16_v_u8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u8mf4_m((vuint8mf4_t)(op0), (const uint8_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei16_v_u8mf8(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u8mf8((const uint8_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vloxei16_v_u8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u8mf8_m((vuint8mf8_t)(op0), (const uint8_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei32_v_i8m1(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i8m1((const int8_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vloxei32_v_i8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i8m1_m((vint8m1_t)(op0), (const int8_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei32_v_i8m2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i8m2((const int8_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vloxei32_v_i8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i8m2_m((vint8m2_t)(op0), (const int8_t *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei32_v_i8mf2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i8mf2((const int8_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vloxei32_v_i8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i8mf2_m((vint8mf2_t)(op0), (const int8_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei32_v_i8mf4(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i8mf4((const int8_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vloxei32_v_i8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i8mf4_m((vint8mf4_t)(op0), (const int8_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei32_v_i8mf8(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i8mf8((const int8_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vloxei32_v_i8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i8mf8_m((vint8mf8_t)(op0), (const int8_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei32_v_u8m1(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u8m1((const uint8_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vloxei32_v_u8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u8m1_m((vuint8m1_t)(op0), (const uint8_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei32_v_u8m2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u8m2((const uint8_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vloxei32_v_u8m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u8m2_m((vuint8m2_t)(op0), (const uint8_t *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei32_v_u8mf2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u8mf2((const uint8_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vloxei32_v_u8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u8mf2_m((vuint8mf2_t)(op0), (const uint8_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei32_v_u8mf4(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u8mf4((const uint8_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vloxei32_v_u8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u8mf4_m((vuint8mf4_t)(op0), (const uint8_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei32_v_u8mf8(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u8mf8((const uint8_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vloxei32_v_u8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u8mf8_m((vuint8mf8_t)(op0), (const uint8_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei64_v_i8m1(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i8m1((const int8_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vloxei64_v_i8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i8m1_m((vint8m1_t)(op0), (const int8_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei64_v_i8mf2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i8mf2((const int8_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vloxei64_v_i8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i8mf2_m((vint8mf2_t)(op0), (const int8_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei64_v_i8mf4(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i8mf4((const int8_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vloxei64_v_i8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i8mf4_m((vint8mf4_t)(op0), (const int8_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei64_v_i8mf8(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i8mf8((const int8_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vloxei64_v_i8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i8mf8_m((vint8mf8_t)(op0), (const int8_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei64_v_u8m1(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u8m1((const uint8_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vloxei64_v_u8m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u8m1_m((vuint8m1_t)(op0), (const uint8_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei64_v_u8mf2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u8mf2((const uint8_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vloxei64_v_u8mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u8mf2_m((vuint8mf2_t)(op0), (const uint8_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei64_v_u8mf4(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u8mf4((const uint8_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vloxei64_v_u8mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u8mf4_m((vuint8mf4_t)(op0), (const uint8_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei64_v_u8mf8(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u8mf8((const uint8_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vloxei64_v_u8mf8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u8mf8_m((vuint8mf8_t)(op0), (const uint8_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei8_v_i16m1(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i16m1((const int16_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vloxei8_v_i16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i16m1_m((vint16m1_t)(op0), (const int16_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei8_v_i16m2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i16m2((const int16_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vloxei8_v_i16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i16m2_m((vint16m2_t)(op0), (const int16_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei8_v_i16m4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i16m4((const int16_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vloxei8_v_i16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i16m4_m((vint16m4_t)(op0), (const int16_t *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei8_v_i16m8(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i16m8((const int16_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2))
#define vloxei8_v_i16m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i16m8_m((vint16m8_t)(op0), (const int16_t *)(op1), (vuint8m4_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vloxei8_v_i16mf2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i16mf2((const int16_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vloxei8_v_i16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i16mf2_m((vint16mf2_t)(op0), (const int16_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei8_v_i16mf4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i16mf4((const int16_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vloxei8_v_i16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i16mf4_m((vint16mf4_t)(op0), (const int16_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei8_v_u16m1(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u16m1((const uint16_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vloxei8_v_u16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u16m1_m((vuint16m1_t)(op0), (const uint16_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei8_v_u16m2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u16m2((const uint16_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vloxei8_v_u16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u16m2_m((vuint16m2_t)(op0), (const uint16_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei8_v_u16m4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u16m4((const uint16_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vloxei8_v_u16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u16m4_m((vuint16m4_t)(op0), (const uint16_t *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei8_v_u16m8(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u16m8((const uint16_t *)(op0), (vuint8m4_t)(op1), (size_t)(op2))
#define vloxei8_v_u16m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u16m8_m((vuint16m8_t)(op0), (const uint16_t *)(op1), (vuint8m4_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vloxei8_v_u16mf2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u16mf2((const uint16_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vloxei8_v_u16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u16mf2_m((vuint16mf2_t)(op0), (const uint16_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei8_v_u16mf4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u16mf4((const uint16_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vloxei8_v_u16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u16mf4_m((vuint16mf4_t)(op0), (const uint16_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei16_v_i16m1(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i16m1((const int16_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vloxei16_v_i16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i16m1_m((vint16m1_t)(op0), (const int16_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei16_v_i16m2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i16m2((const int16_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vloxei16_v_i16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i16m2_m((vint16m2_t)(op0), (const int16_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei16_v_i16m4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i16m4((const int16_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vloxei16_v_i16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i16m4_m((vint16m4_t)(op0), (const int16_t *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei16_v_i16m8(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i16m8((const int16_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2))
#define vloxei16_v_i16m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i16m8_m((vint16m8_t)(op0), (const int16_t *)(op1), (vuint16m8_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vloxei16_v_i16mf2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i16mf2((const int16_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vloxei16_v_i16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i16mf2_m((vint16mf2_t)(op0), (const int16_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei16_v_i16mf4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i16mf4((const int16_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vloxei16_v_i16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i16mf4_m((vint16mf4_t)(op0), (const int16_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei16_v_u16m1(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u16m1((const uint16_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vloxei16_v_u16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u16m1_m((vuint16m1_t)(op0), (const uint16_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei16_v_u16m2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u16m2((const uint16_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vloxei16_v_u16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u16m2_m((vuint16m2_t)(op0), (const uint16_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei16_v_u16m4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u16m4((const uint16_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vloxei16_v_u16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u16m4_m((vuint16m4_t)(op0), (const uint16_t *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei16_v_u16m8(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u16m8((const uint16_t *)(op0), (vuint16m8_t)(op1), (size_t)(op2))
#define vloxei16_v_u16m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u16m8_m((vuint16m8_t)(op0), (const uint16_t *)(op1), (vuint16m8_t)(op2), (vbool2_t)(op3), (size_t)(op4))
#define vloxei16_v_u16mf2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u16mf2((const uint16_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vloxei16_v_u16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u16mf2_m((vuint16mf2_t)(op0), (const uint16_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei16_v_u16mf4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u16mf4((const uint16_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vloxei16_v_u16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u16mf4_m((vuint16mf4_t)(op0), (const uint16_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei32_v_i16m1(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i16m1((const int16_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vloxei32_v_i16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i16m1_m((vint16m1_t)(op0), (const int16_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei32_v_i16m2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i16m2((const int16_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vloxei32_v_i16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i16m2_m((vint16m2_t)(op0), (const int16_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei32_v_i16m4(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i16m4((const int16_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vloxei32_v_i16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i16m4_m((vint16m4_t)(op0), (const int16_t *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei32_v_i16mf2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i16mf2((const int16_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vloxei32_v_i16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i16mf2_m((vint16mf2_t)(op0), (const int16_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei32_v_i16mf4(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i16mf4((const int16_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vloxei32_v_i16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i16mf4_m((vint16mf4_t)(op0), (const int16_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei32_v_u16m1(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u16m1((const uint16_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vloxei32_v_u16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u16m1_m((vuint16m1_t)(op0), (const uint16_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei32_v_u16m2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u16m2((const uint16_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vloxei32_v_u16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u16m2_m((vuint16m2_t)(op0), (const uint16_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei32_v_u16m4(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u16m4((const uint16_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vloxei32_v_u16m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u16m4_m((vuint16m4_t)(op0), (const uint16_t *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei32_v_u16mf2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u16mf2((const uint16_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vloxei32_v_u16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u16mf2_m((vuint16mf2_t)(op0), (const uint16_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei32_v_u16mf4(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u16mf4((const uint16_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vloxei32_v_u16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u16mf4_m((vuint16mf4_t)(op0), (const uint16_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei64_v_i16m1(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i16m1((const int16_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vloxei64_v_i16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i16m1_m((vint16m1_t)(op0), (const int16_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei64_v_i16m2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i16m2((const int16_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vloxei64_v_i16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i16m2_m((vint16m2_t)(op0), (const int16_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei64_v_i16mf2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i16mf2((const int16_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vloxei64_v_i16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i16mf2_m((vint16mf2_t)(op0), (const int16_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei64_v_i16mf4(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i16mf4((const int16_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vloxei64_v_i16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i16mf4_m((vint16mf4_t)(op0), (const int16_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei64_v_u16m1(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u16m1((const uint16_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vloxei64_v_u16m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u16m1_m((vuint16m1_t)(op0), (const uint16_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei64_v_u16m2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u16m2((const uint16_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vloxei64_v_u16m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u16m2_m((vuint16m2_t)(op0), (const uint16_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei64_v_u16mf2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u16mf2((const uint16_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vloxei64_v_u16mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u16mf2_m((vuint16mf2_t)(op0), (const uint16_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei64_v_u16mf4(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u16mf4((const uint16_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vloxei64_v_u16mf4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u16mf4_m((vuint16mf4_t)(op0), (const uint16_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei8_v_i32m1(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i32m1((const int32_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vloxei8_v_i32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i32m1_m((vint32m1_t)(op0), (const int32_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei8_v_i32m2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i32m2((const int32_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vloxei8_v_i32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i32m2_m((vint32m2_t)(op0), (const int32_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei8_v_i32m4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i32m4((const int32_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vloxei8_v_i32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i32m4_m((vint32m4_t)(op0), (const int32_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei8_v_i32m8(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i32m8((const int32_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vloxei8_v_i32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i32m8_m((vint32m8_t)(op0), (const int32_t *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei8_v_i32mf2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i32mf2((const int32_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vloxei8_v_i32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i32mf2_m((vint32mf2_t)(op0), (const int32_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei8_v_u32m1(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u32m1((const uint32_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vloxei8_v_u32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u32m1_m((vuint32m1_t)(op0), (const uint32_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei8_v_u32m2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u32m2((const uint32_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vloxei8_v_u32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u32m2_m((vuint32m2_t)(op0), (const uint32_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei8_v_u32m4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u32m4((const uint32_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vloxei8_v_u32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u32m4_m((vuint32m4_t)(op0), (const uint32_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei8_v_u32m8(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u32m8((const uint32_t *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vloxei8_v_u32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u32m8_m((vuint32m8_t)(op0), (const uint32_t *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei8_v_u32mf2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u32mf2((const uint32_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vloxei8_v_u32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u32mf2_m((vuint32mf2_t)(op0), (const uint32_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei16_v_i32m1(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i32m1((const int32_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vloxei16_v_i32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i32m1_m((vint32m1_t)(op0), (const int32_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei16_v_i32m2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i32m2((const int32_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vloxei16_v_i32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i32m2_m((vint32m2_t)(op0), (const int32_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei16_v_i32m4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i32m4((const int32_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vloxei16_v_i32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i32m4_m((vint32m4_t)(op0), (const int32_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei16_v_i32m8(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i32m8((const int32_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vloxei16_v_i32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i32m8_m((vint32m8_t)(op0), (const int32_t *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei16_v_i32mf2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i32mf2((const int32_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vloxei16_v_i32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i32mf2_m((vint32mf2_t)(op0), (const int32_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei16_v_u32m1(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u32m1((const uint32_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vloxei16_v_u32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u32m1_m((vuint32m1_t)(op0), (const uint32_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei16_v_u32m2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u32m2((const uint32_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vloxei16_v_u32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u32m2_m((vuint32m2_t)(op0), (const uint32_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei16_v_u32m4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u32m4((const uint32_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vloxei16_v_u32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u32m4_m((vuint32m4_t)(op0), (const uint32_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei16_v_u32m8(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u32m8((const uint32_t *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vloxei16_v_u32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u32m8_m((vuint32m8_t)(op0), (const uint32_t *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei16_v_u32mf2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u32mf2((const uint32_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vloxei16_v_u32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u32mf2_m((vuint32mf2_t)(op0), (const uint32_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei32_v_i32m1(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i32m1((const int32_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vloxei32_v_i32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i32m1_m((vint32m1_t)(op0), (const int32_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei32_v_i32m2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i32m2((const int32_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vloxei32_v_i32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i32m2_m((vint32m2_t)(op0), (const int32_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei32_v_i32m4(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i32m4((const int32_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vloxei32_v_i32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i32m4_m((vint32m4_t)(op0), (const int32_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei32_v_i32m8(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i32m8((const int32_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vloxei32_v_i32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i32m8_m((vint32m8_t)(op0), (const int32_t *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei32_v_i32mf2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i32mf2((const int32_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vloxei32_v_i32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i32mf2_m((vint32mf2_t)(op0), (const int32_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei32_v_u32m1(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u32m1((const uint32_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vloxei32_v_u32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u32m1_m((vuint32m1_t)(op0), (const uint32_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei32_v_u32m2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u32m2((const uint32_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vloxei32_v_u32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u32m2_m((vuint32m2_t)(op0), (const uint32_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei32_v_u32m4(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u32m4((const uint32_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vloxei32_v_u32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u32m4_m((vuint32m4_t)(op0), (const uint32_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei32_v_u32m8(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u32m8((const uint32_t *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vloxei32_v_u32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u32m8_m((vuint32m8_t)(op0), (const uint32_t *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei32_v_u32mf2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u32mf2((const uint32_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vloxei32_v_u32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u32mf2_m((vuint32mf2_t)(op0), (const uint32_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei64_v_i32m1(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i32m1((const int32_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vloxei64_v_i32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i32m1_m((vint32m1_t)(op0), (const int32_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei64_v_i32m2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i32m2((const int32_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vloxei64_v_i32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i32m2_m((vint32m2_t)(op0), (const int32_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei64_v_i32m4(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i32m4((const int32_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vloxei64_v_i32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i32m4_m((vint32m4_t)(op0), (const int32_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei64_v_i32mf2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i32mf2((const int32_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vloxei64_v_i32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i32mf2_m((vint32mf2_t)(op0), (const int32_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei64_v_u32m1(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u32m1((const uint32_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vloxei64_v_u32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u32m1_m((vuint32m1_t)(op0), (const uint32_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei64_v_u32m2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u32m2((const uint32_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vloxei64_v_u32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u32m2_m((vuint32m2_t)(op0), (const uint32_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei64_v_u32m4(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u32m4((const uint32_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vloxei64_v_u32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u32m4_m((vuint32m4_t)(op0), (const uint32_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei64_v_u32mf2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_u32mf2((const uint32_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vloxei64_v_u32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_u32mf2_m((vuint32mf2_t)(op0), (const uint32_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei8_v_i64m1(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i64m1((const int64_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vloxei8_v_i64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i64m1_m((vint64m1_t)(op0), (const int64_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei8_v_i64m2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i64m2((const int64_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vloxei8_v_i64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i64m2_m((vint64m2_t)(op0), (const int64_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei8_v_i64m4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i64m4((const int64_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vloxei8_v_i64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i64m4_m((vint64m4_t)(op0), (const int64_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei8_v_i64m8(op0, op1, op2) \
__builtin_rvv_vloxei8_v_i64m8((const int64_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vloxei8_v_i64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_i64m8_m((vint64m8_t)(op0), (const int64_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei8_v_u64m1(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u64m1((const uint64_t *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vloxei8_v_u64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u64m1_m((vuint64m1_t)(op0), (const uint64_t *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei8_v_u64m2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u64m2((const uint64_t *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vloxei8_v_u64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u64m2_m((vuint64m2_t)(op0), (const uint64_t *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei8_v_u64m4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u64m4((const uint64_t *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vloxei8_v_u64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u64m4_m((vuint64m4_t)(op0), (const uint64_t *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei8_v_u64m8(op0, op1, op2) \
__builtin_rvv_vloxei8_v_u64m8((const uint64_t *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vloxei8_v_u64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_u64m8_m((vuint64m8_t)(op0), (const uint64_t *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei16_v_i64m1(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i64m1((const int64_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vloxei16_v_i64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i64m1_m((vint64m1_t)(op0), (const int64_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei16_v_i64m2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i64m2((const int64_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vloxei16_v_i64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i64m2_m((vint64m2_t)(op0), (const int64_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei16_v_i64m4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i64m4((const int64_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vloxei16_v_i64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i64m4_m((vint64m4_t)(op0), (const int64_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei16_v_i64m8(op0, op1, op2) \
__builtin_rvv_vloxei16_v_i64m8((const int64_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vloxei16_v_i64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_i64m8_m((vint64m8_t)(op0), (const int64_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei16_v_u64m1(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u64m1((const uint64_t *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vloxei16_v_u64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u64m1_m((vuint64m1_t)(op0), (const uint64_t *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei16_v_u64m2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u64m2((const uint64_t *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vloxei16_v_u64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u64m2_m((vuint64m2_t)(op0), (const uint64_t *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei16_v_u64m4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u64m4((const uint64_t *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vloxei16_v_u64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u64m4_m((vuint64m4_t)(op0), (const uint64_t *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei16_v_u64m8(op0, op1, op2) \
__builtin_rvv_vloxei16_v_u64m8((const uint64_t *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vloxei16_v_u64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_u64m8_m((vuint64m8_t)(op0), (const uint64_t *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei32_v_i64m1(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i64m1((const int64_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vloxei32_v_i64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i64m1_m((vint64m1_t)(op0), (const int64_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei32_v_i64m2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i64m2((const int64_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vloxei32_v_i64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i64m2_m((vint64m2_t)(op0), (const int64_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei32_v_i64m4(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i64m4((const int64_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vloxei32_v_i64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i64m4_m((vint64m4_t)(op0), (const int64_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei32_v_i64m8(op0, op1, op2) \
__builtin_rvv_vloxei32_v_i64m8((const int64_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vloxei32_v_i64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_i64m8_m((vint64m8_t)(op0), (const int64_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei32_v_u64m1(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u64m1((const uint64_t *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vloxei32_v_u64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u64m1_m((vuint64m1_t)(op0), (const uint64_t *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei32_v_u64m2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u64m2((const uint64_t *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vloxei32_v_u64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u64m2_m((vuint64m2_t)(op0), (const uint64_t *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei32_v_u64m4(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u64m4((const uint64_t *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vloxei32_v_u64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u64m4_m((vuint64m4_t)(op0), (const uint64_t *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei32_v_u64m8(op0, op1, op2) \
__builtin_rvv_vloxei32_v_u64m8((const uint64_t *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vloxei32_v_u64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_u64m8_m((vuint64m8_t)(op0), (const uint64_t *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei64_v_i64m1(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i64m1((const int64_t *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vloxei64_v_i64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i64m1_m((vint64m1_t)(op0), (const int64_t *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei64_v_i64m2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i64m2((const int64_t *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vloxei64_v_i64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i64m2_m((vint64m2_t)(op0), (const int64_t *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei64_v_i64m4(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i64m4((const int64_t *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vloxei64_v_i64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i64m4_m((vint64m4_t)(op0), (const int64_t *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei64_v_i64m8(op0, op1, op2) \
__builtin_rvv_vloxei64_v_i64m8((const int64_t *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vloxei64_v_i64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_i64m8_m((vint64m8_t)(op0), (const int64_t *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
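/*
 * Illustrative sketch only: a masked, ordered indexed load through the
 * vloxei16 wrappers defined above. In this v0.10-style API the _m form takes
 * (mask, maskedoff, base, byte_offsets, vl); elements whose mask bit is clear
 * keep the corresponding value from `maskedoff`. Function and parameter names
 * are hypothetical.
 */
static inline vint32m1_t
rvv_example_gather_ordered_masked_i32(vbool32_t mask, vint32m1_t maskedoff,
                                      const int32_t *base,
                                      vuint16mf2_t byte_offsets, size_t vl) {
  /* Ordered indexed load: element i reads base + byte_offsets[i] when mask[i] is set. */
  return vloxei16_v_i32m1_m(mask, maskedoff, base, byte_offsets, vl);
}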
#if defined(__riscv_f)
#define vloxei8_v_f32m1(op0, op1, op2) \
__builtin_rvv_vloxei8_v_f32m1((const float *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vloxei8_v_f32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_f32m1_m((vfloat32m1_t)(op0), (const float *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei8_v_f32m2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_f32m2((const float *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vloxei8_v_f32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_f32m2_m((vfloat32m2_t)(op0), (const float *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei8_v_f32m4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_f32m4((const float *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vloxei8_v_f32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_f32m4_m((vfloat32m4_t)(op0), (const float *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei8_v_f32m8(op0, op1, op2) \
__builtin_rvv_vloxei8_v_f32m8((const float *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vloxei8_v_f32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_f32m8_m((vfloat32m8_t)(op0), (const float *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei8_v_f32mf2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_f32mf2((const float *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vloxei8_v_f32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_f32mf2_m((vfloat32mf2_t)(op0), (const float *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei16_v_f32m1(op0, op1, op2) \
__builtin_rvv_vloxei16_v_f32m1((const float *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vloxei16_v_f32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_f32m1_m((vfloat32m1_t)(op0), (const float *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei16_v_f32m2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_f32m2((const float *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vloxei16_v_f32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_f32m2_m((vfloat32m2_t)(op0), (const float *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei16_v_f32m4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_f32m4((const float *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vloxei16_v_f32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_f32m4_m((vfloat32m4_t)(op0), (const float *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei16_v_f32m8(op0, op1, op2) \
__builtin_rvv_vloxei16_v_f32m8((const float *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vloxei16_v_f32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_f32m8_m((vfloat32m8_t)(op0), (const float *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei16_v_f32mf2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_f32mf2((const float *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vloxei16_v_f32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_f32mf2_m((vfloat32mf2_t)(op0), (const float *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei32_v_f32m1(op0, op1, op2) \
__builtin_rvv_vloxei32_v_f32m1((const float *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vloxei32_v_f32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_f32m1_m((vfloat32m1_t)(op0), (const float *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei32_v_f32m2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_f32m2((const float *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vloxei32_v_f32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_f32m2_m((vfloat32m2_t)(op0), (const float *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei32_v_f32m4(op0, op1, op2) \
__builtin_rvv_vloxei32_v_f32m4((const float *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vloxei32_v_f32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_f32m4_m((vfloat32m4_t)(op0), (const float *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei32_v_f32m8(op0, op1, op2) \
__builtin_rvv_vloxei32_v_f32m8((const float *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vloxei32_v_f32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_f32m8_m((vfloat32m8_t)(op0), (const float *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vloxei32_v_f32mf2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_f32mf2((const float *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vloxei32_v_f32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_f32mf2_m((vfloat32mf2_t)(op0), (const float *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei64_v_f32m1(op0, op1, op2) \
__builtin_rvv_vloxei64_v_f32m1((const float *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vloxei64_v_f32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_f32m1_m((vfloat32m1_t)(op0), (const float *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei64_v_f32m2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_f32m2((const float *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vloxei64_v_f32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_f32m2_m((vfloat32m2_t)(op0), (const float *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei64_v_f32m4(op0, op1, op2) \
__builtin_rvv_vloxei64_v_f32m4((const float *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vloxei64_v_f32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_f32m4_m((vfloat32m4_t)(op0), (const float *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei64_v_f32mf2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_f32mf2((const float *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vloxei64_v_f32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_f32mf2_m((vfloat32mf2_t)(op0), (const float *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
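/*
 * Illustrative sketch only (requires the F extension, like the rest of this
 * conditional block): gathers float elements through 32-bit byte offsets
 * using the vloxei32 wrapper above. The function name and parameters are
 * hypothetical.
 */
static inline vfloat32m1_t
rvv_example_gather_f32(const float *base, vuint32m1_t byte_offsets, size_t vl) {
  /* Ordered indexed load of float elements. */
  return vloxei32_v_f32m1(base, byte_offsets, vl);
}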
#define vfadd_vv_f32m1(op0, op1, op2) \
__builtin_rvv_vfadd_vv_f32m1((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (size_t)(op2))
#define vfadd_vv_f32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vv_f32m1_m((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (vfloat32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vfadd_vv_f32m2(op0, op1, op2) \
__builtin_rvv_vfadd_vv_f32m2((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (size_t)(op2))
#define vfadd_vv_f32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vv_f32m2_m((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (vfloat32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vfadd_vv_f32m4(op0, op1, op2) \
__builtin_rvv_vfadd_vv_f32m4((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (size_t)(op2))
#define vfadd_vv_f32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vv_f32m4_m((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (vfloat32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vfadd_vv_f32m8(op0, op1, op2) \
__builtin_rvv_vfadd_vv_f32m8((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (size_t)(op2))
#define vfadd_vv_f32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vv_f32m8_m((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (vfloat32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vfadd_vv_f32mf2(op0, op1, op2) \
__builtin_rvv_vfadd_vv_f32mf2((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (size_t)(op2))
#define vfadd_vv_f32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vv_f32mf2_m((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (vfloat32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vfadd_vf_f32m1(op0, op1, op2) \
__builtin_rvv_vfadd_vf_f32m1((vfloat32m1_t)(op0), (float)(op1), (size_t)(op2))
#define vfadd_vf_f32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vf_f32m1_m((vfloat32m1_t)(op0), (vfloat32m1_t)(op1), (float)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vfadd_vf_f32m2(op0, op1, op2) \
__builtin_rvv_vfadd_vf_f32m2((vfloat32m2_t)(op0), (float)(op1), (size_t)(op2))
#define vfadd_vf_f32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vf_f32m2_m((vfloat32m2_t)(op0), (vfloat32m2_t)(op1), (float)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vfadd_vf_f32m4(op0, op1, op2) \
__builtin_rvv_vfadd_vf_f32m4((vfloat32m4_t)(op0), (float)(op1), (size_t)(op2))
#define vfadd_vf_f32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vf_f32m4_m((vfloat32m4_t)(op0), (vfloat32m4_t)(op1), (float)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vfadd_vf_f32m8(op0, op1, op2) \
__builtin_rvv_vfadd_vf_f32m8((vfloat32m8_t)(op0), (float)(op1), (size_t)(op2))
#define vfadd_vf_f32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vf_f32m8_m((vfloat32m8_t)(op0), (vfloat32m8_t)(op1), (float)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vfadd_vf_f32mf2(op0, op1, op2) \
__builtin_rvv_vfadd_vf_f32mf2((vfloat32mf2_t)(op0), (float)(op1), (size_t)(op2))
#define vfadd_vf_f32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vf_f32mf2_m((vfloat32mf2_t)(op0), (vfloat32mf2_t)(op1), (float)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vle32_v_f32m1(op0, op1) \
__builtin_rvv_vle32_v_f32m1((const float *)(op0), (size_t)(op1))
#define vle32_v_f32m1_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_f32m1_m((vfloat32m1_t)(op0), (const float *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vle32_v_f32m2(op0, op1) \
__builtin_rvv_vle32_v_f32m2((const float *)(op0), (size_t)(op1))
#define vle32_v_f32m2_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_f32m2_m((vfloat32m2_t)(op0), (const float *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vle32_v_f32m4(op0, op1) \
__builtin_rvv_vle32_v_f32m4((const float *)(op0), (size_t)(op1))
#define vle32_v_f32m4_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_f32m4_m((vfloat32m4_t)(op0), (const float *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vle32_v_f32m8(op0, op1) \
__builtin_rvv_vle32_v_f32m8((const float *)(op0), (size_t)(op1))
#define vle32_v_f32m8_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_f32m8_m((vfloat32m8_t)(op0), (const float *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vle32_v_f32mf2(op0, op1) \
__builtin_rvv_vle32_v_f32mf2((const float *)(op0), (size_t)(op1))
#define vle32_v_f32mf2_m(op2, op0, op1, op3) \
__builtin_rvv_vle32_v_f32mf2_m((vfloat32mf2_t)(op0), (const float *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vse32_v_f32m1(op1, op0, op2) \
__builtin_rvv_vse32_v_f32m1((vfloat32m1_t)(op0), (float *)(op1), (size_t)(op2))
#define vse32_v_f32m1_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_f32m1_m((vfloat32m1_t)(op0), (float *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vse32_v_f32m2(op1, op0, op2) \
__builtin_rvv_vse32_v_f32m2((vfloat32m2_t)(op0), (float *)(op1), (size_t)(op2))
#define vse32_v_f32m2_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_f32m2_m((vfloat32m2_t)(op0), (float *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vse32_v_f32m4(op1, op0, op2) \
__builtin_rvv_vse32_v_f32m4((vfloat32m4_t)(op0), (float *)(op1), (size_t)(op2))
#define vse32_v_f32m4_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_f32m4_m((vfloat32m4_t)(op0), (float *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vse32_v_f32m8(op1, op0, op2) \
__builtin_rvv_vse32_v_f32m8((vfloat32m8_t)(op0), (float *)(op1), (size_t)(op2))
#define vse32_v_f32m8_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_f32m8_m((vfloat32m8_t)(op0), (float *)(op1), (vbool4_t)(op2), (size_t)(op3))
#define vse32_v_f32mf2(op1, op0, op2) \
__builtin_rvv_vse32_v_f32mf2((vfloat32mf2_t)(op0), (float *)(op1), (size_t)(op2))
#define vse32_v_f32mf2_m(op2, op1, op0, op3) \
__builtin_rvv_vse32_v_f32mf2_m((vfloat32mf2_t)(op0), (float *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vluxei8_v_f32m1(op0, op1, op2) \
__builtin_rvv_vluxei8_v_f32m1((const float *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vluxei8_v_f32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_f32m1_m((vfloat32m1_t)(op0), (const float *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei8_v_f32m2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_f32m2((const float *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vluxei8_v_f32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_f32m2_m((vfloat32m2_t)(op0), (const float *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei8_v_f32m4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_f32m4((const float *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vluxei8_v_f32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_f32m4_m((vfloat32m4_t)(op0), (const float *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei8_v_f32m8(op0, op1, op2) \
__builtin_rvv_vluxei8_v_f32m8((const float *)(op0), (vuint8m2_t)(op1), (size_t)(op2))
#define vluxei8_v_f32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_f32m8_m((vfloat32m8_t)(op0), (const float *)(op1), (vuint8m2_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei8_v_f32mf2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_f32mf2((const float *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vluxei8_v_f32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_f32mf2_m((vfloat32mf2_t)(op0), (const float *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei16_v_f32m1(op0, op1, op2) \
__builtin_rvv_vluxei16_v_f32m1((const float *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vluxei16_v_f32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_f32m1_m((vfloat32m1_t)(op0), (const float *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei16_v_f32m2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_f32m2((const float *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vluxei16_v_f32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_f32m2_m((vfloat32m2_t)(op0), (const float *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei16_v_f32m4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_f32m4((const float *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vluxei16_v_f32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_f32m4_m((vfloat32m4_t)(op0), (const float *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei16_v_f32m8(op0, op1, op2) \
__builtin_rvv_vluxei16_v_f32m8((const float *)(op0), (vuint16m4_t)(op1), (size_t)(op2))
#define vluxei16_v_f32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_f32m8_m((vfloat32m8_t)(op0), (const float *)(op1), (vuint16m4_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei16_v_f32mf2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_f32mf2((const float *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vluxei16_v_f32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_f32mf2_m((vfloat32mf2_t)(op0), (const float *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei32_v_f32m1(op0, op1, op2) \
__builtin_rvv_vluxei32_v_f32m1((const float *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vluxei32_v_f32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_f32m1_m((vfloat32m1_t)(op0), (const float *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei32_v_f32m2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_f32m2((const float *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vluxei32_v_f32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_f32m2_m((vfloat32m2_t)(op0), (const float *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei32_v_f32m4(op0, op1, op2) \
__builtin_rvv_vluxei32_v_f32m4((const float *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vluxei32_v_f32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_f32m4_m((vfloat32m4_t)(op0), (const float *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei32_v_f32m8(op0, op1, op2) \
__builtin_rvv_vluxei32_v_f32m8((const float *)(op0), (vuint32m8_t)(op1), (size_t)(op2))
#define vluxei32_v_f32m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_f32m8_m((vfloat32m8_t)(op0), (const float *)(op1), (vuint32m8_t)(op2), (vbool4_t)(op3), (size_t)(op4))
#define vluxei32_v_f32mf2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_f32mf2((const float *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vluxei32_v_f32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_f32mf2_m((vfloat32mf2_t)(op0), (const float *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei64_v_f32m1(op0, op1, op2) \
__builtin_rvv_vluxei64_v_f32m1((const float *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vluxei64_v_f32m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_f32m1_m((vfloat32m1_t)(op0), (const float *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei64_v_f32m2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_f32m2((const float *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vluxei64_v_f32m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_f32m2_m((vfloat32m2_t)(op0), (const float *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei64_v_f32m4(op0, op1, op2) \
__builtin_rvv_vluxei64_v_f32m4((const float *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vluxei64_v_f32m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_f32m4_m((vfloat32m4_t)(op0), (const float *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei64_v_f32mf2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_f32mf2((const float *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vluxei64_v_f32mf2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_f32mf2_m((vfloat32mf2_t)(op0), (const float *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#endif
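/*
 * The macros above cover the single-precision (f32) indexed loads
 * (vloxei/vluxei), the vfadd forms, and the unit-stride vle32/vse32
 * loads and stores; the #if defined(__riscv_d) block below repeats the
 * same pattern for f64.  Minimal usage sketch of the f32 forms
 * (illustrative only: vec_add_f32 is a hypothetical helper, and
 * vsetvl_e32m1() is assumed to be declared earlier in this header):
 *
 *   static void vec_add_f32(float *dst, const float *a, const float *b,
 *                           size_t n) {
 *     while (n > 0) {
 *       size_t vl = vsetvl_e32m1(n);             // elements this pass
 *       vfloat32m1_t va = vle32_v_f32m1(a, vl);  // unit-stride loads
 *       vfloat32m1_t vb = vle32_v_f32m1(b, vl);
 *       vfloat32m1_t vc = vfadd_vv_f32m1(va, vb, vl);
 *       vse32_v_f32m1(dst, vc, vl);              // unit-stride store
 *       a += vl; b += vl; dst += vl; n -= vl;
 *     }
 *   }
 */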
#if defined(__riscv_d)
#define vloxei8_v_f64m1(op0, op1, op2) \
__builtin_rvv_vloxei8_v_f64m1((const double *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vloxei8_v_f64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_f64m1_m((vfloat64m1_t)(op0), (const double *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei8_v_f64m2(op0, op1, op2) \
__builtin_rvv_vloxei8_v_f64m2((const double *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vloxei8_v_f64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_f64m2_m((vfloat64m2_t)(op0), (const double *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei8_v_f64m4(op0, op1, op2) \
__builtin_rvv_vloxei8_v_f64m4((const double *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vloxei8_v_f64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_f64m4_m((vfloat64m4_t)(op0), (const double *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei8_v_f64m8(op0, op1, op2) \
__builtin_rvv_vloxei8_v_f64m8((const double *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vloxei8_v_f64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei8_v_f64m8_m((vfloat64m8_t)(op0), (const double *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei16_v_f64m1(op0, op1, op2) \
__builtin_rvv_vloxei16_v_f64m1((const double *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vloxei16_v_f64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_f64m1_m((vfloat64m1_t)(op0), (const double *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei16_v_f64m2(op0, op1, op2) \
__builtin_rvv_vloxei16_v_f64m2((const double *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vloxei16_v_f64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_f64m2_m((vfloat64m2_t)(op0), (const double *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei16_v_f64m4(op0, op1, op2) \
__builtin_rvv_vloxei16_v_f64m4((const double *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vloxei16_v_f64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_f64m4_m((vfloat64m4_t)(op0), (const double *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei16_v_f64m8(op0, op1, op2) \
__builtin_rvv_vloxei16_v_f64m8((const double *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vloxei16_v_f64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei16_v_f64m8_m((vfloat64m8_t)(op0), (const double *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei32_v_f64m1(op0, op1, op2) \
__builtin_rvv_vloxei32_v_f64m1((const double *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vloxei32_v_f64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_f64m1_m((vfloat64m1_t)(op0), (const double *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei32_v_f64m2(op0, op1, op2) \
__builtin_rvv_vloxei32_v_f64m2((const double *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vloxei32_v_f64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_f64m2_m((vfloat64m2_t)(op0), (const double *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei32_v_f64m4(op0, op1, op2) \
__builtin_rvv_vloxei32_v_f64m4((const double *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vloxei32_v_f64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_f64m4_m((vfloat64m4_t)(op0), (const double *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei32_v_f64m8(op0, op1, op2) \
__builtin_rvv_vloxei32_v_f64m8((const double *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vloxei32_v_f64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei32_v_f64m8_m((vfloat64m8_t)(op0), (const double *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vloxei64_v_f64m1(op0, op1, op2) \
__builtin_rvv_vloxei64_v_f64m1((const double *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vloxei64_v_f64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_f64m1_m((vfloat64m1_t)(op0), (const double *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vloxei64_v_f64m2(op0, op1, op2) \
__builtin_rvv_vloxei64_v_f64m2((const double *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vloxei64_v_f64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_f64m2_m((vfloat64m2_t)(op0), (const double *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vloxei64_v_f64m4(op0, op1, op2) \
__builtin_rvv_vloxei64_v_f64m4((const double *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vloxei64_v_f64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_f64m4_m((vfloat64m4_t)(op0), (const double *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vloxei64_v_f64m8(op0, op1, op2) \
__builtin_rvv_vloxei64_v_f64m8((const double *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vloxei64_v_f64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vloxei64_v_f64m8_m((vfloat64m8_t)(op0), (const double *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vfadd_vv_f64m1(op0, op1, op2) \
__builtin_rvv_vfadd_vv_f64m1((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (size_t)(op2))
#define vfadd_vv_f64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vv_f64m1_m((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (vfloat64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vfadd_vv_f64m2(op0, op1, op2) \
__builtin_rvv_vfadd_vv_f64m2((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (size_t)(op2))
#define vfadd_vv_f64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vv_f64m2_m((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (vfloat64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vfadd_vv_f64m4(op0, op1, op2) \
__builtin_rvv_vfadd_vv_f64m4((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (size_t)(op2))
#define vfadd_vv_f64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vv_f64m4_m((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (vfloat64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vfadd_vv_f64m8(op0, op1, op2) \
__builtin_rvv_vfadd_vv_f64m8((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (size_t)(op2))
#define vfadd_vv_f64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vv_f64m8_m((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (vfloat64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vfadd_vf_f64m1(op0, op1, op2) \
__builtin_rvv_vfadd_vf_f64m1((vfloat64m1_t)(op0), (double)(op1), (size_t)(op2))
#define vfadd_vf_f64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vf_f64m1_m((vfloat64m1_t)(op0), (vfloat64m1_t)(op1), (double)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vfadd_vf_f64m2(op0, op1, op2) \
__builtin_rvv_vfadd_vf_f64m2((vfloat64m2_t)(op0), (double)(op1), (size_t)(op2))
#define vfadd_vf_f64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vf_f64m2_m((vfloat64m2_t)(op0), (vfloat64m2_t)(op1), (double)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vfadd_vf_f64m4(op0, op1, op2) \
__builtin_rvv_vfadd_vf_f64m4((vfloat64m4_t)(op0), (double)(op1), (size_t)(op2))
#define vfadd_vf_f64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vf_f64m4_m((vfloat64m4_t)(op0), (vfloat64m4_t)(op1), (double)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vfadd_vf_f64m8(op0, op1, op2) \
__builtin_rvv_vfadd_vf_f64m8((vfloat64m8_t)(op0), (double)(op1), (size_t)(op2))
#define vfadd_vf_f64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vfadd_vf_f64m8_m((vfloat64m8_t)(op0), (vfloat64m8_t)(op1), (double)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vle64_v_f64m1(op0, op1) \
__builtin_rvv_vle64_v_f64m1((const double *)(op0), (size_t)(op1))
#define vle64_v_f64m1_m(op2, op0, op1, op3) \
__builtin_rvv_vle64_v_f64m1_m((vfloat64m1_t)(op0), (const double *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vle64_v_f64m2(op0, op1) \
__builtin_rvv_vle64_v_f64m2((const double *)(op0), (size_t)(op1))
#define vle64_v_f64m2_m(op2, op0, op1, op3) \
__builtin_rvv_vle64_v_f64m2_m((vfloat64m2_t)(op0), (const double *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vle64_v_f64m4(op0, op1) \
__builtin_rvv_vle64_v_f64m4((const double *)(op0), (size_t)(op1))
#define vle64_v_f64m4_m(op2, op0, op1, op3) \
__builtin_rvv_vle64_v_f64m4_m((vfloat64m4_t)(op0), (const double *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vle64_v_f64m8(op0, op1) \
__builtin_rvv_vle64_v_f64m8((const double *)(op0), (size_t)(op1))
#define vle64_v_f64m8_m(op2, op0, op1, op3) \
__builtin_rvv_vle64_v_f64m8_m((vfloat64m8_t)(op0), (const double *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vse64_v_f64m1(op1, op0, op2) \
__builtin_rvv_vse64_v_f64m1((vfloat64m1_t)(op0), (double *)(op1), (size_t)(op2))
#define vse64_v_f64m1_m(op2, op1, op0, op3) \
__builtin_rvv_vse64_v_f64m1_m((vfloat64m1_t)(op0), (double *)(op1), (vbool64_t)(op2), (size_t)(op3))
#define vse64_v_f64m2(op1, op0, op2) \
__builtin_rvv_vse64_v_f64m2((vfloat64m2_t)(op0), (double *)(op1), (size_t)(op2))
#define vse64_v_f64m2_m(op2, op1, op0, op3) \
__builtin_rvv_vse64_v_f64m2_m((vfloat64m2_t)(op0), (double *)(op1), (vbool32_t)(op2), (size_t)(op3))
#define vse64_v_f64m4(op1, op0, op2) \
__builtin_rvv_vse64_v_f64m4((vfloat64m4_t)(op0), (double *)(op1), (size_t)(op2))
#define vse64_v_f64m4_m(op2, op1, op0, op3) \
__builtin_rvv_vse64_v_f64m4_m((vfloat64m4_t)(op0), (double *)(op1), (vbool16_t)(op2), (size_t)(op3))
#define vse64_v_f64m8(op1, op0, op2) \
__builtin_rvv_vse64_v_f64m8((vfloat64m8_t)(op0), (double *)(op1), (size_t)(op2))
#define vse64_v_f64m8_m(op2, op1, op0, op3) \
__builtin_rvv_vse64_v_f64m8_m((vfloat64m8_t)(op0), (double *)(op1), (vbool8_t)(op2), (size_t)(op3))
#define vluxei8_v_f64m1(op0, op1, op2) \
__builtin_rvv_vluxei8_v_f64m1((const double *)(op0), (vuint8mf8_t)(op1), (size_t)(op2))
#define vluxei8_v_f64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_f64m1_m((vfloat64m1_t)(op0), (const double *)(op1), (vuint8mf8_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei8_v_f64m2(op0, op1, op2) \
__builtin_rvv_vluxei8_v_f64m2((const double *)(op0), (vuint8mf4_t)(op1), (size_t)(op2))
#define vluxei8_v_f64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_f64m2_m((vfloat64m2_t)(op0), (const double *)(op1), (vuint8mf4_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei8_v_f64m4(op0, op1, op2) \
__builtin_rvv_vluxei8_v_f64m4((const double *)(op0), (vuint8mf2_t)(op1), (size_t)(op2))
#define vluxei8_v_f64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_f64m4_m((vfloat64m4_t)(op0), (const double *)(op1), (vuint8mf2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei8_v_f64m8(op0, op1, op2) \
__builtin_rvv_vluxei8_v_f64m8((const double *)(op0), (vuint8m1_t)(op1), (size_t)(op2))
#define vluxei8_v_f64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei8_v_f64m8_m((vfloat64m8_t)(op0), (const double *)(op1), (vuint8m1_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei16_v_f64m1(op0, op1, op2) \
__builtin_rvv_vluxei16_v_f64m1((const double *)(op0), (vuint16mf4_t)(op1), (size_t)(op2))
#define vluxei16_v_f64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_f64m1_m((vfloat64m1_t)(op0), (const double *)(op1), (vuint16mf4_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei16_v_f64m2(op0, op1, op2) \
__builtin_rvv_vluxei16_v_f64m2((const double *)(op0), (vuint16mf2_t)(op1), (size_t)(op2))
#define vluxei16_v_f64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_f64m2_m((vfloat64m2_t)(op0), (const double *)(op1), (vuint16mf2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei16_v_f64m4(op0, op1, op2) \
__builtin_rvv_vluxei16_v_f64m4((const double *)(op0), (vuint16m1_t)(op1), (size_t)(op2))
#define vluxei16_v_f64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_f64m4_m((vfloat64m4_t)(op0), (const double *)(op1), (vuint16m1_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei16_v_f64m8(op0, op1, op2) \
__builtin_rvv_vluxei16_v_f64m8((const double *)(op0), (vuint16m2_t)(op1), (size_t)(op2))
#define vluxei16_v_f64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei16_v_f64m8_m((vfloat64m8_t)(op0), (const double *)(op1), (vuint16m2_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei32_v_f64m1(op0, op1, op2) \
__builtin_rvv_vluxei32_v_f64m1((const double *)(op0), (vuint32mf2_t)(op1), (size_t)(op2))
#define vluxei32_v_f64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_f64m1_m((vfloat64m1_t)(op0), (const double *)(op1), (vuint32mf2_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei32_v_f64m2(op0, op1, op2) \
__builtin_rvv_vluxei32_v_f64m2((const double *)(op0), (vuint32m1_t)(op1), (size_t)(op2))
#define vluxei32_v_f64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_f64m2_m((vfloat64m2_t)(op0), (const double *)(op1), (vuint32m1_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei32_v_f64m4(op0, op1, op2) \
__builtin_rvv_vluxei32_v_f64m4((const double *)(op0), (vuint32m2_t)(op1), (size_t)(op2))
#define vluxei32_v_f64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_f64m4_m((vfloat64m4_t)(op0), (const double *)(op1), (vuint32m2_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei32_v_f64m8(op0, op1, op2) \
__builtin_rvv_vluxei32_v_f64m8((const double *)(op0), (vuint32m4_t)(op1), (size_t)(op2))
#define vluxei32_v_f64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei32_v_f64m8_m((vfloat64m8_t)(op0), (const double *)(op1), (vuint32m4_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#define vluxei64_v_f64m1(op0, op1, op2) \
__builtin_rvv_vluxei64_v_f64m1((const double *)(op0), (vuint64m1_t)(op1), (size_t)(op2))
#define vluxei64_v_f64m1_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_f64m1_m((vfloat64m1_t)(op0), (const double *)(op1), (vuint64m1_t)(op2), (vbool64_t)(op3), (size_t)(op4))
#define vluxei64_v_f64m2(op0, op1, op2) \
__builtin_rvv_vluxei64_v_f64m2((const double *)(op0), (vuint64m2_t)(op1), (size_t)(op2))
#define vluxei64_v_f64m2_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_f64m2_m((vfloat64m2_t)(op0), (const double *)(op1), (vuint64m2_t)(op2), (vbool32_t)(op3), (size_t)(op4))
#define vluxei64_v_f64m4(op0, op1, op2) \
__builtin_rvv_vluxei64_v_f64m4((const double *)(op0), (vuint64m4_t)(op1), (size_t)(op2))
#define vluxei64_v_f64m4_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_f64m4_m((vfloat64m4_t)(op0), (const double *)(op1), (vuint64m4_t)(op2), (vbool16_t)(op3), (size_t)(op4))
#define vluxei64_v_f64m8(op0, op1, op2) \
__builtin_rvv_vluxei64_v_f64m8((const double *)(op0), (vuint64m8_t)(op1), (size_t)(op2))
#define vluxei64_v_f64m8_m(op3, op0, op1, op2, op4) \
__builtin_rvv_vluxei64_v_f64m8_m((vfloat64m8_t)(op0), (const double *)(op1), (vuint64m8_t)(op2), (vbool8_t)(op3), (size_t)(op4))
#endif
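/*
 * Overloaded C interface: __rvv_overloaded expands to a static inline
 * function marked with Clang's __overloadable__ attribute, so the
 * wrappers below let the suffix-free names (vadd, vle8, vse8, ...) be
 * resolved from their argument types and forwarded to the type-specific
 * intrinsics defined above.
 */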
#define __riscv_v_intrinsic_overloading 1
#define __rvv_overloaded static inline __attribute__((__always_inline__, __nodebug__, __overloadable__))
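/*
 * Usage sketch of the overloaded forms (illustrative only: add_bias_i8
 * is a hypothetical helper, and vsetvl_e8m1()/vle8_v_i8m1() are assumed
 * to be declared earlier in this header):
 *
 *   static void add_bias_i8(int8_t *dst, const int8_t *src, int8_t bias,
 *                           size_t n) {
 *     while (n > 0) {
 *       size_t vl = vsetvl_e8m1(n);
 *       vint8m1_t v = vle8_v_i8m1(src, vl);  // loads keep the suffixed form
 *       v = vadd(v, bias, vl);               // resolves to vadd_vx_i8m1
 *       vse8(dst, v, vl);                    // resolves to vse8_v_i8m1
 *       src += vl; dst += vl; n -= vl;
 *     }
 *   }
 */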
__rvv_overloaded vint8m1_t vadd(vint8m1_t op0, vint8m1_t op1, size_t op2){
return vadd_vv_i8m1(op0, op1, op2);
}
__rvv_overloaded vint8m1_t vadd(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, vint8m1_t op3, size_t op4){
return vadd_vv_i8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m2_t vadd(vint8m2_t op0, vint8m2_t op1, size_t op2){
return vadd_vv_i8m2(op0, op1, op2);
}
__rvv_overloaded vint8m2_t vadd(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, vint8m2_t op3, size_t op4){
return vadd_vv_i8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m4_t vadd(vint8m4_t op0, vint8m4_t op1, size_t op2){
return vadd_vv_i8m4(op0, op1, op2);
}
__rvv_overloaded vint8m4_t vadd(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, vint8m4_t op3, size_t op4){
return vadd_vv_i8m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m8_t vadd(vint8m8_t op0, vint8m8_t op1, size_t op2){
return vadd_vv_i8m8(op0, op1, op2);
}
__rvv_overloaded vint8m8_t vadd(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, vint8m8_t op3, size_t op4){
return vadd_vv_i8m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf2_t vadd(vint8mf2_t op0, vint8mf2_t op1, size_t op2){
return vadd_vv_i8mf2(op0, op1, op2);
}
__rvv_overloaded vint8mf2_t vadd(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, vint8mf2_t op3, size_t op4){
return vadd_vv_i8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf4_t vadd(vint8mf4_t op0, vint8mf4_t op1, size_t op2){
return vadd_vv_i8mf4(op0, op1, op2);
}
__rvv_overloaded vint8mf4_t vadd(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, vint8mf4_t op3, size_t op4){
return vadd_vv_i8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf8_t vadd(vint8mf8_t op0, vint8mf8_t op1, size_t op2){
return vadd_vv_i8mf8(op0, op1, op2);
}
__rvv_overloaded vint8mf8_t vadd(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, vint8mf8_t op3, size_t op4){
return vadd_vv_i8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m1_t vadd(vint16m1_t op0, vint16m1_t op1, size_t op2){
return vadd_vv_i16m1(op0, op1, op2);
}
__rvv_overloaded vint16m1_t vadd(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, vint16m1_t op3, size_t op4){
return vadd_vv_i16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m2_t vadd(vint16m2_t op0, vint16m2_t op1, size_t op2){
return vadd_vv_i16m2(op0, op1, op2);
}
__rvv_overloaded vint16m2_t vadd(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, vint16m2_t op3, size_t op4){
return vadd_vv_i16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m4_t vadd(vint16m4_t op0, vint16m4_t op1, size_t op2){
return vadd_vv_i16m4(op0, op1, op2);
}
__rvv_overloaded vint16m4_t vadd(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, vint16m4_t op3, size_t op4){
return vadd_vv_i16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m8_t vadd(vint16m8_t op0, vint16m8_t op1, size_t op2){
return vadd_vv_i16m8(op0, op1, op2);
}
__rvv_overloaded vint16m8_t vadd(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, vint16m8_t op3, size_t op4){
return vadd_vv_i16m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf2_t vadd(vint16mf2_t op0, vint16mf2_t op1, size_t op2){
return vadd_vv_i16mf2(op0, op1, op2);
}
__rvv_overloaded vint16mf2_t vadd(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, vint16mf2_t op3, size_t op4){
return vadd_vv_i16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf4_t vadd(vint16mf4_t op0, vint16mf4_t op1, size_t op2){
return vadd_vv_i16mf4(op0, op1, op2);
}
__rvv_overloaded vint16mf4_t vadd(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, vint16mf4_t op3, size_t op4){
return vadd_vv_i16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m1_t vadd(vint32m1_t op0, vint32m1_t op1, size_t op2){
return vadd_vv_i32m1(op0, op1, op2);
}
__rvv_overloaded vint32m1_t vadd(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, vint32m1_t op3, size_t op4){
return vadd_vv_i32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m2_t vadd(vint32m2_t op0, vint32m2_t op1, size_t op2){
return vadd_vv_i32m2(op0, op1, op2);
}
__rvv_overloaded vint32m2_t vadd(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, vint32m2_t op3, size_t op4){
return vadd_vv_i32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m4_t vadd(vint32m4_t op0, vint32m4_t op1, size_t op2){
return vadd_vv_i32m4(op0, op1, op2);
}
__rvv_overloaded vint32m4_t vadd(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, vint32m4_t op3, size_t op4){
return vadd_vv_i32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m8_t vadd(vint32m8_t op0, vint32m8_t op1, size_t op2){
return vadd_vv_i32m8(op0, op1, op2);
}
__rvv_overloaded vint32m8_t vadd(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, vint32m8_t op3, size_t op4){
return vadd_vv_i32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32mf2_t vadd(vint32mf2_t op0, vint32mf2_t op1, size_t op2){
return vadd_vv_i32mf2(op0, op1, op2);
}
__rvv_overloaded vint32mf2_t vadd(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, vint32mf2_t op3, size_t op4){
return vadd_vv_i32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m1_t vadd(vint64m1_t op0, vint64m1_t op1, size_t op2){
return vadd_vv_i64m1(op0, op1, op2);
}
__rvv_overloaded vint64m1_t vadd(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, vint64m1_t op3, size_t op4){
return vadd_vv_i64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m2_t vadd(vint64m2_t op0, vint64m2_t op1, size_t op2){
return vadd_vv_i64m2(op0, op1, op2);
}
__rvv_overloaded vint64m2_t vadd(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, vint64m2_t op3, size_t op4){
return vadd_vv_i64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m4_t vadd(vint64m4_t op0, vint64m4_t op1, size_t op2){
return vadd_vv_i64m4(op0, op1, op2);
}
__rvv_overloaded vint64m4_t vadd(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, vint64m4_t op3, size_t op4){
return vadd_vv_i64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m8_t vadd(vint64m8_t op0, vint64m8_t op1, size_t op2){
return vadd_vv_i64m8(op0, op1, op2);
}
__rvv_overloaded vint64m8_t vadd(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, vint64m8_t op3, size_t op4){
return vadd_vv_i64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m1_t vle8(vbool8_t op0, vint8m1_t op1, const int8_t * op2, size_t op3){
return vle8_v_i8m1_m(op0, op1, op2, op3);
}
__rvv_overloaded vint8m2_t vle8(vbool4_t op0, vint8m2_t op1, const int8_t * op2, size_t op3){
return vle8_v_i8m2_m(op0, op1, op2, op3);
}
__rvv_overloaded vint8m4_t vle8(vbool2_t op0, vint8m4_t op1, const int8_t * op2, size_t op3){
return vle8_v_i8m4_m(op0, op1, op2, op3);
}
__rvv_overloaded vint8m8_t vle8(vbool1_t op0, vint8m8_t op1, const int8_t * op2, size_t op3){
return vle8_v_i8m8_m(op0, op1, op2, op3);
}
__rvv_overloaded vint8mf2_t vle8(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, size_t op3){
return vle8_v_i8mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded vint8mf4_t vle8(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, size_t op3){
return vle8_v_i8mf4_m(op0, op1, op2, op3);
}
__rvv_overloaded vint8mf8_t vle8(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, size_t op3){
return vle8_v_i8mf8_m(op0, op1, op2, op3);
}
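/*
 * Only the masked (_m) unit-stride loads are given overloads: in
 * vle8(mask, maskedoff, ptr, vl) the maskedoff operand pins down the
 * result type, whereas an unmasked vle8(ptr, vl) could not be resolved
 * from its arguments alone.  Indexed loads such as vloxei64 below are
 * overloadable in both forms because the pointer and index-vector types
 * together determine the element type and LMUL.
 */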
__rvv_overloaded vuint64m1_t vloxei64(const uint64_t * op0, vuint64m1_t op1, size_t op2){
return vloxei64_v_u64m1(op0, op1, op2);
}
__rvv_overloaded vuint64m1_t vloxei64(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint64m1_t op3, size_t op4){
return vloxei64_v_u64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m2_t vloxei64(const uint64_t * op0, vuint64m2_t op1, size_t op2){
return vloxei64_v_u64m2(op0, op1, op2);
}
__rvv_overloaded vuint64m2_t vloxei64(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint64m2_t op3, size_t op4){
return vloxei64_v_u64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m4_t vloxei64(const uint64_t * op0, vuint64m4_t op1, size_t op2){
return vloxei64_v_u64m4(op0, op1, op2);
}
__rvv_overloaded vuint64m4_t vloxei64(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint64m4_t op3, size_t op4){
return vloxei64_v_u64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m8_t vloxei64(const uint64_t * op0, vuint64m8_t op1, size_t op2){
return vloxei64_v_u64m8(op0, op1, op2);
}
__rvv_overloaded vuint64m8_t vloxei64(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint64m8_t op3, size_t op4){
return vloxei64_v_u64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m1_t vadd(vint8m1_t op0, int8_t op1, size_t op2){
return vadd_vx_i8m1(op0, op1, op2);
}
__rvv_overloaded vint8m1_t vadd(vbool8_t op0, vint8m1_t op1, vint8m1_t op2, int8_t op3, size_t op4){
return vadd_vx_i8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m2_t vadd(vint8m2_t op0, int8_t op1, size_t op2){
return vadd_vx_i8m2(op0, op1, op2);
}
__rvv_overloaded vint8m2_t vadd(vbool4_t op0, vint8m2_t op1, vint8m2_t op2, int8_t op3, size_t op4){
return vadd_vx_i8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m4_t vadd(vint8m4_t op0, int8_t op1, size_t op2){
return vadd_vx_i8m4(op0, op1, op2);
}
__rvv_overloaded vint8m4_t vadd(vbool2_t op0, vint8m4_t op1, vint8m4_t op2, int8_t op3, size_t op4){
return vadd_vx_i8m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m8_t vadd(vint8m8_t op0, int8_t op1, size_t op2){
return vadd_vx_i8m8(op0, op1, op2);
}
__rvv_overloaded vint8m8_t vadd(vbool1_t op0, vint8m8_t op1, vint8m8_t op2, int8_t op3, size_t op4){
return vadd_vx_i8m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf2_t vadd(vint8mf2_t op0, int8_t op1, size_t op2){
return vadd_vx_i8mf2(op0, op1, op2);
}
__rvv_overloaded vint8mf2_t vadd(vbool16_t op0, vint8mf2_t op1, vint8mf2_t op2, int8_t op3, size_t op4){
return vadd_vx_i8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf4_t vadd(vint8mf4_t op0, int8_t op1, size_t op2){
return vadd_vx_i8mf4(op0, op1, op2);
}
__rvv_overloaded vint8mf4_t vadd(vbool32_t op0, vint8mf4_t op1, vint8mf4_t op2, int8_t op3, size_t op4){
return vadd_vx_i8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf8_t vadd(vint8mf8_t op0, int8_t op1, size_t op2){
return vadd_vx_i8mf8(op0, op1, op2);
}
__rvv_overloaded vint8mf8_t vadd(vbool64_t op0, vint8mf8_t op1, vint8mf8_t op2, int8_t op3, size_t op4){
return vadd_vx_i8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m1_t vadd(vint16m1_t op0, int16_t op1, size_t op2){
return vadd_vx_i16m1(op0, op1, op2);
}
__rvv_overloaded vint16m1_t vadd(vbool16_t op0, vint16m1_t op1, vint16m1_t op2, int16_t op3, size_t op4){
return vadd_vx_i16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m2_t vadd(vint16m2_t op0, int16_t op1, size_t op2){
return vadd_vx_i16m2(op0, op1, op2);
}
__rvv_overloaded vint16m2_t vadd(vbool8_t op0, vint16m2_t op1, vint16m2_t op2, int16_t op3, size_t op4){
return vadd_vx_i16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m4_t vadd(vint16m4_t op0, int16_t op1, size_t op2){
return vadd_vx_i16m4(op0, op1, op2);
}
__rvv_overloaded vint16m4_t vadd(vbool4_t op0, vint16m4_t op1, vint16m4_t op2, int16_t op3, size_t op4){
return vadd_vx_i16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m8_t vadd(vint16m8_t op0, int16_t op1, size_t op2){
return vadd_vx_i16m8(op0, op1, op2);
}
__rvv_overloaded vint16m8_t vadd(vbool2_t op0, vint16m8_t op1, vint16m8_t op2, int16_t op3, size_t op4){
return vadd_vx_i16m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf2_t vadd(vint16mf2_t op0, int16_t op1, size_t op2){
return vadd_vx_i16mf2(op0, op1, op2);
}
__rvv_overloaded vint16mf2_t vadd(vbool32_t op0, vint16mf2_t op1, vint16mf2_t op2, int16_t op3, size_t op4){
return vadd_vx_i16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf4_t vadd(vint16mf4_t op0, int16_t op1, size_t op2){
return vadd_vx_i16mf4(op0, op1, op2);
}
__rvv_overloaded vint16mf4_t vadd(vbool64_t op0, vint16mf4_t op1, vint16mf4_t op2, int16_t op3, size_t op4){
return vadd_vx_i16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m1_t vadd(vint32m1_t op0, int32_t op1, size_t op2){
return vadd_vx_i32m1(op0, op1, op2);
}
__rvv_overloaded vint32m1_t vadd(vbool32_t op0, vint32m1_t op1, vint32m1_t op2, int32_t op3, size_t op4){
return vadd_vx_i32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m2_t vadd(vint32m2_t op0, int32_t op1, size_t op2){
return vadd_vx_i32m2(op0, op1, op2);
}
__rvv_overloaded vint32m2_t vadd(vbool16_t op0, vint32m2_t op1, vint32m2_t op2, int32_t op3, size_t op4){
return vadd_vx_i32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m4_t vadd(vint32m4_t op0, int32_t op1, size_t op2){
return vadd_vx_i32m4(op0, op1, op2);
}
__rvv_overloaded vint32m4_t vadd(vbool8_t op0, vint32m4_t op1, vint32m4_t op2, int32_t op3, size_t op4){
return vadd_vx_i32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m8_t vadd(vint32m8_t op0, int32_t op1, size_t op2){
return vadd_vx_i32m8(op0, op1, op2);
}
__rvv_overloaded vint32m8_t vadd(vbool4_t op0, vint32m8_t op1, vint32m8_t op2, int32_t op3, size_t op4){
return vadd_vx_i32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32mf2_t vadd(vint32mf2_t op0, int32_t op1, size_t op2){
return vadd_vx_i32mf2(op0, op1, op2);
}
__rvv_overloaded vint32mf2_t vadd(vbool64_t op0, vint32mf2_t op1, vint32mf2_t op2, int32_t op3, size_t op4){
return vadd_vx_i32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m1_t vadd(vint64m1_t op0, int64_t op1, size_t op2){
return vadd_vx_i64m1(op0, op1, op2);
}
__rvv_overloaded vint64m1_t vadd(vbool64_t op0, vint64m1_t op1, vint64m1_t op2, int64_t op3, size_t op4){
return vadd_vx_i64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m2_t vadd(vint64m2_t op0, int64_t op1, size_t op2){
return vadd_vx_i64m2(op0, op1, op2);
}
__rvv_overloaded vint64m2_t vadd(vbool32_t op0, vint64m2_t op1, vint64m2_t op2, int64_t op3, size_t op4){
return vadd_vx_i64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m4_t vadd(vint64m4_t op0, int64_t op1, size_t op2){
return vadd_vx_i64m4(op0, op1, op2);
}
__rvv_overloaded vint64m4_t vadd(vbool16_t op0, vint64m4_t op1, vint64m4_t op2, int64_t op3, size_t op4){
return vadd_vx_i64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m8_t vadd(vint64m8_t op0, int64_t op1, size_t op2){
return vadd_vx_i64m8(op0, op1, op2);
}
__rvv_overloaded vint64m8_t vadd(vbool8_t op0, vint64m8_t op1, vint64m8_t op2, int64_t op3, size_t op4){
return vadd_vx_i64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded void vse8(int8_t * op0, vint8m1_t op1, size_t op2){
return vse8_v_i8m1(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool8_t op0, int8_t * op1, vint8m1_t op2, size_t op3){
return vse8_v_i8m1_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse8(int8_t * op0, vint8m2_t op1, size_t op2){
return vse8_v_i8m2(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool4_t op0, int8_t * op1, vint8m2_t op2, size_t op3){
return vse8_v_i8m2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse8(int8_t * op0, vint8m4_t op1, size_t op2){
return vse8_v_i8m4(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool2_t op0, int8_t * op1, vint8m4_t op2, size_t op3){
return vse8_v_i8m4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse8(int8_t * op0, vint8m8_t op1, size_t op2){
return vse8_v_i8m8(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool1_t op0, int8_t * op1, vint8m8_t op2, size_t op3){
return vse8_v_i8m8_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse8(int8_t * op0, vint8mf2_t op1, size_t op2){
return vse8_v_i8mf2(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool16_t op0, int8_t * op1, vint8mf2_t op2, size_t op3){
return vse8_v_i8mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse8(int8_t * op0, vint8mf4_t op1, size_t op2){
return vse8_v_i8mf4(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool32_t op0, int8_t * op1, vint8mf4_t op2, size_t op3){
return vse8_v_i8mf4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse8(int8_t * op0, vint8mf8_t op1, size_t op2){
return vse8_v_i8mf8(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool64_t op0, int8_t * op1, vint8mf8_t op2, size_t op3){
return vse8_v_i8mf8_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint8m1_t vadd(vuint8m1_t op0, vuint8m1_t op1, size_t op2){
return vadd_vv_u8m1(op0, op1, op2);
}
__rvv_overloaded vuint8m1_t vadd(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, vuint8m1_t op3, size_t op4){
return vadd_vv_u8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m2_t vadd(vuint8m2_t op0, vuint8m2_t op1, size_t op2){
return vadd_vv_u8m2(op0, op1, op2);
}
__rvv_overloaded vuint8m2_t vadd(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, vuint8m2_t op3, size_t op4){
return vadd_vv_u8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m4_t vadd(vuint8m4_t op0, vuint8m4_t op1, size_t op2){
return vadd_vv_u8m4(op0, op1, op2);
}
__rvv_overloaded vuint8m4_t vadd(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, vuint8m4_t op3, size_t op4){
return vadd_vv_u8m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m8_t vadd(vuint8m8_t op0, vuint8m8_t op1, size_t op2){
return vadd_vv_u8m8(op0, op1, op2);
}
__rvv_overloaded vuint8m8_t vadd(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, vuint8m8_t op3, size_t op4){
return vadd_vv_u8m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf2_t vadd(vuint8mf2_t op0, vuint8mf2_t op1, size_t op2){
return vadd_vv_u8mf2(op0, op1, op2);
}
__rvv_overloaded vuint8mf2_t vadd(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, vuint8mf2_t op3, size_t op4){
return vadd_vv_u8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf4_t vadd(vuint8mf4_t op0, vuint8mf4_t op1, size_t op2){
return vadd_vv_u8mf4(op0, op1, op2);
}
__rvv_overloaded vuint8mf4_t vadd(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, vuint8mf4_t op3, size_t op4){
return vadd_vv_u8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf8_t vadd(vuint8mf8_t op0, vuint8mf8_t op1, size_t op2){
return vadd_vv_u8mf8(op0, op1, op2);
}
__rvv_overloaded vuint8mf8_t vadd(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, vuint8mf8_t op3, size_t op4){
return vadd_vv_u8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m1_t vadd(vuint16m1_t op0, vuint16m1_t op1, size_t op2){
return vadd_vv_u16m1(op0, op1, op2);
}
__rvv_overloaded vuint16m1_t vadd(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, vuint16m1_t op3, size_t op4){
return vadd_vv_u16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m2_t vadd(vuint16m2_t op0, vuint16m2_t op1, size_t op2){
return vadd_vv_u16m2(op0, op1, op2);
}
__rvv_overloaded vuint16m2_t vadd(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, vuint16m2_t op3, size_t op4){
return vadd_vv_u16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m4_t vadd(vuint16m4_t op0, vuint16m4_t op1, size_t op2){
return vadd_vv_u16m4(op0, op1, op2);
}
__rvv_overloaded vuint16m4_t vadd(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, vuint16m4_t op3, size_t op4){
return vadd_vv_u16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m8_t vadd(vuint16m8_t op0, vuint16m8_t op1, size_t op2){
return vadd_vv_u16m8(op0, op1, op2);
}
__rvv_overloaded vuint16m8_t vadd(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, vuint16m8_t op3, size_t op4){
return vadd_vv_u16m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf2_t vadd(vuint16mf2_t op0, vuint16mf2_t op1, size_t op2){
return vadd_vv_u16mf2(op0, op1, op2);
}
__rvv_overloaded vuint16mf2_t vadd(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, vuint16mf2_t op3, size_t op4){
return vadd_vv_u16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf4_t vadd(vuint16mf4_t op0, vuint16mf4_t op1, size_t op2){
return vadd_vv_u16mf4(op0, op1, op2);
}
__rvv_overloaded vuint16mf4_t vadd(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, vuint16mf4_t op3, size_t op4){
return vadd_vv_u16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m1_t vadd(vuint32m1_t op0, vuint32m1_t op1, size_t op2){
return vadd_vv_u32m1(op0, op1, op2);
}
__rvv_overloaded vuint32m1_t vadd(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, vuint32m1_t op3, size_t op4){
return vadd_vv_u32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m2_t vadd(vuint32m2_t op0, vuint32m2_t op1, size_t op2){
return vadd_vv_u32m2(op0, op1, op2);
}
__rvv_overloaded vuint32m2_t vadd(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, vuint32m2_t op3, size_t op4){
return vadd_vv_u32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m4_t vadd(vuint32m4_t op0, vuint32m4_t op1, size_t op2){
return vadd_vv_u32m4(op0, op1, op2);
}
__rvv_overloaded vuint32m4_t vadd(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, vuint32m4_t op3, size_t op4){
return vadd_vv_u32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m8_t vadd(vuint32m8_t op0, vuint32m8_t op1, size_t op2){
return vadd_vv_u32m8(op0, op1, op2);
}
__rvv_overloaded vuint32m8_t vadd(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, vuint32m8_t op3, size_t op4){
return vadd_vv_u32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32mf2_t vadd(vuint32mf2_t op0, vuint32mf2_t op1, size_t op2){
return vadd_vv_u32mf2(op0, op1, op2);
}
__rvv_overloaded vuint32mf2_t vadd(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, vuint32mf2_t op3, size_t op4){
return vadd_vv_u32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m1_t vadd(vuint64m1_t op0, vuint64m1_t op1, size_t op2){
return vadd_vv_u64m1(op0, op1, op2);
}
__rvv_overloaded vuint64m1_t vadd(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, vuint64m1_t op3, size_t op4){
return vadd_vv_u64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m2_t vadd(vuint64m2_t op0, vuint64m2_t op1, size_t op2){
return vadd_vv_u64m2(op0, op1, op2);
}
__rvv_overloaded vuint64m2_t vadd(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, vuint64m2_t op3, size_t op4){
return vadd_vv_u64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m4_t vadd(vuint64m4_t op0, vuint64m4_t op1, size_t op2){
return vadd_vv_u64m4(op0, op1, op2);
}
__rvv_overloaded vuint64m4_t vadd(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, vuint64m4_t op3, size_t op4){
return vadd_vv_u64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m8_t vadd(vuint64m8_t op0, vuint64m8_t op1, size_t op2){
return vadd_vv_u64m8(op0, op1, op2);
}
__rvv_overloaded vuint64m8_t vadd(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, vuint64m8_t op3, size_t op4){
return vadd_vv_u64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m1_t vadd(vuint8m1_t op0, uint8_t op1, size_t op2){
return vadd_vx_u8m1(op0, op1, op2);
}
__rvv_overloaded vuint8m1_t vadd(vbool8_t op0, vuint8m1_t op1, vuint8m1_t op2, uint8_t op3, size_t op4){
return vadd_vx_u8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m2_t vadd(vuint8m2_t op0, uint8_t op1, size_t op2){
return vadd_vx_u8m2(op0, op1, op2);
}
__rvv_overloaded vuint8m2_t vadd(vbool4_t op0, vuint8m2_t op1, vuint8m2_t op2, uint8_t op3, size_t op4){
return vadd_vx_u8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m4_t vadd(vuint8m4_t op0, uint8_t op1, size_t op2){
return vadd_vx_u8m4(op0, op1, op2);
}
__rvv_overloaded vuint8m4_t vadd(vbool2_t op0, vuint8m4_t op1, vuint8m4_t op2, uint8_t op3, size_t op4){
return vadd_vx_u8m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m8_t vadd(vuint8m8_t op0, uint8_t op1, size_t op2){
return vadd_vx_u8m8(op0, op1, op2);
}
__rvv_overloaded vuint8m8_t vadd(vbool1_t op0, vuint8m8_t op1, vuint8m8_t op2, uint8_t op3, size_t op4){
return vadd_vx_u8m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf2_t vadd(vuint8mf2_t op0, uint8_t op1, size_t op2){
return vadd_vx_u8mf2(op0, op1, op2);
}
__rvv_overloaded vuint8mf2_t vadd(vbool16_t op0, vuint8mf2_t op1, vuint8mf2_t op2, uint8_t op3, size_t op4){
return vadd_vx_u8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf4_t vadd(vuint8mf4_t op0, uint8_t op1, size_t op2){
return vadd_vx_u8mf4(op0, op1, op2);
}
__rvv_overloaded vuint8mf4_t vadd(vbool32_t op0, vuint8mf4_t op1, vuint8mf4_t op2, uint8_t op3, size_t op4){
return vadd_vx_u8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf8_t vadd(vuint8mf8_t op0, uint8_t op1, size_t op2){
return vadd_vx_u8mf8(op0, op1, op2);
}
__rvv_overloaded vuint8mf8_t vadd(vbool64_t op0, vuint8mf8_t op1, vuint8mf8_t op2, uint8_t op3, size_t op4){
return vadd_vx_u8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m1_t vadd(vuint16m1_t op0, uint16_t op1, size_t op2){
return vadd_vx_u16m1(op0, op1, op2);
}
__rvv_overloaded vuint16m1_t vadd(vbool16_t op0, vuint16m1_t op1, vuint16m1_t op2, uint16_t op3, size_t op4){
return vadd_vx_u16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m2_t vadd(vuint16m2_t op0, uint16_t op1, size_t op2){
return vadd_vx_u16m2(op0, op1, op2);
}
__rvv_overloaded vuint16m2_t vadd(vbool8_t op0, vuint16m2_t op1, vuint16m2_t op2, uint16_t op3, size_t op4){
return vadd_vx_u16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m4_t vadd(vuint16m4_t op0, uint16_t op1, size_t op2){
return vadd_vx_u16m4(op0, op1, op2);
}
__rvv_overloaded vuint16m4_t vadd(vbool4_t op0, vuint16m4_t op1, vuint16m4_t op2, uint16_t op3, size_t op4){
return vadd_vx_u16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m8_t vadd(vuint16m8_t op0, uint16_t op1, size_t op2){
return vadd_vx_u16m8(op0, op1, op2);
}
__rvv_overloaded vuint16m8_t vadd(vbool2_t op0, vuint16m8_t op1, vuint16m8_t op2, uint16_t op3, size_t op4){
return vadd_vx_u16m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf2_t vadd(vuint16mf2_t op0, uint16_t op1, size_t op2){
return vadd_vx_u16mf2(op0, op1, op2);
}
__rvv_overloaded vuint16mf2_t vadd(vbool32_t op0, vuint16mf2_t op1, vuint16mf2_t op2, uint16_t op3, size_t op4){
return vadd_vx_u16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf4_t vadd(vuint16mf4_t op0, uint16_t op1, size_t op2){
return vadd_vx_u16mf4(op0, op1, op2);
}
__rvv_overloaded vuint16mf4_t vadd(vbool64_t op0, vuint16mf4_t op1, vuint16mf4_t op2, uint16_t op3, size_t op4){
return vadd_vx_u16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m1_t vadd(vuint32m1_t op0, uint32_t op1, size_t op2){
return vadd_vx_u32m1(op0, op1, op2);
}
__rvv_overloaded vuint32m1_t vadd(vbool32_t op0, vuint32m1_t op1, vuint32m1_t op2, uint32_t op3, size_t op4){
return vadd_vx_u32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m2_t vadd(vuint32m2_t op0, uint32_t op1, size_t op2){
return vadd_vx_u32m2(op0, op1, op2);
}
__rvv_overloaded vuint32m2_t vadd(vbool16_t op0, vuint32m2_t op1, vuint32m2_t op2, uint32_t op3, size_t op4){
return vadd_vx_u32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m4_t vadd(vuint32m4_t op0, uint32_t op1, size_t op2){
return vadd_vx_u32m4(op0, op1, op2);
}
__rvv_overloaded vuint32m4_t vadd(vbool8_t op0, vuint32m4_t op1, vuint32m4_t op2, uint32_t op3, size_t op4){
return vadd_vx_u32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m8_t vadd(vuint32m8_t op0, uint32_t op1, size_t op2){
return vadd_vx_u32m8(op0, op1, op2);
}
__rvv_overloaded vuint32m8_t vadd(vbool4_t op0, vuint32m8_t op1, vuint32m8_t op2, uint32_t op3, size_t op4){
return vadd_vx_u32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32mf2_t vadd(vuint32mf2_t op0, uint32_t op1, size_t op2){
return vadd_vx_u32mf2(op0, op1, op2);
}
__rvv_overloaded vuint32mf2_t vadd(vbool64_t op0, vuint32mf2_t op1, vuint32mf2_t op2, uint32_t op3, size_t op4){
return vadd_vx_u32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m1_t vadd(vuint64m1_t op0, uint64_t op1, size_t op2){
return vadd_vx_u64m1(op0, op1, op2);
}
__rvv_overloaded vuint64m1_t vadd(vbool64_t op0, vuint64m1_t op1, vuint64m1_t op2, uint64_t op3, size_t op4){
return vadd_vx_u64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m2_t vadd(vuint64m2_t op0, uint64_t op1, size_t op2){
return vadd_vx_u64m2(op0, op1, op2);
}
__rvv_overloaded vuint64m2_t vadd(vbool32_t op0, vuint64m2_t op1, vuint64m2_t op2, uint64_t op3, size_t op4){
return vadd_vx_u64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m4_t vadd(vuint64m4_t op0, uint64_t op1, size_t op2){
return vadd_vx_u64m4(op0, op1, op2);
}
__rvv_overloaded vuint64m4_t vadd(vbool16_t op0, vuint64m4_t op1, vuint64m4_t op2, uint64_t op3, size_t op4){
return vadd_vx_u64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m8_t vadd(vuint64m8_t op0, uint64_t op1, size_t op2){
return vadd_vx_u64m8(op0, op1, op2);
}
__rvv_overloaded vuint64m8_t vadd(vbool8_t op0, vuint64m8_t op1, vuint64m8_t op2, uint64_t op3, size_t op4){
return vadd_vx_u64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded void vse8(uint8_t * op0, vuint8m1_t op1, size_t op2){
return vse8_v_u8m1(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool8_t op0, uint8_t * op1, vuint8m1_t op2, size_t op3){
return vse8_v_u8m1_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse8(uint8_t * op0, vuint8m2_t op1, size_t op2){
return vse8_v_u8m2(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool4_t op0, uint8_t * op1, vuint8m2_t op2, size_t op3){
return vse8_v_u8m2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse8(uint8_t * op0, vuint8m4_t op1, size_t op2){
return vse8_v_u8m4(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool2_t op0, uint8_t * op1, vuint8m4_t op2, size_t op3){
return vse8_v_u8m4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse8(uint8_t * op0, vuint8m8_t op1, size_t op2){
return vse8_v_u8m8(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool1_t op0, uint8_t * op1, vuint8m8_t op2, size_t op3){
return vse8_v_u8m8_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse8(uint8_t * op0, vuint8mf2_t op1, size_t op2){
return vse8_v_u8mf2(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool16_t op0, uint8_t * op1, vuint8mf2_t op2, size_t op3){
return vse8_v_u8mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse8(uint8_t * op0, vuint8mf4_t op1, size_t op2){
return vse8_v_u8mf4(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool32_t op0, uint8_t * op1, vuint8mf4_t op2, size_t op3){
return vse8_v_u8mf4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse8(uint8_t * op0, vuint8mf8_t op1, size_t op2){
return vse8_v_u8mf8(op0, op1, op2);
}
__rvv_overloaded void vse8(vbool64_t op0, uint8_t * op1, vuint8mf8_t op2, size_t op3){
return vse8_v_u8mf8_m(op0, op1, op2, op3);
}
__rvv_overloaded vint16m1_t vle16(vbool16_t op0, vint16m1_t op1, const int16_t * op2, size_t op3){
return vle16_v_i16m1_m(op0, op1, op2, op3);
}
__rvv_overloaded vint16m2_t vle16(vbool8_t op0, vint16m2_t op1, const int16_t * op2, size_t op3){
return vle16_v_i16m2_m(op0, op1, op2, op3);
}
__rvv_overloaded vint16m4_t vle16(vbool4_t op0, vint16m4_t op1, const int16_t * op2, size_t op3){
return vle16_v_i16m4_m(op0, op1, op2, op3);
}
__rvv_overloaded vint16m8_t vle16(vbool2_t op0, vint16m8_t op1, const int16_t * op2, size_t op3){
return vle16_v_i16m8_m(op0, op1, op2, op3);
}
__rvv_overloaded vint16mf2_t vle16(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, size_t op3){
return vle16_v_i16mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded vint16mf4_t vle16(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, size_t op3){
return vle16_v_i16mf4_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint16m1_t vle16(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, size_t op3){
return vle16_v_u16m1_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint16m2_t vle16(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, size_t op3){
return vle16_v_u16m2_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint16m4_t vle16(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, size_t op3){
return vle16_v_u16m4_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint16m8_t vle16(vbool2_t op0, vuint16m8_t op1, const uint16_t * op2, size_t op3){
return vle16_v_u16m8_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint16mf2_t vle16(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, size_t op3){
return vle16_v_u16mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint16mf4_t vle16(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, size_t op3){
return vle16_v_u16mf4_m(op0, op1, op2, op3);
}
__rvv_overloaded vint32m1_t vle32(vbool32_t op0, vint32m1_t op1, const int32_t * op2, size_t op3){
return vle32_v_i32m1_m(op0, op1, op2, op3);
}
__rvv_overloaded vint32m2_t vle32(vbool16_t op0, vint32m2_t op1, const int32_t * op2, size_t op3){
return vle32_v_i32m2_m(op0, op1, op2, op3);
}
__rvv_overloaded vint32m4_t vle32(vbool8_t op0, vint32m4_t op1, const int32_t * op2, size_t op3){
return vle32_v_i32m4_m(op0, op1, op2, op3);
}
__rvv_overloaded vint32m8_t vle32(vbool4_t op0, vint32m8_t op1, const int32_t * op2, size_t op3){
return vle32_v_i32m8_m(op0, op1, op2, op3);
}
__rvv_overloaded vint32mf2_t vle32(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, size_t op3){
return vle32_v_i32mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint32m1_t vle32(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, size_t op3){
return vle32_v_u32m1_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint32m2_t vle32(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, size_t op3){
return vle32_v_u32m2_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint32m4_t vle32(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, size_t op3){
return vle32_v_u32m4_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint32m8_t vle32(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, size_t op3){
return vle32_v_u32m8_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint32mf2_t vle32(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, size_t op3){
return vle32_v_u32mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded vint64m1_t vle64(vbool64_t op0, vint64m1_t op1, const int64_t * op2, size_t op3){
return vle64_v_i64m1_m(op0, op1, op2, op3);
}
__rvv_overloaded vint64m2_t vle64(vbool32_t op0, vint64m2_t op1, const int64_t * op2, size_t op3){
return vle64_v_i64m2_m(op0, op1, op2, op3);
}
__rvv_overloaded vint64m4_t vle64(vbool16_t op0, vint64m4_t op1, const int64_t * op2, size_t op3){
return vle64_v_i64m4_m(op0, op1, op2, op3);
}
__rvv_overloaded vint64m8_t vle64(vbool8_t op0, vint64m8_t op1, const int64_t * op2, size_t op3){
return vle64_v_i64m8_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint64m1_t vle64(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, size_t op3){
return vle64_v_u64m1_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint64m2_t vle64(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, size_t op3){
return vle64_v_u64m2_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint64m4_t vle64(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, size_t op3){
return vle64_v_u64m4_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint64m8_t vle64(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, size_t op3){
return vle64_v_u64m8_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint8m1_t vle8(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, size_t op3){
return vle8_v_u8m1_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint8m2_t vle8(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, size_t op3){
return vle8_v_u8m2_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint8m4_t vle8(vbool2_t op0, vuint8m4_t op1, const uint8_t * op2, size_t op3){
return vle8_v_u8m4_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint8m8_t vle8(vbool1_t op0, vuint8m8_t op1, const uint8_t * op2, size_t op3){
return vle8_v_u8m8_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint8mf2_t vle8(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, size_t op3){
return vle8_v_u8mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint8mf4_t vle8(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, size_t op3){
return vle8_v_u8mf4_m(op0, op1, op2, op3);
}
__rvv_overloaded vuint8mf8_t vle8(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, size_t op3){
return vle8_v_u8mf8_m(op0, op1, op2, op3);
}
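// Unit-stride stores for 16-, 32- and 64-bit elements (vse16/vse32/vse64),
// each in an unmasked form (base, value, vl) and a masked form with a leading
// vboolN_t operand.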
__rvv_overloaded void vse16(int16_t * op0, vint16m1_t op1, size_t op2){
return vse16_v_i16m1(op0, op1, op2);
}
__rvv_overloaded void vse16(vbool16_t op0, int16_t * op1, vint16m1_t op2, size_t op3){
return vse16_v_i16m1_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse16(int16_t * op0, vint16m2_t op1, size_t op2){
return vse16_v_i16m2(op0, op1, op2);
}
__rvv_overloaded void vse16(vbool8_t op0, int16_t * op1, vint16m2_t op2, size_t op3){
return vse16_v_i16m2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse16(int16_t * op0, vint16m4_t op1, size_t op2){
return vse16_v_i16m4(op0, op1, op2);
}
__rvv_overloaded void vse16(vbool4_t op0, int16_t * op1, vint16m4_t op2, size_t op3){
return vse16_v_i16m4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse16(int16_t * op0, vint16m8_t op1, size_t op2){
return vse16_v_i16m8(op0, op1, op2);
}
__rvv_overloaded void vse16(vbool2_t op0, int16_t * op1, vint16m8_t op2, size_t op3){
return vse16_v_i16m8_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse16(int16_t * op0, vint16mf2_t op1, size_t op2){
return vse16_v_i16mf2(op0, op1, op2);
}
__rvv_overloaded void vse16(vbool32_t op0, int16_t * op1, vint16mf2_t op2, size_t op3){
return vse16_v_i16mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse16(int16_t * op0, vint16mf4_t op1, size_t op2){
return vse16_v_i16mf4(op0, op1, op2);
}
__rvv_overloaded void vse16(vbool64_t op0, int16_t * op1, vint16mf4_t op2, size_t op3){
return vse16_v_i16mf4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse16(uint16_t * op0, vuint16m1_t op1, size_t op2){
return vse16_v_u16m1(op0, op1, op2);
}
__rvv_overloaded void vse16(vbool16_t op0, uint16_t * op1, vuint16m1_t op2, size_t op3){
return vse16_v_u16m1_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse16(uint16_t * op0, vuint16m2_t op1, size_t op2){
return vse16_v_u16m2(op0, op1, op2);
}
__rvv_overloaded void vse16(vbool8_t op0, uint16_t * op1, vuint16m2_t op2, size_t op3){
return vse16_v_u16m2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse16(uint16_t * op0, vuint16m4_t op1, size_t op2){
return vse16_v_u16m4(op0, op1, op2);
}
__rvv_overloaded void vse16(vbool4_t op0, uint16_t * op1, vuint16m4_t op2, size_t op3){
return vse16_v_u16m4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse16(uint16_t * op0, vuint16m8_t op1, size_t op2){
return vse16_v_u16m8(op0, op1, op2);
}
__rvv_overloaded void vse16(vbool2_t op0, uint16_t * op1, vuint16m8_t op2, size_t op3){
return vse16_v_u16m8_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse16(uint16_t * op0, vuint16mf2_t op1, size_t op2){
return vse16_v_u16mf2(op0, op1, op2);
}
__rvv_overloaded void vse16(vbool32_t op0, uint16_t * op1, vuint16mf2_t op2, size_t op3){
return vse16_v_u16mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse16(uint16_t * op0, vuint16mf4_t op1, size_t op2){
return vse16_v_u16mf4(op0, op1, op2);
}
__rvv_overloaded void vse16(vbool64_t op0, uint16_t * op1, vuint16mf4_t op2, size_t op3){
return vse16_v_u16mf4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(int32_t * op0, vint32m1_t op1, size_t op2){
return vse32_v_i32m1(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool32_t op0, int32_t * op1, vint32m1_t op2, size_t op3){
return vse32_v_i32m1_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(int32_t * op0, vint32m2_t op1, size_t op2){
return vse32_v_i32m2(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool16_t op0, int32_t * op1, vint32m2_t op2, size_t op3){
return vse32_v_i32m2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(int32_t * op0, vint32m4_t op1, size_t op2){
return vse32_v_i32m4(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool8_t op0, int32_t * op1, vint32m4_t op2, size_t op3){
return vse32_v_i32m4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(int32_t * op0, vint32m8_t op1, size_t op2){
return vse32_v_i32m8(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool4_t op0, int32_t * op1, vint32m8_t op2, size_t op3){
return vse32_v_i32m8_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(int32_t * op0, vint32mf2_t op1, size_t op2){
return vse32_v_i32mf2(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool64_t op0, int32_t * op1, vint32mf2_t op2, size_t op3){
return vse32_v_i32mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(uint32_t * op0, vuint32m1_t op1, size_t op2){
return vse32_v_u32m1(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool32_t op0, uint32_t * op1, vuint32m1_t op2, size_t op3){
return vse32_v_u32m1_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(uint32_t * op0, vuint32m2_t op1, size_t op2){
return vse32_v_u32m2(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool16_t op0, uint32_t * op1, vuint32m2_t op2, size_t op3){
return vse32_v_u32m2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(uint32_t * op0, vuint32m4_t op1, size_t op2){
return vse32_v_u32m4(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool8_t op0, uint32_t * op1, vuint32m4_t op2, size_t op3){
return vse32_v_u32m4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(uint32_t * op0, vuint32m8_t op1, size_t op2){
return vse32_v_u32m8(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool4_t op0, uint32_t * op1, vuint32m8_t op2, size_t op3){
return vse32_v_u32m8_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(uint32_t * op0, vuint32mf2_t op1, size_t op2){
return vse32_v_u32mf2(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool64_t op0, uint32_t * op1, vuint32mf2_t op2, size_t op3){
return vse32_v_u32mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse64(int64_t * op0, vint64m1_t op1, size_t op2){
return vse64_v_i64m1(op0, op1, op2);
}
__rvv_overloaded void vse64(vbool64_t op0, int64_t * op1, vint64m1_t op2, size_t op3){
return vse64_v_i64m1_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse64(int64_t * op0, vint64m2_t op1, size_t op2){
return vse64_v_i64m2(op0, op1, op2);
}
__rvv_overloaded void vse64(vbool32_t op0, int64_t * op1, vint64m2_t op2, size_t op3){
return vse64_v_i64m2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse64(int64_t * op0, vint64m4_t op1, size_t op2){
return vse64_v_i64m4(op0, op1, op2);
}
__rvv_overloaded void vse64(vbool16_t op0, int64_t * op1, vint64m4_t op2, size_t op3){
return vse64_v_i64m4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse64(int64_t * op0, vint64m8_t op1, size_t op2){
return vse64_v_i64m8(op0, op1, op2);
}
__rvv_overloaded void vse64(vbool8_t op0, int64_t * op1, vint64m8_t op2, size_t op3){
return vse64_v_i64m8_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse64(uint64_t * op0, vuint64m1_t op1, size_t op2){
return vse64_v_u64m1(op0, op1, op2);
}
__rvv_overloaded void vse64(vbool64_t op0, uint64_t * op1, vuint64m1_t op2, size_t op3){
return vse64_v_u64m1_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse64(uint64_t * op0, vuint64m2_t op1, size_t op2){
return vse64_v_u64m2(op0, op1, op2);
}
__rvv_overloaded void vse64(vbool32_t op0, uint64_t * op1, vuint64m2_t op2, size_t op3){
return vse64_v_u64m2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse64(uint64_t * op0, vuint64m4_t op1, size_t op2){
return vse64_v_u64m4(op0, op1, op2);
}
__rvv_overloaded void vse64(vbool16_t op0, uint64_t * op1, vuint64m4_t op2, size_t op3){
return vse64_v_u64m4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse64(uint64_t * op0, vuint64m8_t op1, size_t op2){
return vse64_v_u64m8(op0, op1, op2);
}
__rvv_overloaded void vse64(vbool8_t op0, uint64_t * op1, vuint64m8_t op2, size_t op3){
return vse64_v_u64m8_m(op0, op1, op2, op3);
}
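// Indexed-unordered loads (vluxei8/16/32/64): the numeric suffix is the width
// in bits of the unsigned index elements (op1, or op3 in the masked form);
// byte offsets from the base pointer are taken from that index vector.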
__rvv_overloaded vint8m1_t vluxei8(const int8_t * op0, vuint8m1_t op1, size_t op2){
return vluxei8_v_i8m1(op0, op1, op2);
}
__rvv_overloaded vint8m1_t vluxei8(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint8m1_t op3, size_t op4){
return vluxei8_v_i8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m2_t vluxei8(const int8_t * op0, vuint8m2_t op1, size_t op2){
return vluxei8_v_i8m2(op0, op1, op2);
}
__rvv_overloaded vint8m2_t vluxei8(vbool4_t op0, vint8m2_t op1, const int8_t * op2, vuint8m2_t op3, size_t op4){
return vluxei8_v_i8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m4_t vluxei8(const int8_t * op0, vuint8m4_t op1, size_t op2){
return vluxei8_v_i8m4(op0, op1, op2);
}
__rvv_overloaded vint8m4_t vluxei8(vbool2_t op0, vint8m4_t op1, const int8_t * op2, vuint8m4_t op3, size_t op4){
return vluxei8_v_i8m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m8_t vluxei8(const int8_t * op0, vuint8m8_t op1, size_t op2){
return vluxei8_v_i8m8(op0, op1, op2);
}
__rvv_overloaded vint8m8_t vluxei8(vbool1_t op0, vint8m8_t op1, const int8_t * op2, vuint8m8_t op3, size_t op4){
return vluxei8_v_i8m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf2_t vluxei8(const int8_t * op0, vuint8mf2_t op1, size_t op2){
return vluxei8_v_i8mf2(op0, op1, op2);
}
__rvv_overloaded vint8mf2_t vluxei8(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint8mf2_t op3, size_t op4){
return vluxei8_v_i8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf4_t vluxei8(const int8_t * op0, vuint8mf4_t op1, size_t op2){
return vluxei8_v_i8mf4(op0, op1, op2);
}
__rvv_overloaded vint8mf4_t vluxei8(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint8mf4_t op3, size_t op4){
return vluxei8_v_i8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf8_t vluxei8(const int8_t * op0, vuint8mf8_t op1, size_t op2){
return vluxei8_v_i8mf8(op0, op1, op2);
}
__rvv_overloaded vint8mf8_t vluxei8(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint8mf8_t op3, size_t op4){
return vluxei8_v_i8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m1_t vluxei16(const int8_t * op0, vuint16m2_t op1, size_t op2){
return vluxei16_v_i8m1(op0, op1, op2);
}
__rvv_overloaded vint8m1_t vluxei16(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint16m2_t op3, size_t op4){
return vluxei16_v_i8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m2_t vluxei16(const int8_t * op0, vuint16m4_t op1, size_t op2){
return vluxei16_v_i8m2(op0, op1, op2);
}
__rvv_overloaded vint8m2_t vluxei16(vbool4_t op0, vint8m2_t op1, const int8_t * op2, vuint16m4_t op3, size_t op4){
return vluxei16_v_i8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m4_t vluxei16(const int8_t * op0, vuint16m8_t op1, size_t op2){
return vluxei16_v_i8m4(op0, op1, op2);
}
__rvv_overloaded vint8m4_t vluxei16(vbool2_t op0, vint8m4_t op1, const int8_t * op2, vuint16m8_t op3, size_t op4){
return vluxei16_v_i8m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf2_t vluxei16(const int8_t * op0, vuint16m1_t op1, size_t op2){
return vluxei16_v_i8mf2(op0, op1, op2);
}
__rvv_overloaded vint8mf2_t vluxei16(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint16m1_t op3, size_t op4){
return vluxei16_v_i8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf4_t vluxei16(const int8_t * op0, vuint16mf2_t op1, size_t op2){
return vluxei16_v_i8mf4(op0, op1, op2);
}
__rvv_overloaded vint8mf4_t vluxei16(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint16mf2_t op3, size_t op4){
return vluxei16_v_i8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf8_t vluxei16(const int8_t * op0, vuint16mf4_t op1, size_t op2){
return vluxei16_v_i8mf8(op0, op1, op2);
}
__rvv_overloaded vint8mf8_t vluxei16(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint16mf4_t op3, size_t op4){
return vluxei16_v_i8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m1_t vluxei16(const uint8_t * op0, vuint16m2_t op1, size_t op2){
return vluxei16_v_u8m1(op0, op1, op2);
}
__rvv_overloaded vuint8m1_t vluxei16(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint16m2_t op3, size_t op4){
return vluxei16_v_u8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m2_t vluxei16(const uint8_t * op0, vuint16m4_t op1, size_t op2){
return vluxei16_v_u8m2(op0, op1, op2);
}
__rvv_overloaded vuint8m2_t vluxei16(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, vuint16m4_t op3, size_t op4){
return vluxei16_v_u8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m4_t vluxei16(const uint8_t * op0, vuint16m8_t op1, size_t op2){
return vluxei16_v_u8m4(op0, op1, op2);
}
__rvv_overloaded vuint8m4_t vluxei16(vbool2_t op0, vuint8m4_t op1, const uint8_t * op2, vuint16m8_t op3, size_t op4){
return vluxei16_v_u8m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf2_t vluxei16(const uint8_t * op0, vuint16m1_t op1, size_t op2){
return vluxei16_v_u8mf2(op0, op1, op2);
}
__rvv_overloaded vuint8mf2_t vluxei16(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint16m1_t op3, size_t op4){
return vluxei16_v_u8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf4_t vluxei16(const uint8_t * op0, vuint16mf2_t op1, size_t op2){
return vluxei16_v_u8mf4(op0, op1, op2);
}
__rvv_overloaded vuint8mf4_t vluxei16(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint16mf2_t op3, size_t op4){
return vluxei16_v_u8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf8_t vluxei16(const uint8_t * op0, vuint16mf4_t op1, size_t op2){
return vluxei16_v_u8mf8(op0, op1, op2);
}
__rvv_overloaded vuint8mf8_t vluxei16(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint16mf4_t op3, size_t op4){
return vluxei16_v_u8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m1_t vluxei32(const int8_t * op0, vuint32m4_t op1, size_t op2){
return vluxei32_v_i8m1(op0, op1, op2);
}
__rvv_overloaded vint8m1_t vluxei32(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint32m4_t op3, size_t op4){
return vluxei32_v_i8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m2_t vluxei32(const int8_t * op0, vuint32m8_t op1, size_t op2){
return vluxei32_v_i8m2(op0, op1, op2);
}
__rvv_overloaded vint8m2_t vluxei32(vbool4_t op0, vint8m2_t op1, const int8_t * op2, vuint32m8_t op3, size_t op4){
return vluxei32_v_i8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf2_t vluxei32(const int8_t * op0, vuint32m2_t op1, size_t op2){
return vluxei32_v_i8mf2(op0, op1, op2);
}
__rvv_overloaded vint8mf2_t vluxei32(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint32m2_t op3, size_t op4){
return vluxei32_v_i8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf4_t vluxei32(const int8_t * op0, vuint32m1_t op1, size_t op2){
return vluxei32_v_i8mf4(op0, op1, op2);
}
__rvv_overloaded vint8mf4_t vluxei32(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint32m1_t op3, size_t op4){
return vluxei32_v_i8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf8_t vluxei32(const int8_t * op0, vuint32mf2_t op1, size_t op2){
return vluxei32_v_i8mf8(op0, op1, op2);
}
__rvv_overloaded vint8mf8_t vluxei32(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint32mf2_t op3, size_t op4){
return vluxei32_v_i8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m1_t vluxei32(const uint8_t * op0, vuint32m4_t op1, size_t op2){
return vluxei32_v_u8m1(op0, op1, op2);
}
__rvv_overloaded vuint8m1_t vluxei32(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint32m4_t op3, size_t op4){
return vluxei32_v_u8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m2_t vluxei32(const uint8_t * op0, vuint32m8_t op1, size_t op2){
return vluxei32_v_u8m2(op0, op1, op2);
}
__rvv_overloaded vuint8m2_t vluxei32(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, vuint32m8_t op3, size_t op4){
return vluxei32_v_u8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf2_t vluxei32(const uint8_t * op0, vuint32m2_t op1, size_t op2){
return vluxei32_v_u8mf2(op0, op1, op2);
}
__rvv_overloaded vuint8mf2_t vluxei32(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint32m2_t op3, size_t op4){
return vluxei32_v_u8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf4_t vluxei32(const uint8_t * op0, vuint32m1_t op1, size_t op2){
return vluxei32_v_u8mf4(op0, op1, op2);
}
__rvv_overloaded vuint8mf4_t vluxei32(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint32m1_t op3, size_t op4){
return vluxei32_v_u8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf8_t vluxei32(const uint8_t * op0, vuint32mf2_t op1, size_t op2){
return vluxei32_v_u8mf8(op0, op1, op2);
}
__rvv_overloaded vuint8mf8_t vluxei32(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint32mf2_t op3, size_t op4){
return vluxei32_v_u8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m1_t vluxei64(const int8_t * op0, vuint64m8_t op1, size_t op2){
return vluxei64_v_i8m1(op0, op1, op2);
}
__rvv_overloaded vint8m1_t vluxei64(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint64m8_t op3, size_t op4){
return vluxei64_v_i8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf2_t vluxei64(const int8_t * op0, vuint64m4_t op1, size_t op2){
return vluxei64_v_i8mf2(op0, op1, op2);
}
__rvv_overloaded vint8mf2_t vluxei64(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint64m4_t op3, size_t op4){
return vluxei64_v_i8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf4_t vluxei64(const int8_t * op0, vuint64m2_t op1, size_t op2){
return vluxei64_v_i8mf4(op0, op1, op2);
}
__rvv_overloaded vint8mf4_t vluxei64(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint64m2_t op3, size_t op4){
return vluxei64_v_i8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf8_t vluxei64(const int8_t * op0, vuint64m1_t op1, size_t op2){
return vluxei64_v_i8mf8(op0, op1, op2);
}
__rvv_overloaded vint8mf8_t vluxei64(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint64m1_t op3, size_t op4){
return vluxei64_v_i8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m1_t vluxei64(const uint8_t * op0, vuint64m8_t op1, size_t op2){
return vluxei64_v_u8m1(op0, op1, op2);
}
__rvv_overloaded vuint8m1_t vluxei64(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint64m8_t op3, size_t op4){
return vluxei64_v_u8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf2_t vluxei64(const uint8_t * op0, vuint64m4_t op1, size_t op2){
return vluxei64_v_u8mf2(op0, op1, op2);
}
__rvv_overloaded vuint8mf2_t vluxei64(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint64m4_t op3, size_t op4){
return vluxei64_v_u8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf4_t vluxei64(const uint8_t * op0, vuint64m2_t op1, size_t op2){
return vluxei64_v_u8mf4(op0, op1, op2);
}
__rvv_overloaded vuint8mf4_t vluxei64(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint64m2_t op3, size_t op4){
return vluxei64_v_u8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf8_t vluxei64(const uint8_t * op0, vuint64m1_t op1, size_t op2){
return vluxei64_v_u8mf8(op0, op1, op2);
}
__rvv_overloaded vuint8mf8_t vluxei64(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint64m1_t op3, size_t op4){
return vluxei64_v_u8mf8_m(op0, op1, op2, op3, op4);
}
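// The same vluxei* overload pattern continues below for the wider data element
// types; data/index combinations whose index vector would need a register
// group larger than LMUL=8 have no overload.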
__rvv_overloaded vint16m1_t vluxei8(const int16_t * op0, vuint8mf2_t op1, size_t op2){
return vluxei8_v_i16m1(op0, op1, op2);
}
__rvv_overloaded vint16m1_t vluxei8(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint8mf2_t op3, size_t op4){
return vluxei8_v_i16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m2_t vluxei8(const int16_t * op0, vuint8m1_t op1, size_t op2){
return vluxei8_v_i16m2(op0, op1, op2);
}
__rvv_overloaded vint16m2_t vluxei8(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint8m1_t op3, size_t op4){
return vluxei8_v_i16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m4_t vluxei8(const int16_t * op0, vuint8m2_t op1, size_t op2){
return vluxei8_v_i16m4(op0, op1, op2);
}
__rvv_overloaded vint16m4_t vluxei8(vbool4_t op0, vint16m4_t op1, const int16_t * op2, vuint8m2_t op3, size_t op4){
return vluxei8_v_i16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m8_t vluxei8(const int16_t * op0, vuint8m4_t op1, size_t op2){
return vluxei8_v_i16m8(op0, op1, op2);
}
__rvv_overloaded vint16m8_t vluxei8(vbool2_t op0, vint16m8_t op1, const int16_t * op2, vuint8m4_t op3, size_t op4){
return vluxei8_v_i16m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf2_t vluxei8(const int16_t * op0, vuint8mf4_t op1, size_t op2){
return vluxei8_v_i16mf2(op0, op1, op2);
}
__rvv_overloaded vint16mf2_t vluxei8(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint8mf4_t op3, size_t op4){
return vluxei8_v_i16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf4_t vluxei8(const int16_t * op0, vuint8mf8_t op1, size_t op2){
return vluxei8_v_i16mf4(op0, op1, op2);
}
__rvv_overloaded vint16mf4_t vluxei8(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint8mf8_t op3, size_t op4){
return vluxei8_v_i16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m1_t vluxei8(const uint16_t * op0, vuint8mf2_t op1, size_t op2){
return vluxei8_v_u16m1(op0, op1, op2);
}
__rvv_overloaded vuint16m1_t vluxei8(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint8mf2_t op3, size_t op4){
return vluxei8_v_u16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m2_t vluxei8(const uint16_t * op0, vuint8m1_t op1, size_t op2){
return vluxei8_v_u16m2(op0, op1, op2);
}
__rvv_overloaded vuint16m2_t vluxei8(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint8m1_t op3, size_t op4){
return vluxei8_v_u16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m4_t vluxei8(const uint16_t * op0, vuint8m2_t op1, size_t op2){
return vluxei8_v_u16m4(op0, op1, op2);
}
__rvv_overloaded vuint16m4_t vluxei8(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, vuint8m2_t op3, size_t op4){
return vluxei8_v_u16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m8_t vluxei8(const uint16_t * op0, vuint8m4_t op1, size_t op2){
return vluxei8_v_u16m8(op0, op1, op2);
}
__rvv_overloaded vuint16m8_t vluxei8(vbool2_t op0, vuint16m8_t op1, const uint16_t * op2, vuint8m4_t op3, size_t op4){
return vluxei8_v_u16m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf2_t vluxei8(const uint16_t * op0, vuint8mf4_t op1, size_t op2){
return vluxei8_v_u16mf2(op0, op1, op2);
}
__rvv_overloaded vuint16mf2_t vluxei8(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint8mf4_t op3, size_t op4){
return vluxei8_v_u16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf4_t vluxei8(const uint16_t * op0, vuint8mf8_t op1, size_t op2){
return vluxei8_v_u16mf4(op0, op1, op2);
}
__rvv_overloaded vuint16mf4_t vluxei8(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint8mf8_t op3, size_t op4){
return vluxei8_v_u16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m1_t vluxei16(const int16_t * op0, vuint16m1_t op1, size_t op2){
return vluxei16_v_i16m1(op0, op1, op2);
}
__rvv_overloaded vint16m1_t vluxei16(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint16m1_t op3, size_t op4){
return vluxei16_v_i16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m2_t vluxei16(const int16_t * op0, vuint16m2_t op1, size_t op2){
return vluxei16_v_i16m2(op0, op1, op2);
}
__rvv_overloaded vint16m2_t vluxei16(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint16m2_t op3, size_t op4){
return vluxei16_v_i16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m4_t vluxei16(const int16_t * op0, vuint16m4_t op1, size_t op2){
return vluxei16_v_i16m4(op0, op1, op2);
}
__rvv_overloaded vint16m4_t vluxei16(vbool4_t op0, vint16m4_t op1, const int16_t * op2, vuint16m4_t op3, size_t op4){
return vluxei16_v_i16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m8_t vluxei16(const int16_t * op0, vuint16m8_t op1, size_t op2){
return vluxei16_v_i16m8(op0, op1, op2);
}
__rvv_overloaded vint16m8_t vluxei16(vbool2_t op0, vint16m8_t op1, const int16_t * op2, vuint16m8_t op3, size_t op4){
return vluxei16_v_i16m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf2_t vluxei16(const int16_t * op0, vuint16mf2_t op1, size_t op2){
return vluxei16_v_i16mf2(op0, op1, op2);
}
__rvv_overloaded vint16mf2_t vluxei16(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint16mf2_t op3, size_t op4){
return vluxei16_v_i16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf4_t vluxei16(const int16_t * op0, vuint16mf4_t op1, size_t op2){
return vluxei16_v_i16mf4(op0, op1, op2);
}
__rvv_overloaded vint16mf4_t vluxei16(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint16mf4_t op3, size_t op4){
return vluxei16_v_i16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m1_t vluxei16(const uint16_t * op0, vuint16m1_t op1, size_t op2){
return vluxei16_v_u16m1(op0, op1, op2);
}
__rvv_overloaded vuint16m1_t vluxei16(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint16m1_t op3, size_t op4){
return vluxei16_v_u16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m2_t vluxei16(const uint16_t * op0, vuint16m2_t op1, size_t op2){
return vluxei16_v_u16m2(op0, op1, op2);
}
__rvv_overloaded vuint16m2_t vluxei16(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint16m2_t op3, size_t op4){
return vluxei16_v_u16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m4_t vluxei16(const uint16_t * op0, vuint16m4_t op1, size_t op2){
return vluxei16_v_u16m4(op0, op1, op2);
}
__rvv_overloaded vuint16m4_t vluxei16(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, vuint16m4_t op3, size_t op4){
return vluxei16_v_u16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m8_t vluxei16(const uint16_t * op0, vuint16m8_t op1, size_t op2){
return vluxei16_v_u16m8(op0, op1, op2);
}
__rvv_overloaded vuint16m8_t vluxei16(vbool2_t op0, vuint16m8_t op1, const uint16_t * op2, vuint16m8_t op3, size_t op4){
return vluxei16_v_u16m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf2_t vluxei16(const uint16_t * op0, vuint16mf2_t op1, size_t op2){
return vluxei16_v_u16mf2(op0, op1, op2);
}
__rvv_overloaded vuint16mf2_t vluxei16(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint16mf2_t op3, size_t op4){
return vluxei16_v_u16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf4_t vluxei16(const uint16_t * op0, vuint16mf4_t op1, size_t op2){
return vluxei16_v_u16mf4(op0, op1, op2);
}
__rvv_overloaded vuint16mf4_t vluxei16(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint16mf4_t op3, size_t op4){
return vluxei16_v_u16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m1_t vluxei8(const uint8_t * op0, vuint8m1_t op1, size_t op2){
return vluxei8_v_u8m1(op0, op1, op2);
}
__rvv_overloaded vuint8m1_t vluxei8(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint8m1_t op3, size_t op4){
return vluxei8_v_u8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m2_t vluxei8(const uint8_t * op0, vuint8m2_t op1, size_t op2){
return vluxei8_v_u8m2(op0, op1, op2);
}
__rvv_overloaded vuint8m2_t vluxei8(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, vuint8m2_t op3, size_t op4){
return vluxei8_v_u8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m4_t vluxei8(const uint8_t * op0, vuint8m4_t op1, size_t op2){
return vluxei8_v_u8m4(op0, op1, op2);
}
__rvv_overloaded vuint8m4_t vluxei8(vbool2_t op0, vuint8m4_t op1, const uint8_t * op2, vuint8m4_t op3, size_t op4){
return vluxei8_v_u8m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m8_t vluxei8(const uint8_t * op0, vuint8m8_t op1, size_t op2){
return vluxei8_v_u8m8(op0, op1, op2);
}
__rvv_overloaded vuint8m8_t vluxei8(vbool1_t op0, vuint8m8_t op1, const uint8_t * op2, vuint8m8_t op3, size_t op4){
return vluxei8_v_u8m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf2_t vluxei8(const uint8_t * op0, vuint8mf2_t op1, size_t op2){
return vluxei8_v_u8mf2(op0, op1, op2);
}
__rvv_overloaded vuint8mf2_t vluxei8(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint8mf2_t op3, size_t op4){
return vluxei8_v_u8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf4_t vluxei8(const uint8_t * op0, vuint8mf4_t op1, size_t op2){
return vluxei8_v_u8mf4(op0, op1, op2);
}
__rvv_overloaded vuint8mf4_t vluxei8(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint8mf4_t op3, size_t op4){
return vluxei8_v_u8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf8_t vluxei8(const uint8_t * op0, vuint8mf8_t op1, size_t op2){
return vluxei8_v_u8mf8(op0, op1, op2);
}
__rvv_overloaded vuint8mf8_t vluxei8(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint8mf8_t op3, size_t op4){
return vluxei8_v_u8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m1_t vluxei32(const int16_t * op0, vuint32m2_t op1, size_t op2){
return vluxei32_v_i16m1(op0, op1, op2);
}
__rvv_overloaded vint16m1_t vluxei32(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint32m2_t op3, size_t op4){
return vluxei32_v_i16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m2_t vluxei32(const int16_t * op0, vuint32m4_t op1, size_t op2){
return vluxei32_v_i16m2(op0, op1, op2);
}
__rvv_overloaded vint16m2_t vluxei32(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint32m4_t op3, size_t op4){
return vluxei32_v_i16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m4_t vluxei32(const int16_t * op0, vuint32m8_t op1, size_t op2){
return vluxei32_v_i16m4(op0, op1, op2);
}
__rvv_overloaded vint16m4_t vluxei32(vbool4_t op0, vint16m4_t op1, const int16_t * op2, vuint32m8_t op3, size_t op4){
return vluxei32_v_i16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf2_t vluxei32(const int16_t * op0, vuint32m1_t op1, size_t op2){
return vluxei32_v_i16mf2(op0, op1, op2);
}
__rvv_overloaded vint16mf2_t vluxei32(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint32m1_t op3, size_t op4){
return vluxei32_v_i16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf4_t vluxei32(const int16_t * op0, vuint32mf2_t op1, size_t op2){
return vluxei32_v_i16mf4(op0, op1, op2);
}
__rvv_overloaded vint16mf4_t vluxei32(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint32mf2_t op3, size_t op4){
return vluxei32_v_i16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m1_t vluxei32(const uint16_t * op0, vuint32m2_t op1, size_t op2){
return vluxei32_v_u16m1(op0, op1, op2);
}
__rvv_overloaded vuint16m1_t vluxei32(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint32m2_t op3, size_t op4){
return vluxei32_v_u16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m2_t vluxei32(const uint16_t * op0, vuint32m4_t op1, size_t op2){
return vluxei32_v_u16m2(op0, op1, op2);
}
__rvv_overloaded vuint16m2_t vluxei32(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint32m4_t op3, size_t op4){
return vluxei32_v_u16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m4_t vluxei32(const uint16_t * op0, vuint32m8_t op1, size_t op2){
return vluxei32_v_u16m4(op0, op1, op2);
}
__rvv_overloaded vuint16m4_t vluxei32(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, vuint32m8_t op3, size_t op4){
return vluxei32_v_u16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf2_t vluxei32(const uint16_t * op0, vuint32m1_t op1, size_t op2){
return vluxei32_v_u16mf2(op0, op1, op2);
}
__rvv_overloaded vuint16mf2_t vluxei32(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint32m1_t op3, size_t op4){
return vluxei32_v_u16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf4_t vluxei32(const uint16_t * op0, vuint32mf2_t op1, size_t op2){
return vluxei32_v_u16mf4(op0, op1, op2);
}
__rvv_overloaded vuint16mf4_t vluxei32(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint32mf2_t op3, size_t op4){
return vluxei32_v_u16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m1_t vluxei64(const int16_t * op0, vuint64m4_t op1, size_t op2){
return vluxei64_v_i16m1(op0, op1, op2);
}
__rvv_overloaded vint16m1_t vluxei64(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint64m4_t op3, size_t op4){
return vluxei64_v_i16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m2_t vluxei64(const int16_t * op0, vuint64m8_t op1, size_t op2){
return vluxei64_v_i16m2(op0, op1, op2);
}
__rvv_overloaded vint16m2_t vluxei64(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint64m8_t op3, size_t op4){
return vluxei64_v_i16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf2_t vluxei64(const int16_t * op0, vuint64m2_t op1, size_t op2){
return vluxei64_v_i16mf2(op0, op1, op2);
}
__rvv_overloaded vint16mf2_t vluxei64(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint64m2_t op3, size_t op4){
return vluxei64_v_i16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf4_t vluxei64(const int16_t * op0, vuint64m1_t op1, size_t op2){
return vluxei64_v_i16mf4(op0, op1, op2);
}
__rvv_overloaded vint16mf4_t vluxei64(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint64m1_t op3, size_t op4){
return vluxei64_v_i16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m1_t vluxei64(const uint16_t * op0, vuint64m4_t op1, size_t op2){
return vluxei64_v_u16m1(op0, op1, op2);
}
__rvv_overloaded vuint16m1_t vluxei64(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint64m4_t op3, size_t op4){
return vluxei64_v_u16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m2_t vluxei64(const uint16_t * op0, vuint64m8_t op1, size_t op2){
return vluxei64_v_u16m2(op0, op1, op2);
}
__rvv_overloaded vuint16m2_t vluxei64(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint64m8_t op3, size_t op4){
return vluxei64_v_u16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf2_t vluxei64(const uint16_t * op0, vuint64m2_t op1, size_t op2){
return vluxei64_v_u16mf2(op0, op1, op2);
}
__rvv_overloaded vuint16mf2_t vluxei64(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint64m2_t op3, size_t op4){
return vluxei64_v_u16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf4_t vluxei64(const uint16_t * op0, vuint64m1_t op1, size_t op2){
return vluxei64_v_u16mf4(op0, op1, op2);
}
__rvv_overloaded vuint16mf4_t vluxei64(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint64m1_t op3, size_t op4){
return vluxei64_v_u16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m1_t vluxei8(const int32_t * op0, vuint8mf4_t op1, size_t op2){
return vluxei8_v_i32m1(op0, op1, op2);
}
__rvv_overloaded vint32m1_t vluxei8(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint8mf4_t op3, size_t op4){
return vluxei8_v_i32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m2_t vluxei8(const int32_t * op0, vuint8mf2_t op1, size_t op2){
return vluxei8_v_i32m2(op0, op1, op2);
}
__rvv_overloaded vint32m2_t vluxei8(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint8mf2_t op3, size_t op4){
return vluxei8_v_i32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m4_t vluxei8(const int32_t * op0, vuint8m1_t op1, size_t op2){
return vluxei8_v_i32m4(op0, op1, op2);
}
__rvv_overloaded vint32m4_t vluxei8(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint8m1_t op3, size_t op4){
return vluxei8_v_i32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m8_t vluxei8(const int32_t * op0, vuint8m2_t op1, size_t op2){
return vluxei8_v_i32m8(op0, op1, op2);
}
__rvv_overloaded vint32m8_t vluxei8(vbool4_t op0, vint32m8_t op1, const int32_t * op2, vuint8m2_t op3, size_t op4){
return vluxei8_v_i32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32mf2_t vluxei8(const int32_t * op0, vuint8mf8_t op1, size_t op2){
return vluxei8_v_i32mf2(op0, op1, op2);
}
__rvv_overloaded vint32mf2_t vluxei8(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint8mf8_t op3, size_t op4){
return vluxei8_v_i32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m1_t vluxei8(const uint32_t * op0, vuint8mf4_t op1, size_t op2){
return vluxei8_v_u32m1(op0, op1, op2);
}
__rvv_overloaded vuint32m1_t vluxei8(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint8mf4_t op3, size_t op4){
return vluxei8_v_u32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m2_t vluxei8(const uint32_t * op0, vuint8mf2_t op1, size_t op2){
return vluxei8_v_u32m2(op0, op1, op2);
}
__rvv_overloaded vuint32m2_t vluxei8(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint8mf2_t op3, size_t op4){
return vluxei8_v_u32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m4_t vluxei8(const uint32_t * op0, vuint8m1_t op1, size_t op2){
return vluxei8_v_u32m4(op0, op1, op2);
}
__rvv_overloaded vuint32m4_t vluxei8(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint8m1_t op3, size_t op4){
return vluxei8_v_u32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m8_t vluxei8(const uint32_t * op0, vuint8m2_t op1, size_t op2){
return vluxei8_v_u32m8(op0, op1, op2);
}
__rvv_overloaded vuint32m8_t vluxei8(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, vuint8m2_t op3, size_t op4){
return vluxei8_v_u32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32mf2_t vluxei8(const uint32_t * op0, vuint8mf8_t op1, size_t op2){
return vluxei8_v_u32mf2(op0, op1, op2);
}
__rvv_overloaded vuint32mf2_t vluxei8(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint8mf8_t op3, size_t op4){
return vluxei8_v_u32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m1_t vluxei16(const int32_t * op0, vuint16mf2_t op1, size_t op2){
return vluxei16_v_i32m1(op0, op1, op2);
}
__rvv_overloaded vint32m1_t vluxei16(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint16mf2_t op3, size_t op4){
return vluxei16_v_i32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m2_t vluxei16(const int32_t * op0, vuint16m1_t op1, size_t op2){
return vluxei16_v_i32m2(op0, op1, op2);
}
__rvv_overloaded vint32m2_t vluxei16(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint16m1_t op3, size_t op4){
return vluxei16_v_i32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m4_t vluxei16(const int32_t * op0, vuint16m2_t op1, size_t op2){
return vluxei16_v_i32m4(op0, op1, op2);
}
__rvv_overloaded vint32m4_t vluxei16(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint16m2_t op3, size_t op4){
return vluxei16_v_i32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m8_t vluxei16(const int32_t * op0, vuint16m4_t op1, size_t op2){
return vluxei16_v_i32m8(op0, op1, op2);
}
__rvv_overloaded vint32m8_t vluxei16(vbool4_t op0, vint32m8_t op1, const int32_t * op2, vuint16m4_t op3, size_t op4){
return vluxei16_v_i32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32mf2_t vluxei16(const int32_t * op0, vuint16mf4_t op1, size_t op2){
return vluxei16_v_i32mf2(op0, op1, op2);
}
__rvv_overloaded vint32mf2_t vluxei16(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint16mf4_t op3, size_t op4){
return vluxei16_v_i32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m1_t vluxei16(const uint32_t * op0, vuint16mf2_t op1, size_t op2){
return vluxei16_v_u32m1(op0, op1, op2);
}
__rvv_overloaded vuint32m1_t vluxei16(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint16mf2_t op3, size_t op4){
return vluxei16_v_u32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m2_t vluxei16(const uint32_t * op0, vuint16m1_t op1, size_t op2){
return vluxei16_v_u32m2(op0, op1, op2);
}
__rvv_overloaded vuint32m2_t vluxei16(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint16m1_t op3, size_t op4){
return vluxei16_v_u32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m4_t vluxei16(const uint32_t * op0, vuint16m2_t op1, size_t op2){
return vluxei16_v_u32m4(op0, op1, op2);
}
__rvv_overloaded vuint32m4_t vluxei16(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint16m2_t op3, size_t op4){
return vluxei16_v_u32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m8_t vluxei16(const uint32_t * op0, vuint16m4_t op1, size_t op2){
return vluxei16_v_u32m8(op0, op1, op2);
}
__rvv_overloaded vuint32m8_t vluxei16(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, vuint16m4_t op3, size_t op4){
return vluxei16_v_u32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32mf2_t vluxei16(const uint32_t * op0, vuint16mf4_t op1, size_t op2){
return vluxei16_v_u32mf2(op0, op1, op2);
}
__rvv_overloaded vuint32mf2_t vluxei16(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint16mf4_t op3, size_t op4){
return vluxei16_v_u32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m1_t vluxei32(const int32_t * op0, vuint32m1_t op1, size_t op2){
return vluxei32_v_i32m1(op0, op1, op2);
}
__rvv_overloaded vint32m1_t vluxei32(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint32m1_t op3, size_t op4){
return vluxei32_v_i32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m2_t vluxei32(const int32_t * op0, vuint32m2_t op1, size_t op2){
return vluxei32_v_i32m2(op0, op1, op2);
}
__rvv_overloaded vint32m2_t vluxei32(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint32m2_t op3, size_t op4){
return vluxei32_v_i32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m4_t vluxei32(const int32_t * op0, vuint32m4_t op1, size_t op2){
return vluxei32_v_i32m4(op0, op1, op2);
}
__rvv_overloaded vint32m4_t vluxei32(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint32m4_t op3, size_t op4){
return vluxei32_v_i32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m8_t vluxei32(const int32_t * op0, vuint32m8_t op1, size_t op2){
return vluxei32_v_i32m8(op0, op1, op2);
}
__rvv_overloaded vint32m8_t vluxei32(vbool4_t op0, vint32m8_t op1, const int32_t * op2, vuint32m8_t op3, size_t op4){
return vluxei32_v_i32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32mf2_t vluxei32(const int32_t * op0, vuint32mf2_t op1, size_t op2){
return vluxei32_v_i32mf2(op0, op1, op2);
}
__rvv_overloaded vint32mf2_t vluxei32(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint32mf2_t op3, size_t op4){
return vluxei32_v_i32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m1_t vluxei32(const uint32_t * op0, vuint32m1_t op1, size_t op2){
return vluxei32_v_u32m1(op0, op1, op2);
}
__rvv_overloaded vuint32m1_t vluxei32(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint32m1_t op3, size_t op4){
return vluxei32_v_u32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m2_t vluxei32(const uint32_t * op0, vuint32m2_t op1, size_t op2){
return vluxei32_v_u32m2(op0, op1, op2);
}
__rvv_overloaded vuint32m2_t vluxei32(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint32m2_t op3, size_t op4){
return vluxei32_v_u32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m4_t vluxei32(const uint32_t * op0, vuint32m4_t op1, size_t op2){
return vluxei32_v_u32m4(op0, op1, op2);
}
__rvv_overloaded vuint32m4_t vluxei32(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint32m4_t op3, size_t op4){
return vluxei32_v_u32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m8_t vluxei32(const uint32_t * op0, vuint32m8_t op1, size_t op2){
return vluxei32_v_u32m8(op0, op1, op2);
}
__rvv_overloaded vuint32m8_t vluxei32(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, vuint32m8_t op3, size_t op4){
return vluxei32_v_u32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32mf2_t vluxei32(const uint32_t * op0, vuint32mf2_t op1, size_t op2){
return vluxei32_v_u32mf2(op0, op1, op2);
}
__rvv_overloaded vuint32mf2_t vluxei32(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint32mf2_t op3, size_t op4){
return vluxei32_v_u32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m1_t vluxei64(const int32_t * op0, vuint64m2_t op1, size_t op2){
return vluxei64_v_i32m1(op0, op1, op2);
}
__rvv_overloaded vint32m1_t vluxei64(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint64m2_t op3, size_t op4){
return vluxei64_v_i32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m2_t vluxei64(const int32_t * op0, vuint64m4_t op1, size_t op2){
return vluxei64_v_i32m2(op0, op1, op2);
}
__rvv_overloaded vint32m2_t vluxei64(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint64m4_t op3, size_t op4){
return vluxei64_v_i32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m4_t vluxei64(const int32_t * op0, vuint64m8_t op1, size_t op2){
return vluxei64_v_i32m4(op0, op1, op2);
}
__rvv_overloaded vint32m4_t vluxei64(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint64m8_t op3, size_t op4){
return vluxei64_v_i32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32mf2_t vluxei64(const int32_t * op0, vuint64m1_t op1, size_t op2){
return vluxei64_v_i32mf2(op0, op1, op2);
}
__rvv_overloaded vint32mf2_t vluxei64(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint64m1_t op3, size_t op4){
return vluxei64_v_i32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m1_t vluxei64(const uint32_t * op0, vuint64m2_t op1, size_t op2){
return vluxei64_v_u32m1(op0, op1, op2);
}
__rvv_overloaded vuint32m1_t vluxei64(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint64m2_t op3, size_t op4){
return vluxei64_v_u32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m2_t vluxei64(const uint32_t * op0, vuint64m4_t op1, size_t op2){
return vluxei64_v_u32m2(op0, op1, op2);
}
__rvv_overloaded vuint32m2_t vluxei64(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint64m4_t op3, size_t op4){
return vluxei64_v_u32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m4_t vluxei64(const uint32_t * op0, vuint64m8_t op1, size_t op2){
return vluxei64_v_u32m4(op0, op1, op2);
}
__rvv_overloaded vuint32m4_t vluxei64(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint64m8_t op3, size_t op4){
return vluxei64_v_u32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32mf2_t vluxei64(const uint32_t * op0, vuint64m1_t op1, size_t op2){
return vluxei64_v_u32mf2(op0, op1, op2);
}
__rvv_overloaded vuint32mf2_t vluxei64(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint64m1_t op3, size_t op4){
return vluxei64_v_u32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m1_t vluxei8(const int64_t * op0, vuint8mf8_t op1, size_t op2){
return vluxei8_v_i64m1(op0, op1, op2);
}
__rvv_overloaded vint64m1_t vluxei8(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint8mf8_t op3, size_t op4){
return vluxei8_v_i64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m2_t vluxei8(const int64_t * op0, vuint8mf4_t op1, size_t op2){
return vluxei8_v_i64m2(op0, op1, op2);
}
__rvv_overloaded vint64m2_t vluxei8(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint8mf4_t op3, size_t op4){
return vluxei8_v_i64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m4_t vluxei8(const int64_t * op0, vuint8mf2_t op1, size_t op2){
return vluxei8_v_i64m4(op0, op1, op2);
}
__rvv_overloaded vint64m4_t vluxei8(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint8mf2_t op3, size_t op4){
return vluxei8_v_i64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m8_t vluxei8(const int64_t * op0, vuint8m1_t op1, size_t op2){
return vluxei8_v_i64m8(op0, op1, op2);
}
__rvv_overloaded vint64m8_t vluxei8(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint8m1_t op3, size_t op4){
return vluxei8_v_i64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m1_t vluxei8(const uint64_t * op0, vuint8mf8_t op1, size_t op2){
return vluxei8_v_u64m1(op0, op1, op2);
}
__rvv_overloaded vuint64m1_t vluxei8(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint8mf8_t op3, size_t op4){
return vluxei8_v_u64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m2_t vluxei8(const uint64_t * op0, vuint8mf4_t op1, size_t op2){
return vluxei8_v_u64m2(op0, op1, op2);
}
__rvv_overloaded vuint64m2_t vluxei8(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint8mf4_t op3, size_t op4){
return vluxei8_v_u64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m4_t vluxei8(const uint64_t * op0, vuint8mf2_t op1, size_t op2){
return vluxei8_v_u64m4(op0, op1, op2);
}
__rvv_overloaded vuint64m4_t vluxei8(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint8mf2_t op3, size_t op4){
return vluxei8_v_u64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m8_t vluxei8(const uint64_t * op0, vuint8m1_t op1, size_t op2){
return vluxei8_v_u64m8(op0, op1, op2);
}
__rvv_overloaded vuint64m8_t vluxei8(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint8m1_t op3, size_t op4){
return vluxei8_v_u64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m1_t vluxei16(const int64_t * op0, vuint16mf4_t op1, size_t op2){
return vluxei16_v_i64m1(op0, op1, op2);
}
__rvv_overloaded vint64m1_t vluxei16(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint16mf4_t op3, size_t op4){
return vluxei16_v_i64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m2_t vluxei16(const int64_t * op0, vuint16mf2_t op1, size_t op2){
return vluxei16_v_i64m2(op0, op1, op2);
}
__rvv_overloaded vint64m2_t vluxei16(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint16mf2_t op3, size_t op4){
return vluxei16_v_i64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m4_t vluxei16(const int64_t * op0, vuint16m1_t op1, size_t op2){
return vluxei16_v_i64m4(op0, op1, op2);
}
__rvv_overloaded vint64m4_t vluxei16(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint16m1_t op3, size_t op4){
return vluxei16_v_i64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m8_t vluxei16(const int64_t * op0, vuint16m2_t op1, size_t op2){
return vluxei16_v_i64m8(op0, op1, op2);
}
__rvv_overloaded vint64m8_t vluxei16(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint16m2_t op3, size_t op4){
return vluxei16_v_i64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m1_t vluxei16(const uint64_t * op0, vuint16mf4_t op1, size_t op2){
return vluxei16_v_u64m1(op0, op1, op2);
}
__rvv_overloaded vuint64m1_t vluxei16(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint16mf4_t op3, size_t op4){
return vluxei16_v_u64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m2_t vluxei16(const uint64_t * op0, vuint16mf2_t op1, size_t op2){
return vluxei16_v_u64m2(op0, op1, op2);
}
__rvv_overloaded vuint64m2_t vluxei16(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint16mf2_t op3, size_t op4){
return vluxei16_v_u64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m4_t vluxei16(const uint64_t * op0, vuint16m1_t op1, size_t op2){
return vluxei16_v_u64m4(op0, op1, op2);
}
__rvv_overloaded vuint64m4_t vluxei16(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint16m1_t op3, size_t op4){
return vluxei16_v_u64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m8_t vluxei16(const uint64_t * op0, vuint16m2_t op1, size_t op2){
return vluxei16_v_u64m8(op0, op1, op2);
}
__rvv_overloaded vuint64m8_t vluxei16(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint16m2_t op3, size_t op4){
return vluxei16_v_u64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m1_t vluxei32(const int64_t * op0, vuint32mf2_t op1, size_t op2){
return vluxei32_v_i64m1(op0, op1, op2);
}
__rvv_overloaded vint64m1_t vluxei32(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint32mf2_t op3, size_t op4){
return vluxei32_v_i64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m2_t vluxei32(const int64_t * op0, vuint32m1_t op1, size_t op2){
return vluxei32_v_i64m2(op0, op1, op2);
}
__rvv_overloaded vint64m2_t vluxei32(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint32m1_t op3, size_t op4){
return vluxei32_v_i64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m4_t vluxei32(const int64_t * op0, vuint32m2_t op1, size_t op2){
return vluxei32_v_i64m4(op0, op1, op2);
}
__rvv_overloaded vint64m4_t vluxei32(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint32m2_t op3, size_t op4){
return vluxei32_v_i64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m8_t vluxei32(const int64_t * op0, vuint32m4_t op1, size_t op2){
return vluxei32_v_i64m8(op0, op1, op2);
}
__rvv_overloaded vint64m8_t vluxei32(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint32m4_t op3, size_t op4){
return vluxei32_v_i64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m1_t vluxei32(const uint64_t * op0, vuint32mf2_t op1, size_t op2){
return vluxei32_v_u64m1(op0, op1, op2);
}
__rvv_overloaded vuint64m1_t vluxei32(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint32mf2_t op3, size_t op4){
return vluxei32_v_u64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m2_t vluxei32(const uint64_t * op0, vuint32m1_t op1, size_t op2){
return vluxei32_v_u64m2(op0, op1, op2);
}
__rvv_overloaded vuint64m2_t vluxei32(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint32m1_t op3, size_t op4){
return vluxei32_v_u64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m4_t vluxei32(const uint64_t * op0, vuint32m2_t op1, size_t op2){
return vluxei32_v_u64m4(op0, op1, op2);
}
__rvv_overloaded vuint64m4_t vluxei32(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint32m2_t op3, size_t op4){
return vluxei32_v_u64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m8_t vluxei32(const uint64_t * op0, vuint32m4_t op1, size_t op2){
return vluxei32_v_u64m8(op0, op1, op2);
}
__rvv_overloaded vuint64m8_t vluxei32(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint32m4_t op3, size_t op4){
return vluxei32_v_u64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m1_t vluxei64(const int64_t * op0, vuint64m1_t op1, size_t op2){
return vluxei64_v_i64m1(op0, op1, op2);
}
__rvv_overloaded vint64m1_t vluxei64(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint64m1_t op3, size_t op4){
return vluxei64_v_i64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m2_t vluxei64(const int64_t * op0, vuint64m2_t op1, size_t op2){
return vluxei64_v_i64m2(op0, op1, op2);
}
__rvv_overloaded vint64m2_t vluxei64(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint64m2_t op3, size_t op4){
return vluxei64_v_i64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m4_t vluxei64(const int64_t * op0, vuint64m4_t op1, size_t op2){
return vluxei64_v_i64m4(op0, op1, op2);
}
__rvv_overloaded vint64m4_t vluxei64(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint64m4_t op3, size_t op4){
return vluxei64_v_i64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m8_t vluxei64(const int64_t * op0, vuint64m8_t op1, size_t op2){
return vluxei64_v_i64m8(op0, op1, op2);
}
__rvv_overloaded vint64m8_t vluxei64(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint64m8_t op3, size_t op4){
return vluxei64_v_i64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m1_t vluxei64(const uint64_t * op0, vuint64m1_t op1, size_t op2){
return vluxei64_v_u64m1(op0, op1, op2);
}
__rvv_overloaded vuint64m1_t vluxei64(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint64m1_t op3, size_t op4){
return vluxei64_v_u64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m2_t vluxei64(const uint64_t * op0, vuint64m2_t op1, size_t op2){
return vluxei64_v_u64m2(op0, op1, op2);
}
__rvv_overloaded vuint64m2_t vluxei64(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint64m2_t op3, size_t op4){
return vluxei64_v_u64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m4_t vluxei64(const uint64_t * op0, vuint64m4_t op1, size_t op2){
return vluxei64_v_u64m4(op0, op1, op2);
}
__rvv_overloaded vuint64m4_t vluxei64(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint64m4_t op3, size_t op4){
return vluxei64_v_u64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m8_t vluxei64(const uint64_t * op0, vuint64m8_t op1, size_t op2){
return vluxei64_v_u64m8(op0, op1, op2);
}
__rvv_overloaded vuint64m8_t vluxei64(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint64m8_t op3, size_t op4){
return vluxei64_v_u64m8_m(op0, op1, op2, op3, op4);
}
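// vloxei*: indexed-ordered loads. The overloads below cover 8/16/32/64-bit index widths for each
// element type and dispatch to the type-specific vloxei*_v_* / vloxei*_v_*_m intrinsics.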
__rvv_overloaded vint8m1_t vloxei8(const int8_t * op0, vuint8m1_t op1, size_t op2){
return vloxei8_v_i8m1(op0, op1, op2);
}
__rvv_overloaded vint8m1_t vloxei8(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint8m1_t op3, size_t op4){
return vloxei8_v_i8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m2_t vloxei8(const int8_t * op0, vuint8m2_t op1, size_t op2){
return vloxei8_v_i8m2(op0, op1, op2);
}
__rvv_overloaded vint8m2_t vloxei8(vbool4_t op0, vint8m2_t op1, const int8_t * op2, vuint8m2_t op3, size_t op4){
return vloxei8_v_i8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m4_t vloxei8(const int8_t * op0, vuint8m4_t op1, size_t op2){
return vloxei8_v_i8m4(op0, op1, op2);
}
__rvv_overloaded vint8m4_t vloxei8(vbool2_t op0, vint8m4_t op1, const int8_t * op2, vuint8m4_t op3, size_t op4){
return vloxei8_v_i8m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m8_t vloxei8(const int8_t * op0, vuint8m8_t op1, size_t op2){
return vloxei8_v_i8m8(op0, op1, op2);
}
__rvv_overloaded vint8m8_t vloxei8(vbool1_t op0, vint8m8_t op1, const int8_t * op2, vuint8m8_t op3, size_t op4){
return vloxei8_v_i8m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf2_t vloxei8(const int8_t * op0, vuint8mf2_t op1, size_t op2){
return vloxei8_v_i8mf2(op0, op1, op2);
}
__rvv_overloaded vint8mf2_t vloxei8(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint8mf2_t op3, size_t op4){
return vloxei8_v_i8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf4_t vloxei8(const int8_t * op0, vuint8mf4_t op1, size_t op2){
return vloxei8_v_i8mf4(op0, op1, op2);
}
__rvv_overloaded vint8mf4_t vloxei8(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint8mf4_t op3, size_t op4){
return vloxei8_v_i8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf8_t vloxei8(const int8_t * op0, vuint8mf8_t op1, size_t op2){
return vloxei8_v_i8mf8(op0, op1, op2);
}
__rvv_overloaded vint8mf8_t vloxei8(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint8mf8_t op3, size_t op4){
return vloxei8_v_i8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m1_t vloxei8(const uint8_t * op0, vuint8m1_t op1, size_t op2){
return vloxei8_v_u8m1(op0, op1, op2);
}
__rvv_overloaded vuint8m1_t vloxei8(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint8m1_t op3, size_t op4){
return vloxei8_v_u8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m2_t vloxei8(const uint8_t * op0, vuint8m2_t op1, size_t op2){
return vloxei8_v_u8m2(op0, op1, op2);
}
__rvv_overloaded vuint8m2_t vloxei8(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, vuint8m2_t op3, size_t op4){
return vloxei8_v_u8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m4_t vloxei8(const uint8_t * op0, vuint8m4_t op1, size_t op2){
return vloxei8_v_u8m4(op0, op1, op2);
}
__rvv_overloaded vuint8m4_t vloxei8(vbool2_t op0, vuint8m4_t op1, const uint8_t * op2, vuint8m4_t op3, size_t op4){
return vloxei8_v_u8m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m8_t vloxei8(const uint8_t * op0, vuint8m8_t op1, size_t op2){
return vloxei8_v_u8m8(op0, op1, op2);
}
__rvv_overloaded vuint8m8_t vloxei8(vbool1_t op0, vuint8m8_t op1, const uint8_t * op2, vuint8m8_t op3, size_t op4){
return vloxei8_v_u8m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf2_t vloxei8(const uint8_t * op0, vuint8mf2_t op1, size_t op2){
return vloxei8_v_u8mf2(op0, op1, op2);
}
__rvv_overloaded vuint8mf2_t vloxei8(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint8mf2_t op3, size_t op4){
return vloxei8_v_u8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf4_t vloxei8(const uint8_t * op0, vuint8mf4_t op1, size_t op2){
return vloxei8_v_u8mf4(op0, op1, op2);
}
__rvv_overloaded vuint8mf4_t vloxei8(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint8mf4_t op3, size_t op4){
return vloxei8_v_u8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf8_t vloxei8(const uint8_t * op0, vuint8mf8_t op1, size_t op2){
return vloxei8_v_u8mf8(op0, op1, op2);
}
__rvv_overloaded vuint8mf8_t vloxei8(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint8mf8_t op3, size_t op4){
return vloxei8_v_u8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m1_t vloxei16(const int8_t * op0, vuint16m2_t op1, size_t op2){
return vloxei16_v_i8m1(op0, op1, op2);
}
__rvv_overloaded vint8m1_t vloxei16(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint16m2_t op3, size_t op4){
return vloxei16_v_i8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m2_t vloxei16(const int8_t * op0, vuint16m4_t op1, size_t op2){
return vloxei16_v_i8m2(op0, op1, op2);
}
__rvv_overloaded vint8m2_t vloxei16(vbool4_t op0, vint8m2_t op1, const int8_t * op2, vuint16m4_t op3, size_t op4){
return vloxei16_v_i8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m4_t vloxei16(const int8_t * op0, vuint16m8_t op1, size_t op2){
return vloxei16_v_i8m4(op0, op1, op2);
}
__rvv_overloaded vint8m4_t vloxei16(vbool2_t op0, vint8m4_t op1, const int8_t * op2, vuint16m8_t op3, size_t op4){
return vloxei16_v_i8m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf2_t vloxei16(const int8_t * op0, vuint16m1_t op1, size_t op2){
return vloxei16_v_i8mf2(op0, op1, op2);
}
__rvv_overloaded vint8mf2_t vloxei16(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint16m1_t op3, size_t op4){
return vloxei16_v_i8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf4_t vloxei16(const int8_t * op0, vuint16mf2_t op1, size_t op2){
return vloxei16_v_i8mf4(op0, op1, op2);
}
__rvv_overloaded vint8mf4_t vloxei16(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint16mf2_t op3, size_t op4){
return vloxei16_v_i8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf8_t vloxei16(const int8_t * op0, vuint16mf4_t op1, size_t op2){
return vloxei16_v_i8mf8(op0, op1, op2);
}
__rvv_overloaded vint8mf8_t vloxei16(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint16mf4_t op3, size_t op4){
return vloxei16_v_i8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m1_t vloxei16(const uint8_t * op0, vuint16m2_t op1, size_t op2){
return vloxei16_v_u8m1(op0, op1, op2);
}
__rvv_overloaded vuint8m1_t vloxei16(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint16m2_t op3, size_t op4){
return vloxei16_v_u8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m2_t vloxei16(const uint8_t * op0, vuint16m4_t op1, size_t op2){
return vloxei16_v_u8m2(op0, op1, op2);
}
__rvv_overloaded vuint8m2_t vloxei16(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, vuint16m4_t op3, size_t op4){
return vloxei16_v_u8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m4_t vloxei16(const uint8_t * op0, vuint16m8_t op1, size_t op2){
return vloxei16_v_u8m4(op0, op1, op2);
}
__rvv_overloaded vuint8m4_t vloxei16(vbool2_t op0, vuint8m4_t op1, const uint8_t * op2, vuint16m8_t op3, size_t op4){
return vloxei16_v_u8m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf2_t vloxei16(const uint8_t * op0, vuint16m1_t op1, size_t op2){
return vloxei16_v_u8mf2(op0, op1, op2);
}
__rvv_overloaded vuint8mf2_t vloxei16(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint16m1_t op3, size_t op4){
return vloxei16_v_u8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf4_t vloxei16(const uint8_t * op0, vuint16mf2_t op1, size_t op2){
return vloxei16_v_u8mf4(op0, op1, op2);
}
__rvv_overloaded vuint8mf4_t vloxei16(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint16mf2_t op3, size_t op4){
return vloxei16_v_u8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf8_t vloxei16(const uint8_t * op0, vuint16mf4_t op1, size_t op2){
return vloxei16_v_u8mf8(op0, op1, op2);
}
__rvv_overloaded vuint8mf8_t vloxei16(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint16mf4_t op3, size_t op4){
return vloxei16_v_u8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m1_t vloxei32(const int8_t * op0, vuint32m4_t op1, size_t op2){
return vloxei32_v_i8m1(op0, op1, op2);
}
__rvv_overloaded vint8m1_t vloxei32(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint32m4_t op3, size_t op4){
return vloxei32_v_i8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m2_t vloxei32(const int8_t * op0, vuint32m8_t op1, size_t op2){
return vloxei32_v_i8m2(op0, op1, op2);
}
__rvv_overloaded vint8m2_t vloxei32(vbool4_t op0, vint8m2_t op1, const int8_t * op2, vuint32m8_t op3, size_t op4){
return vloxei32_v_i8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf2_t vloxei32(const int8_t * op0, vuint32m2_t op1, size_t op2){
return vloxei32_v_i8mf2(op0, op1, op2);
}
__rvv_overloaded vint8mf2_t vloxei32(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint32m2_t op3, size_t op4){
return vloxei32_v_i8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf4_t vloxei32(const int8_t * op0, vuint32m1_t op1, size_t op2){
return vloxei32_v_i8mf4(op0, op1, op2);
}
__rvv_overloaded vint8mf4_t vloxei32(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint32m1_t op3, size_t op4){
return vloxei32_v_i8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf8_t vloxei32(const int8_t * op0, vuint32mf2_t op1, size_t op2){
return vloxei32_v_i8mf8(op0, op1, op2);
}
__rvv_overloaded vint8mf8_t vloxei32(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint32mf2_t op3, size_t op4){
return vloxei32_v_i8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m1_t vloxei32(const uint8_t * op0, vuint32m4_t op1, size_t op2){
return vloxei32_v_u8m1(op0, op1, op2);
}
__rvv_overloaded vuint8m1_t vloxei32(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint32m4_t op3, size_t op4){
return vloxei32_v_u8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m2_t vloxei32(const uint8_t * op0, vuint32m8_t op1, size_t op2){
return vloxei32_v_u8m2(op0, op1, op2);
}
__rvv_overloaded vuint8m2_t vloxei32(vbool4_t op0, vuint8m2_t op1, const uint8_t * op2, vuint32m8_t op3, size_t op4){
return vloxei32_v_u8m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf2_t vloxei32(const uint8_t * op0, vuint32m2_t op1, size_t op2){
return vloxei32_v_u8mf2(op0, op1, op2);
}
__rvv_overloaded vuint8mf2_t vloxei32(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint32m2_t op3, size_t op4){
return vloxei32_v_u8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf4_t vloxei32(const uint8_t * op0, vuint32m1_t op1, size_t op2){
return vloxei32_v_u8mf4(op0, op1, op2);
}
__rvv_overloaded vuint8mf4_t vloxei32(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint32m1_t op3, size_t op4){
return vloxei32_v_u8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf8_t vloxei32(const uint8_t * op0, vuint32mf2_t op1, size_t op2){
return vloxei32_v_u8mf8(op0, op1, op2);
}
__rvv_overloaded vuint8mf8_t vloxei32(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint32mf2_t op3, size_t op4){
return vloxei32_v_u8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8m1_t vloxei64(const int8_t * op0, vuint64m8_t op1, size_t op2){
return vloxei64_v_i8m1(op0, op1, op2);
}
__rvv_overloaded vint8m1_t vloxei64(vbool8_t op0, vint8m1_t op1, const int8_t * op2, vuint64m8_t op3, size_t op4){
return vloxei64_v_i8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf2_t vloxei64(const int8_t * op0, vuint64m4_t op1, size_t op2){
return vloxei64_v_i8mf2(op0, op1, op2);
}
__rvv_overloaded vint8mf2_t vloxei64(vbool16_t op0, vint8mf2_t op1, const int8_t * op2, vuint64m4_t op3, size_t op4){
return vloxei64_v_i8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf4_t vloxei64(const int8_t * op0, vuint64m2_t op1, size_t op2){
return vloxei64_v_i8mf4(op0, op1, op2);
}
__rvv_overloaded vint8mf4_t vloxei64(vbool32_t op0, vint8mf4_t op1, const int8_t * op2, vuint64m2_t op3, size_t op4){
return vloxei64_v_i8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint8mf8_t vloxei64(const int8_t * op0, vuint64m1_t op1, size_t op2){
return vloxei64_v_i8mf8(op0, op1, op2);
}
__rvv_overloaded vint8mf8_t vloxei64(vbool64_t op0, vint8mf8_t op1, const int8_t * op2, vuint64m1_t op3, size_t op4){
return vloxei64_v_i8mf8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8m1_t vloxei64(const uint8_t * op0, vuint64m8_t op1, size_t op2){
return vloxei64_v_u8m1(op0, op1, op2);
}
__rvv_overloaded vuint8m1_t vloxei64(vbool8_t op0, vuint8m1_t op1, const uint8_t * op2, vuint64m8_t op3, size_t op4){
return vloxei64_v_u8m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf2_t vloxei64(const uint8_t * op0, vuint64m4_t op1, size_t op2){
return vloxei64_v_u8mf2(op0, op1, op2);
}
__rvv_overloaded vuint8mf2_t vloxei64(vbool16_t op0, vuint8mf2_t op1, const uint8_t * op2, vuint64m4_t op3, size_t op4){
return vloxei64_v_u8mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf4_t vloxei64(const uint8_t * op0, vuint64m2_t op1, size_t op2){
return vloxei64_v_u8mf4(op0, op1, op2);
}
__rvv_overloaded vuint8mf4_t vloxei64(vbool32_t op0, vuint8mf4_t op1, const uint8_t * op2, vuint64m2_t op3, size_t op4){
return vloxei64_v_u8mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint8mf8_t vloxei64(const uint8_t * op0, vuint64m1_t op1, size_t op2){
return vloxei64_v_u8mf8(op0, op1, op2);
}
__rvv_overloaded vuint8mf8_t vloxei64(vbool64_t op0, vuint8mf8_t op1, const uint8_t * op2, vuint64m1_t op3, size_t op4){
return vloxei64_v_u8mf8_m(op0, op1, op2, op3, op4);
}
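// Indexed-ordered loads of 16-bit elements (int16_t / uint16_t data).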
__rvv_overloaded vint16m1_t vloxei8(const int16_t * op0, vuint8mf2_t op1, size_t op2){
return vloxei8_v_i16m1(op0, op1, op2);
}
__rvv_overloaded vint16m1_t vloxei8(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint8mf2_t op3, size_t op4){
return vloxei8_v_i16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m2_t vloxei8(const int16_t * op0, vuint8m1_t op1, size_t op2){
return vloxei8_v_i16m2(op0, op1, op2);
}
__rvv_overloaded vint16m2_t vloxei8(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint8m1_t op3, size_t op4){
return vloxei8_v_i16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m4_t vloxei8(const int16_t * op0, vuint8m2_t op1, size_t op2){
return vloxei8_v_i16m4(op0, op1, op2);
}
__rvv_overloaded vint16m4_t vloxei8(vbool4_t op0, vint16m4_t op1, const int16_t * op2, vuint8m2_t op3, size_t op4){
return vloxei8_v_i16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m8_t vloxei8(const int16_t * op0, vuint8m4_t op1, size_t op2){
return vloxei8_v_i16m8(op0, op1, op2);
}
__rvv_overloaded vint16m8_t vloxei8(vbool2_t op0, vint16m8_t op1, const int16_t * op2, vuint8m4_t op3, size_t op4){
return vloxei8_v_i16m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf2_t vloxei8(const int16_t * op0, vuint8mf4_t op1, size_t op2){
return vloxei8_v_i16mf2(op0, op1, op2);
}
__rvv_overloaded vint16mf2_t vloxei8(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint8mf4_t op3, size_t op4){
return vloxei8_v_i16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf4_t vloxei8(const int16_t * op0, vuint8mf8_t op1, size_t op2){
return vloxei8_v_i16mf4(op0, op1, op2);
}
__rvv_overloaded vint16mf4_t vloxei8(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint8mf8_t op3, size_t op4){
return vloxei8_v_i16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m1_t vloxei8(const uint16_t * op0, vuint8mf2_t op1, size_t op2){
return vloxei8_v_u16m1(op0, op1, op2);
}
__rvv_overloaded vuint16m1_t vloxei8(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint8mf2_t op3, size_t op4){
return vloxei8_v_u16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m2_t vloxei8(const uint16_t * op0, vuint8m1_t op1, size_t op2){
return vloxei8_v_u16m2(op0, op1, op2);
}
__rvv_overloaded vuint16m2_t vloxei8(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint8m1_t op3, size_t op4){
return vloxei8_v_u16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m4_t vloxei8(const uint16_t * op0, vuint8m2_t op1, size_t op2){
return vloxei8_v_u16m4(op0, op1, op2);
}
__rvv_overloaded vuint16m4_t vloxei8(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, vuint8m2_t op3, size_t op4){
return vloxei8_v_u16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m8_t vloxei8(const uint16_t * op0, vuint8m4_t op1, size_t op2){
return vloxei8_v_u16m8(op0, op1, op2);
}
__rvv_overloaded vuint16m8_t vloxei8(vbool2_t op0, vuint16m8_t op1, const uint16_t * op2, vuint8m4_t op3, size_t op4){
return vloxei8_v_u16m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf2_t vloxei8(const uint16_t * op0, vuint8mf4_t op1, size_t op2){
return vloxei8_v_u16mf2(op0, op1, op2);
}
__rvv_overloaded vuint16mf2_t vloxei8(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint8mf4_t op3, size_t op4){
return vloxei8_v_u16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf4_t vloxei8(const uint16_t * op0, vuint8mf8_t op1, size_t op2){
return vloxei8_v_u16mf4(op0, op1, op2);
}
__rvv_overloaded vuint16mf4_t vloxei8(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint8mf8_t op3, size_t op4){
return vloxei8_v_u16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m1_t vloxei16(const int16_t * op0, vuint16m1_t op1, size_t op2){
return vloxei16_v_i16m1(op0, op1, op2);
}
__rvv_overloaded vint16m1_t vloxei16(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint16m1_t op3, size_t op4){
return vloxei16_v_i16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m2_t vloxei16(const int16_t * op0, vuint16m2_t op1, size_t op2){
return vloxei16_v_i16m2(op0, op1, op2);
}
__rvv_overloaded vint16m2_t vloxei16(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint16m2_t op3, size_t op4){
return vloxei16_v_i16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m4_t vloxei16(const int16_t * op0, vuint16m4_t op1, size_t op2){
return vloxei16_v_i16m4(op0, op1, op2);
}
__rvv_overloaded vint16m4_t vloxei16(vbool4_t op0, vint16m4_t op1, const int16_t * op2, vuint16m4_t op3, size_t op4){
return vloxei16_v_i16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m8_t vloxei16(const int16_t * op0, vuint16m8_t op1, size_t op2){
return vloxei16_v_i16m8(op0, op1, op2);
}
__rvv_overloaded vint16m8_t vloxei16(vbool2_t op0, vint16m8_t op1, const int16_t * op2, vuint16m8_t op3, size_t op4){
return vloxei16_v_i16m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf2_t vloxei16(const int16_t * op0, vuint16mf2_t op1, size_t op2){
return vloxei16_v_i16mf2(op0, op1, op2);
}
__rvv_overloaded vint16mf2_t vloxei16(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint16mf2_t op3, size_t op4){
return vloxei16_v_i16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf4_t vloxei16(const int16_t * op0, vuint16mf4_t op1, size_t op2){
return vloxei16_v_i16mf4(op0, op1, op2);
}
__rvv_overloaded vint16mf4_t vloxei16(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint16mf4_t op3, size_t op4){
return vloxei16_v_i16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m1_t vloxei16(const uint16_t * op0, vuint16m1_t op1, size_t op2){
return vloxei16_v_u16m1(op0, op1, op2);
}
__rvv_overloaded vuint16m1_t vloxei16(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint16m1_t op3, size_t op4){
return vloxei16_v_u16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m2_t vloxei16(const uint16_t * op0, vuint16m2_t op1, size_t op2){
return vloxei16_v_u16m2(op0, op1, op2);
}
__rvv_overloaded vuint16m2_t vloxei16(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint16m2_t op3, size_t op4){
return vloxei16_v_u16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m4_t vloxei16(const uint16_t * op0, vuint16m4_t op1, size_t op2){
return vloxei16_v_u16m4(op0, op1, op2);
}
__rvv_overloaded vuint16m4_t vloxei16(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, vuint16m4_t op3, size_t op4){
return vloxei16_v_u16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m8_t vloxei16(const uint16_t * op0, vuint16m8_t op1, size_t op2){
return vloxei16_v_u16m8(op0, op1, op2);
}
__rvv_overloaded vuint16m8_t vloxei16(vbool2_t op0, vuint16m8_t op1, const uint16_t * op2, vuint16m8_t op3, size_t op4){
return vloxei16_v_u16m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf2_t vloxei16(const uint16_t * op0, vuint16mf2_t op1, size_t op2){
return vloxei16_v_u16mf2(op0, op1, op2);
}
__rvv_overloaded vuint16mf2_t vloxei16(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint16mf2_t op3, size_t op4){
return vloxei16_v_u16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf4_t vloxei16(const uint16_t * op0, vuint16mf4_t op1, size_t op2){
return vloxei16_v_u16mf4(op0, op1, op2);
}
__rvv_overloaded vuint16mf4_t vloxei16(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint16mf4_t op3, size_t op4){
return vloxei16_v_u16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m1_t vloxei32(const int16_t * op0, vuint32m2_t op1, size_t op2){
return vloxei32_v_i16m1(op0, op1, op2);
}
__rvv_overloaded vint16m1_t vloxei32(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint32m2_t op3, size_t op4){
return vloxei32_v_i16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m2_t vloxei32(const int16_t * op0, vuint32m4_t op1, size_t op2){
return vloxei32_v_i16m2(op0, op1, op2);
}
__rvv_overloaded vint16m2_t vloxei32(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint32m4_t op3, size_t op4){
return vloxei32_v_i16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m4_t vloxei32(const int16_t * op0, vuint32m8_t op1, size_t op2){
return vloxei32_v_i16m4(op0, op1, op2);
}
__rvv_overloaded vint16m4_t vloxei32(vbool4_t op0, vint16m4_t op1, const int16_t * op2, vuint32m8_t op3, size_t op4){
return vloxei32_v_i16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf2_t vloxei32(const int16_t * op0, vuint32m1_t op1, size_t op2){
return vloxei32_v_i16mf2(op0, op1, op2);
}
__rvv_overloaded vint16mf2_t vloxei32(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint32m1_t op3, size_t op4){
return vloxei32_v_i16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf4_t vloxei32(const int16_t * op0, vuint32mf2_t op1, size_t op2){
return vloxei32_v_i16mf4(op0, op1, op2);
}
__rvv_overloaded vint16mf4_t vloxei32(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint32mf2_t op3, size_t op4){
return vloxei32_v_i16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m1_t vloxei32(const uint16_t * op0, vuint32m2_t op1, size_t op2){
return vloxei32_v_u16m1(op0, op1, op2);
}
__rvv_overloaded vuint16m1_t vloxei32(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint32m2_t op3, size_t op4){
return vloxei32_v_u16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m2_t vloxei32(const uint16_t * op0, vuint32m4_t op1, size_t op2){
return vloxei32_v_u16m2(op0, op1, op2);
}
__rvv_overloaded vuint16m2_t vloxei32(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint32m4_t op3, size_t op4){
return vloxei32_v_u16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m4_t vloxei32(const uint16_t * op0, vuint32m8_t op1, size_t op2){
return vloxei32_v_u16m4(op0, op1, op2);
}
__rvv_overloaded vuint16m4_t vloxei32(vbool4_t op0, vuint16m4_t op1, const uint16_t * op2, vuint32m8_t op3, size_t op4){
return vloxei32_v_u16m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf2_t vloxei32(const uint16_t * op0, vuint32m1_t op1, size_t op2){
return vloxei32_v_u16mf2(op0, op1, op2);
}
__rvv_overloaded vuint16mf2_t vloxei32(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint32m1_t op3, size_t op4){
return vloxei32_v_u16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf4_t vloxei32(const uint16_t * op0, vuint32mf2_t op1, size_t op2){
return vloxei32_v_u16mf4(op0, op1, op2);
}
__rvv_overloaded vuint16mf4_t vloxei32(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint32mf2_t op3, size_t op4){
return vloxei32_v_u16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m1_t vloxei64(const int16_t * op0, vuint64m4_t op1, size_t op2){
return vloxei64_v_i16m1(op0, op1, op2);
}
__rvv_overloaded vint16m1_t vloxei64(vbool16_t op0, vint16m1_t op1, const int16_t * op2, vuint64m4_t op3, size_t op4){
return vloxei64_v_i16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16m2_t vloxei64(const int16_t * op0, vuint64m8_t op1, size_t op2){
return vloxei64_v_i16m2(op0, op1, op2);
}
__rvv_overloaded vint16m2_t vloxei64(vbool8_t op0, vint16m2_t op1, const int16_t * op2, vuint64m8_t op3, size_t op4){
return vloxei64_v_i16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf2_t vloxei64(const int16_t * op0, vuint64m2_t op1, size_t op2){
return vloxei64_v_i16mf2(op0, op1, op2);
}
__rvv_overloaded vint16mf2_t vloxei64(vbool32_t op0, vint16mf2_t op1, const int16_t * op2, vuint64m2_t op3, size_t op4){
return vloxei64_v_i16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint16mf4_t vloxei64(const int16_t * op0, vuint64m1_t op1, size_t op2){
return vloxei64_v_i16mf4(op0, op1, op2);
}
__rvv_overloaded vint16mf4_t vloxei64(vbool64_t op0, vint16mf4_t op1, const int16_t * op2, vuint64m1_t op3, size_t op4){
return vloxei64_v_i16mf4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m1_t vloxei64(const uint16_t * op0, vuint64m4_t op1, size_t op2){
return vloxei64_v_u16m1(op0, op1, op2);
}
__rvv_overloaded vuint16m1_t vloxei64(vbool16_t op0, vuint16m1_t op1, const uint16_t * op2, vuint64m4_t op3, size_t op4){
return vloxei64_v_u16m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16m2_t vloxei64(const uint16_t * op0, vuint64m8_t op1, size_t op2){
return vloxei64_v_u16m2(op0, op1, op2);
}
__rvv_overloaded vuint16m2_t vloxei64(vbool8_t op0, vuint16m2_t op1, const uint16_t * op2, vuint64m8_t op3, size_t op4){
return vloxei64_v_u16m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf2_t vloxei64(const uint16_t * op0, vuint64m2_t op1, size_t op2){
return vloxei64_v_u16mf2(op0, op1, op2);
}
__rvv_overloaded vuint16mf2_t vloxei64(vbool32_t op0, vuint16mf2_t op1, const uint16_t * op2, vuint64m2_t op3, size_t op4){
return vloxei64_v_u16mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint16mf4_t vloxei64(const uint16_t * op0, vuint64m1_t op1, size_t op2){
return vloxei64_v_u16mf4(op0, op1, op2);
}
__rvv_overloaded vuint16mf4_t vloxei64(vbool64_t op0, vuint16mf4_t op1, const uint16_t * op2, vuint64m1_t op3, size_t op4){
return vloxei64_v_u16mf4_m(op0, op1, op2, op3, op4);
}
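// Indexed-ordered loads of 32-bit elements (int32_t / uint32_t data).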
__rvv_overloaded vint32m1_t vloxei8(const int32_t * op0, vuint8mf4_t op1, size_t op2){
return vloxei8_v_i32m1(op0, op1, op2);
}
__rvv_overloaded vint32m1_t vloxei8(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint8mf4_t op3, size_t op4){
return vloxei8_v_i32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m2_t vloxei8(const int32_t * op0, vuint8mf2_t op1, size_t op2){
return vloxei8_v_i32m2(op0, op1, op2);
}
__rvv_overloaded vint32m2_t vloxei8(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint8mf2_t op3, size_t op4){
return vloxei8_v_i32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m4_t vloxei8(const int32_t * op0, vuint8m1_t op1, size_t op2){
return vloxei8_v_i32m4(op0, op1, op2);
}
__rvv_overloaded vint32m4_t vloxei8(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint8m1_t op3, size_t op4){
return vloxei8_v_i32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m8_t vloxei8(const int32_t * op0, vuint8m2_t op1, size_t op2){
return vloxei8_v_i32m8(op0, op1, op2);
}
__rvv_overloaded vint32m8_t vloxei8(vbool4_t op0, vint32m8_t op1, const int32_t * op2, vuint8m2_t op3, size_t op4){
return vloxei8_v_i32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32mf2_t vloxei8(const int32_t * op0, vuint8mf8_t op1, size_t op2){
return vloxei8_v_i32mf2(op0, op1, op2);
}
__rvv_overloaded vint32mf2_t vloxei8(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint8mf8_t op3, size_t op4){
return vloxei8_v_i32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m1_t vloxei8(const uint32_t * op0, vuint8mf4_t op1, size_t op2){
return vloxei8_v_u32m1(op0, op1, op2);
}
__rvv_overloaded vuint32m1_t vloxei8(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint8mf4_t op3, size_t op4){
return vloxei8_v_u32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m2_t vloxei8(const uint32_t * op0, vuint8mf2_t op1, size_t op2){
return vloxei8_v_u32m2(op0, op1, op2);
}
__rvv_overloaded vuint32m2_t vloxei8(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint8mf2_t op3, size_t op4){
return vloxei8_v_u32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m4_t vloxei8(const uint32_t * op0, vuint8m1_t op1, size_t op2){
return vloxei8_v_u32m4(op0, op1, op2);
}
__rvv_overloaded vuint32m4_t vloxei8(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint8m1_t op3, size_t op4){
return vloxei8_v_u32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m8_t vloxei8(const uint32_t * op0, vuint8m2_t op1, size_t op2){
return vloxei8_v_u32m8(op0, op1, op2);
}
__rvv_overloaded vuint32m8_t vloxei8(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, vuint8m2_t op3, size_t op4){
return vloxei8_v_u32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32mf2_t vloxei8(const uint32_t * op0, vuint8mf8_t op1, size_t op2){
return vloxei8_v_u32mf2(op0, op1, op2);
}
__rvv_overloaded vuint32mf2_t vloxei8(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint8mf8_t op3, size_t op4){
return vloxei8_v_u32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m1_t vloxei16(const int32_t * op0, vuint16mf2_t op1, size_t op2){
return vloxei16_v_i32m1(op0, op1, op2);
}
__rvv_overloaded vint32m1_t vloxei16(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint16mf2_t op3, size_t op4){
return vloxei16_v_i32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m2_t vloxei16(const int32_t * op0, vuint16m1_t op1, size_t op2){
return vloxei16_v_i32m2(op0, op1, op2);
}
__rvv_overloaded vint32m2_t vloxei16(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint16m1_t op3, size_t op4){
return vloxei16_v_i32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m4_t vloxei16(const int32_t * op0, vuint16m2_t op1, size_t op2){
return vloxei16_v_i32m4(op0, op1, op2);
}
__rvv_overloaded vint32m4_t vloxei16(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint16m2_t op3, size_t op4){
return vloxei16_v_i32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m8_t vloxei16(const int32_t * op0, vuint16m4_t op1, size_t op2){
return vloxei16_v_i32m8(op0, op1, op2);
}
__rvv_overloaded vint32m8_t vloxei16(vbool4_t op0, vint32m8_t op1, const int32_t * op2, vuint16m4_t op3, size_t op4){
return vloxei16_v_i32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32mf2_t vloxei16(const int32_t * op0, vuint16mf4_t op1, size_t op2){
return vloxei16_v_i32mf2(op0, op1, op2);
}
__rvv_overloaded vint32mf2_t vloxei16(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint16mf4_t op3, size_t op4){
return vloxei16_v_i32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m1_t vloxei16(const uint32_t * op0, vuint16mf2_t op1, size_t op2){
return vloxei16_v_u32m1(op0, op1, op2);
}
__rvv_overloaded vuint32m1_t vloxei16(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint16mf2_t op3, size_t op4){
return vloxei16_v_u32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m2_t vloxei16(const uint32_t * op0, vuint16m1_t op1, size_t op2){
return vloxei16_v_u32m2(op0, op1, op2);
}
__rvv_overloaded vuint32m2_t vloxei16(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint16m1_t op3, size_t op4){
return vloxei16_v_u32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m4_t vloxei16(const uint32_t * op0, vuint16m2_t op1, size_t op2){
return vloxei16_v_u32m4(op0, op1, op2);
}
__rvv_overloaded vuint32m4_t vloxei16(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint16m2_t op3, size_t op4){
return vloxei16_v_u32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m8_t vloxei16(const uint32_t * op0, vuint16m4_t op1, size_t op2){
return vloxei16_v_u32m8(op0, op1, op2);
}
__rvv_overloaded vuint32m8_t vloxei16(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, vuint16m4_t op3, size_t op4){
return vloxei16_v_u32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32mf2_t vloxei16(const uint32_t * op0, vuint16mf4_t op1, size_t op2){
return vloxei16_v_u32mf2(op0, op1, op2);
}
__rvv_overloaded vuint32mf2_t vloxei16(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint16mf4_t op3, size_t op4){
return vloxei16_v_u32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m1_t vloxei32(const int32_t * op0, vuint32m1_t op1, size_t op2){
return vloxei32_v_i32m1(op0, op1, op2);
}
__rvv_overloaded vint32m1_t vloxei32(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint32m1_t op3, size_t op4){
return vloxei32_v_i32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m2_t vloxei32(const int32_t * op0, vuint32m2_t op1, size_t op2){
return vloxei32_v_i32m2(op0, op1, op2);
}
__rvv_overloaded vint32m2_t vloxei32(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint32m2_t op3, size_t op4){
return vloxei32_v_i32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m4_t vloxei32(const int32_t * op0, vuint32m4_t op1, size_t op2){
return vloxei32_v_i32m4(op0, op1, op2);
}
__rvv_overloaded vint32m4_t vloxei32(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint32m4_t op3, size_t op4){
return vloxei32_v_i32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m8_t vloxei32(const int32_t * op0, vuint32m8_t op1, size_t op2){
return vloxei32_v_i32m8(op0, op1, op2);
}
__rvv_overloaded vint32m8_t vloxei32(vbool4_t op0, vint32m8_t op1, const int32_t * op2, vuint32m8_t op3, size_t op4){
return vloxei32_v_i32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32mf2_t vloxei32(const int32_t * op0, vuint32mf2_t op1, size_t op2){
return vloxei32_v_i32mf2(op0, op1, op2);
}
__rvv_overloaded vint32mf2_t vloxei32(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint32mf2_t op3, size_t op4){
return vloxei32_v_i32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m1_t vloxei32(const uint32_t * op0, vuint32m1_t op1, size_t op2){
return vloxei32_v_u32m1(op0, op1, op2);
}
__rvv_overloaded vuint32m1_t vloxei32(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint32m1_t op3, size_t op4){
return vloxei32_v_u32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m2_t vloxei32(const uint32_t * op0, vuint32m2_t op1, size_t op2){
return vloxei32_v_u32m2(op0, op1, op2);
}
__rvv_overloaded vuint32m2_t vloxei32(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint32m2_t op3, size_t op4){
return vloxei32_v_u32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m4_t vloxei32(const uint32_t * op0, vuint32m4_t op1, size_t op2){
return vloxei32_v_u32m4(op0, op1, op2);
}
__rvv_overloaded vuint32m4_t vloxei32(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint32m4_t op3, size_t op4){
return vloxei32_v_u32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m8_t vloxei32(const uint32_t * op0, vuint32m8_t op1, size_t op2){
return vloxei32_v_u32m8(op0, op1, op2);
}
__rvv_overloaded vuint32m8_t vloxei32(vbool4_t op0, vuint32m8_t op1, const uint32_t * op2, vuint32m8_t op3, size_t op4){
return vloxei32_v_u32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32mf2_t vloxei32(const uint32_t * op0, vuint32mf2_t op1, size_t op2){
return vloxei32_v_u32mf2(op0, op1, op2);
}
__rvv_overloaded vuint32mf2_t vloxei32(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint32mf2_t op3, size_t op4){
return vloxei32_v_u32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m1_t vloxei64(const int32_t * op0, vuint64m2_t op1, size_t op2){
return vloxei64_v_i32m1(op0, op1, op2);
}
__rvv_overloaded vint32m1_t vloxei64(vbool32_t op0, vint32m1_t op1, const int32_t * op2, vuint64m2_t op3, size_t op4){
return vloxei64_v_i32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m2_t vloxei64(const int32_t * op0, vuint64m4_t op1, size_t op2){
return vloxei64_v_i32m2(op0, op1, op2);
}
__rvv_overloaded vint32m2_t vloxei64(vbool16_t op0, vint32m2_t op1, const int32_t * op2, vuint64m4_t op3, size_t op4){
return vloxei64_v_i32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32m4_t vloxei64(const int32_t * op0, vuint64m8_t op1, size_t op2){
return vloxei64_v_i32m4(op0, op1, op2);
}
__rvv_overloaded vint32m4_t vloxei64(vbool8_t op0, vint32m4_t op1, const int32_t * op2, vuint64m8_t op3, size_t op4){
return vloxei64_v_i32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint32mf2_t vloxei64(const int32_t * op0, vuint64m1_t op1, size_t op2){
return vloxei64_v_i32mf2(op0, op1, op2);
}
__rvv_overloaded vint32mf2_t vloxei64(vbool64_t op0, vint32mf2_t op1, const int32_t * op2, vuint64m1_t op3, size_t op4){
return vloxei64_v_i32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m1_t vloxei64(const uint32_t * op0, vuint64m2_t op1, size_t op2){
return vloxei64_v_u32m1(op0, op1, op2);
}
__rvv_overloaded vuint32m1_t vloxei64(vbool32_t op0, vuint32m1_t op1, const uint32_t * op2, vuint64m2_t op3, size_t op4){
return vloxei64_v_u32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m2_t vloxei64(const uint32_t * op0, vuint64m4_t op1, size_t op2){
return vloxei64_v_u32m2(op0, op1, op2);
}
__rvv_overloaded vuint32m2_t vloxei64(vbool16_t op0, vuint32m2_t op1, const uint32_t * op2, vuint64m4_t op3, size_t op4){
return vloxei64_v_u32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32m4_t vloxei64(const uint32_t * op0, vuint64m8_t op1, size_t op2){
return vloxei64_v_u32m4(op0, op1, op2);
}
__rvv_overloaded vuint32m4_t vloxei64(vbool8_t op0, vuint32m4_t op1, const uint32_t * op2, vuint64m8_t op3, size_t op4){
return vloxei64_v_u32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint32mf2_t vloxei64(const uint32_t * op0, vuint64m1_t op1, size_t op2){
return vloxei64_v_u32mf2(op0, op1, op2);
}
__rvv_overloaded vuint32mf2_t vloxei64(vbool64_t op0, vuint32mf2_t op1, const uint32_t * op2, vuint64m1_t op3, size_t op4){
return vloxei64_v_u32mf2_m(op0, op1, op2, op3, op4);
}
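// Indexed-ordered loads of 64-bit elements (int64_t / uint64_t data).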
__rvv_overloaded vint64m1_t vloxei8(const int64_t * op0, vuint8mf8_t op1, size_t op2){
return vloxei8_v_i64m1(op0, op1, op2);
}
__rvv_overloaded vint64m1_t vloxei8(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint8mf8_t op3, size_t op4){
return vloxei8_v_i64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m2_t vloxei8(const int64_t * op0, vuint8mf4_t op1, size_t op2){
return vloxei8_v_i64m2(op0, op1, op2);
}
__rvv_overloaded vint64m2_t vloxei8(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint8mf4_t op3, size_t op4){
return vloxei8_v_i64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m4_t vloxei8(const int64_t * op0, vuint8mf2_t op1, size_t op2){
return vloxei8_v_i64m4(op0, op1, op2);
}
__rvv_overloaded vint64m4_t vloxei8(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint8mf2_t op3, size_t op4){
return vloxei8_v_i64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m8_t vloxei8(const int64_t * op0, vuint8m1_t op1, size_t op2){
return vloxei8_v_i64m8(op0, op1, op2);
}
__rvv_overloaded vint64m8_t vloxei8(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint8m1_t op3, size_t op4){
return vloxei8_v_i64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m1_t vloxei8(const uint64_t * op0, vuint8mf8_t op1, size_t op2){
return vloxei8_v_u64m1(op0, op1, op2);
}
__rvv_overloaded vuint64m1_t vloxei8(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint8mf8_t op3, size_t op4){
return vloxei8_v_u64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m2_t vloxei8(const uint64_t * op0, vuint8mf4_t op1, size_t op2){
return vloxei8_v_u64m2(op0, op1, op2);
}
__rvv_overloaded vuint64m2_t vloxei8(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint8mf4_t op3, size_t op4){
return vloxei8_v_u64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m4_t vloxei8(const uint64_t * op0, vuint8mf2_t op1, size_t op2){
return vloxei8_v_u64m4(op0, op1, op2);
}
__rvv_overloaded vuint64m4_t vloxei8(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint8mf2_t op3, size_t op4){
return vloxei8_v_u64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m8_t vloxei8(const uint64_t * op0, vuint8m1_t op1, size_t op2){
return vloxei8_v_u64m8(op0, op1, op2);
}
__rvv_overloaded vuint64m8_t vloxei8(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint8m1_t op3, size_t op4){
return vloxei8_v_u64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m1_t vloxei16(const int64_t * op0, vuint16mf4_t op1, size_t op2){
return vloxei16_v_i64m1(op0, op1, op2);
}
__rvv_overloaded vint64m1_t vloxei16(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint16mf4_t op3, size_t op4){
return vloxei16_v_i64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m2_t vloxei16(const int64_t * op0, vuint16mf2_t op1, size_t op2){
return vloxei16_v_i64m2(op0, op1, op2);
}
__rvv_overloaded vint64m2_t vloxei16(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint16mf2_t op3, size_t op4){
return vloxei16_v_i64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m4_t vloxei16(const int64_t * op0, vuint16m1_t op1, size_t op2){
return vloxei16_v_i64m4(op0, op1, op2);
}
__rvv_overloaded vint64m4_t vloxei16(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint16m1_t op3, size_t op4){
return vloxei16_v_i64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m8_t vloxei16(const int64_t * op0, vuint16m2_t op1, size_t op2){
return vloxei16_v_i64m8(op0, op1, op2);
}
__rvv_overloaded vint64m8_t vloxei16(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint16m2_t op3, size_t op4){
return vloxei16_v_i64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m1_t vloxei16(const uint64_t * op0, vuint16mf4_t op1, size_t op2){
return vloxei16_v_u64m1(op0, op1, op2);
}
__rvv_overloaded vuint64m1_t vloxei16(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint16mf4_t op3, size_t op4){
return vloxei16_v_u64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m2_t vloxei16(const uint64_t * op0, vuint16mf2_t op1, size_t op2){
return vloxei16_v_u64m2(op0, op1, op2);
}
__rvv_overloaded vuint64m2_t vloxei16(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint16mf2_t op3, size_t op4){
return vloxei16_v_u64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m4_t vloxei16(const uint64_t * op0, vuint16m1_t op1, size_t op2){
return vloxei16_v_u64m4(op0, op1, op2);
}
__rvv_overloaded vuint64m4_t vloxei16(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint16m1_t op3, size_t op4){
return vloxei16_v_u64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m8_t vloxei16(const uint64_t * op0, vuint16m2_t op1, size_t op2){
return vloxei16_v_u64m8(op0, op1, op2);
}
__rvv_overloaded vuint64m8_t vloxei16(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint16m2_t op3, size_t op4){
return vloxei16_v_u64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m1_t vloxei32(const int64_t * op0, vuint32mf2_t op1, size_t op2){
return vloxei32_v_i64m1(op0, op1, op2);
}
__rvv_overloaded vint64m1_t vloxei32(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint32mf2_t op3, size_t op4){
return vloxei32_v_i64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m2_t vloxei32(const int64_t * op0, vuint32m1_t op1, size_t op2){
return vloxei32_v_i64m2(op0, op1, op2);
}
__rvv_overloaded vint64m2_t vloxei32(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint32m1_t op3, size_t op4){
return vloxei32_v_i64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m4_t vloxei32(const int64_t * op0, vuint32m2_t op1, size_t op2){
return vloxei32_v_i64m4(op0, op1, op2);
}
__rvv_overloaded vint64m4_t vloxei32(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint32m2_t op3, size_t op4){
return vloxei32_v_i64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m8_t vloxei32(const int64_t * op0, vuint32m4_t op1, size_t op2){
return vloxei32_v_i64m8(op0, op1, op2);
}
__rvv_overloaded vint64m8_t vloxei32(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint32m4_t op3, size_t op4){
return vloxei32_v_i64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m1_t vloxei32(const uint64_t * op0, vuint32mf2_t op1, size_t op2){
return vloxei32_v_u64m1(op0, op1, op2);
}
__rvv_overloaded vuint64m1_t vloxei32(vbool64_t op0, vuint64m1_t op1, const uint64_t * op2, vuint32mf2_t op3, size_t op4){
return vloxei32_v_u64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m2_t vloxei32(const uint64_t * op0, vuint32m1_t op1, size_t op2){
return vloxei32_v_u64m2(op0, op1, op2);
}
__rvv_overloaded vuint64m2_t vloxei32(vbool32_t op0, vuint64m2_t op1, const uint64_t * op2, vuint32m1_t op3, size_t op4){
return vloxei32_v_u64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m4_t vloxei32(const uint64_t * op0, vuint32m2_t op1, size_t op2){
return vloxei32_v_u64m4(op0, op1, op2);
}
__rvv_overloaded vuint64m4_t vloxei32(vbool16_t op0, vuint64m4_t op1, const uint64_t * op2, vuint32m2_t op3, size_t op4){
return vloxei32_v_u64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vuint64m8_t vloxei32(const uint64_t * op0, vuint32m4_t op1, size_t op2){
return vloxei32_v_u64m8(op0, op1, op2);
}
__rvv_overloaded vuint64m8_t vloxei32(vbool8_t op0, vuint64m8_t op1, const uint64_t * op2, vuint32m4_t op3, size_t op4){
return vloxei32_v_u64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m1_t vloxei64(const int64_t * op0, vuint64m1_t op1, size_t op2){
return vloxei64_v_i64m1(op0, op1, op2);
}
__rvv_overloaded vint64m1_t vloxei64(vbool64_t op0, vint64m1_t op1, const int64_t * op2, vuint64m1_t op3, size_t op4){
return vloxei64_v_i64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m2_t vloxei64(const int64_t * op0, vuint64m2_t op1, size_t op2){
return vloxei64_v_i64m2(op0, op1, op2);
}
__rvv_overloaded vint64m2_t vloxei64(vbool32_t op0, vint64m2_t op1, const int64_t * op2, vuint64m2_t op3, size_t op4){
return vloxei64_v_i64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m4_t vloxei64(const int64_t * op0, vuint64m4_t op1, size_t op2){
return vloxei64_v_i64m4(op0, op1, op2);
}
__rvv_overloaded vint64m4_t vloxei64(vbool16_t op0, vint64m4_t op1, const int64_t * op2, vuint64m4_t op3, size_t op4){
return vloxei64_v_i64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vint64m8_t vloxei64(const int64_t * op0, vuint64m8_t op1, size_t op2){
return vloxei64_v_i64m8(op0, op1, op2);
}
__rvv_overloaded vint64m8_t vloxei64(vbool8_t op0, vint64m8_t op1, const int64_t * op2, vuint64m8_t op3, size_t op4){
return vloxei64_v_i64m8_m(op0, op1, op2, op3, op4);
}
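// Single-precision floating-point overloads below are gated on the F extension.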
#if defined(__riscv_f)
__rvv_overloaded vfloat32m1_t vloxei8(const float * op0, vuint8mf4_t op1, size_t op2){
return vloxei8_v_f32m1(op0, op1, op2);
}
__rvv_overloaded vfloat32m1_t vloxei8(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint8mf4_t op3, size_t op4){
return vloxei8_v_f32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m2_t vloxei8(const float * op0, vuint8mf2_t op1, size_t op2){
return vloxei8_v_f32m2(op0, op1, op2);
}
__rvv_overloaded vfloat32m2_t vloxei8(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint8mf2_t op3, size_t op4){
return vloxei8_v_f32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m4_t vloxei8(const float * op0, vuint8m1_t op1, size_t op2){
return vloxei8_v_f32m4(op0, op1, op2);
}
__rvv_overloaded vfloat32m4_t vloxei8(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint8m1_t op3, size_t op4){
return vloxei8_v_f32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m8_t vloxei8(const float * op0, vuint8m2_t op1, size_t op2){
return vloxei8_v_f32m8(op0, op1, op2);
}
__rvv_overloaded vfloat32m8_t vloxei8(vbool4_t op0, vfloat32m8_t op1, const float * op2, vuint8m2_t op3, size_t op4){
return vloxei8_v_f32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32mf2_t vloxei8(const float * op0, vuint8mf8_t op1, size_t op2){
return vloxei8_v_f32mf2(op0, op1, op2);
}
__rvv_overloaded vfloat32mf2_t vloxei8(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint8mf8_t op3, size_t op4){
return vloxei8_v_f32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m1_t vloxei16(const float * op0, vuint16mf2_t op1, size_t op2){
return vloxei16_v_f32m1(op0, op1, op2);
}
__rvv_overloaded vfloat32m1_t vloxei16(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint16mf2_t op3, size_t op4){
return vloxei16_v_f32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m2_t vloxei16(const float * op0, vuint16m1_t op1, size_t op2){
return vloxei16_v_f32m2(op0, op1, op2);
}
__rvv_overloaded vfloat32m2_t vloxei16(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint16m1_t op3, size_t op4){
return vloxei16_v_f32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m4_t vloxei16(const float * op0, vuint16m2_t op1, size_t op2){
return vloxei16_v_f32m4(op0, op1, op2);
}
__rvv_overloaded vfloat32m4_t vloxei16(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint16m2_t op3, size_t op4){
return vloxei16_v_f32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m8_t vloxei16(const float * op0, vuint16m4_t op1, size_t op2){
return vloxei16_v_f32m8(op0, op1, op2);
}
__rvv_overloaded vfloat32m8_t vloxei16(vbool4_t op0, vfloat32m8_t op1, const float * op2, vuint16m4_t op3, size_t op4){
return vloxei16_v_f32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32mf2_t vloxei16(const float * op0, vuint16mf4_t op1, size_t op2){
return vloxei16_v_f32mf2(op0, op1, op2);
}
__rvv_overloaded vfloat32mf2_t vloxei16(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint16mf4_t op3, size_t op4){
return vloxei16_v_f32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m1_t vloxei32(const float * op0, vuint32m1_t op1, size_t op2){
return vloxei32_v_f32m1(op0, op1, op2);
}
__rvv_overloaded vfloat32m1_t vloxei32(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint32m1_t op3, size_t op4){
return vloxei32_v_f32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m2_t vloxei32(const float * op0, vuint32m2_t op1, size_t op2){
return vloxei32_v_f32m2(op0, op1, op2);
}
__rvv_overloaded vfloat32m2_t vloxei32(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint32m2_t op3, size_t op4){
return vloxei32_v_f32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m4_t vloxei32(const float * op0, vuint32m4_t op1, size_t op2){
return vloxei32_v_f32m4(op0, op1, op2);
}
__rvv_overloaded vfloat32m4_t vloxei32(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint32m4_t op3, size_t op4){
return vloxei32_v_f32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m8_t vloxei32(const float * op0, vuint32m8_t op1, size_t op2){
return vloxei32_v_f32m8(op0, op1, op2);
}
__rvv_overloaded vfloat32m8_t vloxei32(vbool4_t op0, vfloat32m8_t op1, const float * op2, vuint32m8_t op3, size_t op4){
return vloxei32_v_f32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32mf2_t vloxei32(const float * op0, vuint32mf2_t op1, size_t op2){
return vloxei32_v_f32mf2(op0, op1, op2);
}
__rvv_overloaded vfloat32mf2_t vloxei32(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint32mf2_t op3, size_t op4){
return vloxei32_v_f32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m1_t vloxei64(const float * op0, vuint64m2_t op1, size_t op2){
return vloxei64_v_f32m1(op0, op1, op2);
}
__rvv_overloaded vfloat32m1_t vloxei64(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint64m2_t op3, size_t op4){
return vloxei64_v_f32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m2_t vloxei64(const float * op0, vuint64m4_t op1, size_t op2){
return vloxei64_v_f32m2(op0, op1, op2);
}
__rvv_overloaded vfloat32m2_t vloxei64(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint64m4_t op3, size_t op4){
return vloxei64_v_f32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m4_t vloxei64(const float * op0, vuint64m8_t op1, size_t op2){
return vloxei64_v_f32m4(op0, op1, op2);
}
__rvv_overloaded vfloat32m4_t vloxei64(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint64m8_t op3, size_t op4){
return vloxei64_v_f32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32mf2_t vloxei64(const float * op0, vuint64m1_t op1, size_t op2){
return vloxei64_v_f32mf2(op0, op1, op2);
}
__rvv_overloaded vfloat32mf2_t vloxei64(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint64m1_t op3, size_t op4){
return vloxei64_v_f32mf2_m(op0, op1, op2, op3, op4);
}
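// vfadd: single-precision floating-point add; vector-vector forms first, then vector-scalar (float)
// forms, each with a masked (_m) variant.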
__rvv_overloaded vfloat32m1_t vfadd(vfloat32m1_t op0, vfloat32m1_t op1, size_t op2){
return vfadd_vv_f32m1(op0, op1, op2);
}
__rvv_overloaded vfloat32m1_t vfadd(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, vfloat32m1_t op3, size_t op4){
return vfadd_vv_f32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m2_t vfadd(vfloat32m2_t op0, vfloat32m2_t op1, size_t op2){
return vfadd_vv_f32m2(op0, op1, op2);
}
__rvv_overloaded vfloat32m2_t vfadd(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, vfloat32m2_t op3, size_t op4){
return vfadd_vv_f32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m4_t vfadd(vfloat32m4_t op0, vfloat32m4_t op1, size_t op2){
return vfadd_vv_f32m4(op0, op1, op2);
}
__rvv_overloaded vfloat32m4_t vfadd(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, vfloat32m4_t op3, size_t op4){
return vfadd_vv_f32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m8_t vfadd(vfloat32m8_t op0, vfloat32m8_t op1, size_t op2){
return vfadd_vv_f32m8(op0, op1, op2);
}
__rvv_overloaded vfloat32m8_t vfadd(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, vfloat32m8_t op3, size_t op4){
return vfadd_vv_f32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32mf2_t vfadd(vfloat32mf2_t op0, vfloat32mf2_t op1, size_t op2){
return vfadd_vv_f32mf2(op0, op1, op2);
}
__rvv_overloaded vfloat32mf2_t vfadd(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, vfloat32mf2_t op3, size_t op4){
return vfadd_vv_f32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m1_t vfadd(vfloat32m1_t op0, float op1, size_t op2){
return vfadd_vf_f32m1(op0, op1, op2);
}
__rvv_overloaded vfloat32m1_t vfadd(vbool32_t op0, vfloat32m1_t op1, vfloat32m1_t op2, float op3, size_t op4){
return vfadd_vf_f32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m2_t vfadd(vfloat32m2_t op0, float op1, size_t op2){
return vfadd_vf_f32m2(op0, op1, op2);
}
__rvv_overloaded vfloat32m2_t vfadd(vbool16_t op0, vfloat32m2_t op1, vfloat32m2_t op2, float op3, size_t op4){
return vfadd_vf_f32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m4_t vfadd(vfloat32m4_t op0, float op1, size_t op2){
return vfadd_vf_f32m4(op0, op1, op2);
}
__rvv_overloaded vfloat32m4_t vfadd(vbool8_t op0, vfloat32m4_t op1, vfloat32m4_t op2, float op3, size_t op4){
return vfadd_vf_f32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m8_t vfadd(vfloat32m8_t op0, float op1, size_t op2){
return vfadd_vf_f32m8(op0, op1, op2);
}
__rvv_overloaded vfloat32m8_t vfadd(vbool4_t op0, vfloat32m8_t op1, vfloat32m8_t op2, float op3, size_t op4){
return vfadd_vf_f32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32mf2_t vfadd(vfloat32mf2_t op0, float op1, size_t op2){
return vfadd_vf_f32mf2(op0, op1, op2);
}
__rvv_overloaded vfloat32mf2_t vfadd(vbool64_t op0, vfloat32mf2_t op1, vfloat32mf2_t op2, float op3, size_t op4){
return vfadd_vf_f32mf2_m(op0, op1, op2, op3, op4);
}
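// vle32: masked unit-stride loads of 32-bit float elements.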
__rvv_overloaded vfloat32m1_t vle32(vbool32_t op0, vfloat32m1_t op1, const float * op2, size_t op3){
return vle32_v_f32m1_m(op0, op1, op2, op3);
}
__rvv_overloaded vfloat32m2_t vle32(vbool16_t op0, vfloat32m2_t op1, const float * op2, size_t op3){
return vle32_v_f32m2_m(op0, op1, op2, op3);
}
__rvv_overloaded vfloat32m4_t vle32(vbool8_t op0, vfloat32m4_t op1, const float * op2, size_t op3){
return vle32_v_f32m4_m(op0, op1, op2, op3);
}
__rvv_overloaded vfloat32m8_t vle32(vbool4_t op0, vfloat32m8_t op1, const float * op2, size_t op3){
return vle32_v_f32m8_m(op0, op1, op2, op3);
}
__rvv_overloaded vfloat32mf2_t vle32(vbool64_t op0, vfloat32mf2_t op1, const float * op2, size_t op3){
return vle32_v_f32mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(float * op0, vfloat32m1_t op1, size_t op2){
return vse32_v_f32m1(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool32_t op0, float * op1, vfloat32m1_t op2, size_t op3){
return vse32_v_f32m1_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(float * op0, vfloat32m2_t op1, size_t op2){
return vse32_v_f32m2(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool16_t op0, float * op1, vfloat32m2_t op2, size_t op3){
return vse32_v_f32m2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(float * op0, vfloat32m4_t op1, size_t op2){
return vse32_v_f32m4(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool8_t op0, float * op1, vfloat32m4_t op2, size_t op3){
return vse32_v_f32m4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(float * op0, vfloat32m8_t op1, size_t op2){
return vse32_v_f32m8(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool4_t op0, float * op1, vfloat32m8_t op2, size_t op3){
return vse32_v_f32m8_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse32(float * op0, vfloat32mf2_t op1, size_t op2){
return vse32_v_f32mf2(op0, op1, op2);
}
__rvv_overloaded void vse32(vbool64_t op0, float * op1, vfloat32mf2_t op2, size_t op3){
return vse32_v_f32mf2_m(op0, op1, op2, op3);
}
__rvv_overloaded vfloat32m1_t vluxei8(const float * op0, vuint8mf4_t op1, size_t op2){
return vluxei8_v_f32m1(op0, op1, op2);
}
__rvv_overloaded vfloat32m1_t vluxei8(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint8mf4_t op3, size_t op4){
return vluxei8_v_f32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m2_t vluxei8(const float * op0, vuint8mf2_t op1, size_t op2){
return vluxei8_v_f32m2(op0, op1, op2);
}
__rvv_overloaded vfloat32m2_t vluxei8(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint8mf2_t op3, size_t op4){
return vluxei8_v_f32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m4_t vluxei8(const float * op0, vuint8m1_t op1, size_t op2){
return vluxei8_v_f32m4(op0, op1, op2);
}
__rvv_overloaded vfloat32m4_t vluxei8(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint8m1_t op3, size_t op4){
return vluxei8_v_f32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m8_t vluxei8(const float * op0, vuint8m2_t op1, size_t op2){
return vluxei8_v_f32m8(op0, op1, op2);
}
__rvv_overloaded vfloat32m8_t vluxei8(vbool4_t op0, vfloat32m8_t op1, const float * op2, vuint8m2_t op3, size_t op4){
return vluxei8_v_f32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32mf2_t vluxei8(const float * op0, vuint8mf8_t op1, size_t op2){
return vluxei8_v_f32mf2(op0, op1, op2);
}
__rvv_overloaded vfloat32mf2_t vluxei8(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint8mf8_t op3, size_t op4){
return vluxei8_v_f32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m1_t vluxei16(const float * op0, vuint16mf2_t op1, size_t op2){
return vluxei16_v_f32m1(op0, op1, op2);
}
__rvv_overloaded vfloat32m1_t vluxei16(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint16mf2_t op3, size_t op4){
return vluxei16_v_f32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m2_t vluxei16(const float * op0, vuint16m1_t op1, size_t op2){
return vluxei16_v_f32m2(op0, op1, op2);
}
__rvv_overloaded vfloat32m2_t vluxei16(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint16m1_t op3, size_t op4){
return vluxei16_v_f32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m4_t vluxei16(const float * op0, vuint16m2_t op1, size_t op2){
return vluxei16_v_f32m4(op0, op1, op2);
}
__rvv_overloaded vfloat32m4_t vluxei16(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint16m2_t op3, size_t op4){
return vluxei16_v_f32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m8_t vluxei16(const float * op0, vuint16m4_t op1, size_t op2){
return vluxei16_v_f32m8(op0, op1, op2);
}
__rvv_overloaded vfloat32m8_t vluxei16(vbool4_t op0, vfloat32m8_t op1, const float * op2, vuint16m4_t op3, size_t op4){
return vluxei16_v_f32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32mf2_t vluxei16(const float * op0, vuint16mf4_t op1, size_t op2){
return vluxei16_v_f32mf2(op0, op1, op2);
}
__rvv_overloaded vfloat32mf2_t vluxei16(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint16mf4_t op3, size_t op4){
return vluxei16_v_f32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m1_t vluxei32(const float * op0, vuint32m1_t op1, size_t op2){
return vluxei32_v_f32m1(op0, op1, op2);
}
__rvv_overloaded vfloat32m1_t vluxei32(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint32m1_t op3, size_t op4){
return vluxei32_v_f32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m2_t vluxei32(const float * op0, vuint32m2_t op1, size_t op2){
return vluxei32_v_f32m2(op0, op1, op2);
}
__rvv_overloaded vfloat32m2_t vluxei32(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint32m2_t op3, size_t op4){
return vluxei32_v_f32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m4_t vluxei32(const float * op0, vuint32m4_t op1, size_t op2){
return vluxei32_v_f32m4(op0, op1, op2);
}
__rvv_overloaded vfloat32m4_t vluxei32(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint32m4_t op3, size_t op4){
return vluxei32_v_f32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m8_t vluxei32(const float * op0, vuint32m8_t op1, size_t op2){
return vluxei32_v_f32m8(op0, op1, op2);
}
__rvv_overloaded vfloat32m8_t vluxei32(vbool4_t op0, vfloat32m8_t op1, const float * op2, vuint32m8_t op3, size_t op4){
return vluxei32_v_f32m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32mf2_t vluxei32(const float * op0, vuint32mf2_t op1, size_t op2){
return vluxei32_v_f32mf2(op0, op1, op2);
}
__rvv_overloaded vfloat32mf2_t vluxei32(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint32mf2_t op3, size_t op4){
return vluxei32_v_f32mf2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m1_t vluxei64(const float * op0, vuint64m2_t op1, size_t op2){
return vluxei64_v_f32m1(op0, op1, op2);
}
__rvv_overloaded vfloat32m1_t vluxei64(vbool32_t op0, vfloat32m1_t op1, const float * op2, vuint64m2_t op3, size_t op4){
return vluxei64_v_f32m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m2_t vluxei64(const float * op0, vuint64m4_t op1, size_t op2){
return vluxei64_v_f32m2(op0, op1, op2);
}
__rvv_overloaded vfloat32m2_t vluxei64(vbool16_t op0, vfloat32m2_t op1, const float * op2, vuint64m4_t op3, size_t op4){
return vluxei64_v_f32m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32m4_t vluxei64(const float * op0, vuint64m8_t op1, size_t op2){
return vluxei64_v_f32m4(op0, op1, op2);
}
__rvv_overloaded vfloat32m4_t vluxei64(vbool8_t op0, vfloat32m4_t op1, const float * op2, vuint64m8_t op3, size_t op4){
return vluxei64_v_f32m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat32mf2_t vluxei64(const float * op0, vuint64m1_t op1, size_t op2){
return vluxei64_v_f32mf2(op0, op1, op2);
}
__rvv_overloaded vfloat32mf2_t vluxei64(vbool64_t op0, vfloat32mf2_t op1, const float * op2, vuint64m1_t op3, size_t op4){
return vluxei64_v_f32mf2_m(op0, op1, op2, op3, op4);
}
#endif
#if defined(__riscv_d)
__rvv_overloaded vfloat64m1_t vloxei8(const double * op0, vuint8mf8_t op1, size_t op2){
return vloxei8_v_f64m1(op0, op1, op2);
}
__rvv_overloaded vfloat64m1_t vloxei8(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint8mf8_t op3, size_t op4){
return vloxei8_v_f64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m2_t vloxei8(const double * op0, vuint8mf4_t op1, size_t op2){
return vloxei8_v_f64m2(op0, op1, op2);
}
__rvv_overloaded vfloat64m2_t vloxei8(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint8mf4_t op3, size_t op4){
return vloxei8_v_f64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m4_t vloxei8(const double * op0, vuint8mf2_t op1, size_t op2){
return vloxei8_v_f64m4(op0, op1, op2);
}
__rvv_overloaded vfloat64m4_t vloxei8(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint8mf2_t op3, size_t op4){
return vloxei8_v_f64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m8_t vloxei8(const double * op0, vuint8m1_t op1, size_t op2){
return vloxei8_v_f64m8(op0, op1, op2);
}
__rvv_overloaded vfloat64m8_t vloxei8(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint8m1_t op3, size_t op4){
return vloxei8_v_f64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m1_t vloxei16(const double * op0, vuint16mf4_t op1, size_t op2){
return vloxei16_v_f64m1(op0, op1, op2);
}
__rvv_overloaded vfloat64m1_t vloxei16(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint16mf4_t op3, size_t op4){
return vloxei16_v_f64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m2_t vloxei16(const double * op0, vuint16mf2_t op1, size_t op2){
return vloxei16_v_f64m2(op0, op1, op2);
}
__rvv_overloaded vfloat64m2_t vloxei16(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint16mf2_t op3, size_t op4){
return vloxei16_v_f64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m4_t vloxei16(const double * op0, vuint16m1_t op1, size_t op2){
return vloxei16_v_f64m4(op0, op1, op2);
}
__rvv_overloaded vfloat64m4_t vloxei16(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint16m1_t op3, size_t op4){
return vloxei16_v_f64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m8_t vloxei16(const double * op0, vuint16m2_t op1, size_t op2){
return vloxei16_v_f64m8(op0, op1, op2);
}
__rvv_overloaded vfloat64m8_t vloxei16(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint16m2_t op3, size_t op4){
return vloxei16_v_f64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m1_t vloxei32(const double * op0, vuint32mf2_t op1, size_t op2){
return vloxei32_v_f64m1(op0, op1, op2);
}
__rvv_overloaded vfloat64m1_t vloxei32(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint32mf2_t op3, size_t op4){
return vloxei32_v_f64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m2_t vloxei32(const double * op0, vuint32m1_t op1, size_t op2){
return vloxei32_v_f64m2(op0, op1, op2);
}
__rvv_overloaded vfloat64m2_t vloxei32(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint32m1_t op3, size_t op4){
return vloxei32_v_f64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m4_t vloxei32(const double * op0, vuint32m2_t op1, size_t op2){
return vloxei32_v_f64m4(op0, op1, op2);
}
__rvv_overloaded vfloat64m4_t vloxei32(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint32m2_t op3, size_t op4){
return vloxei32_v_f64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m8_t vloxei32(const double * op0, vuint32m4_t op1, size_t op2){
return vloxei32_v_f64m8(op0, op1, op2);
}
__rvv_overloaded vfloat64m8_t vloxei32(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint32m4_t op3, size_t op4){
return vloxei32_v_f64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m1_t vloxei64(const double * op0, vuint64m1_t op1, size_t op2){
return vloxei64_v_f64m1(op0, op1, op2);
}
__rvv_overloaded vfloat64m1_t vloxei64(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint64m1_t op3, size_t op4){
return vloxei64_v_f64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m2_t vloxei64(const double * op0, vuint64m2_t op1, size_t op2){
return vloxei64_v_f64m2(op0, op1, op2);
}
__rvv_overloaded vfloat64m2_t vloxei64(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint64m2_t op3, size_t op4){
return vloxei64_v_f64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m4_t vloxei64(const double * op0, vuint64m4_t op1, size_t op2){
return vloxei64_v_f64m4(op0, op1, op2);
}
__rvv_overloaded vfloat64m4_t vloxei64(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint64m4_t op3, size_t op4){
return vloxei64_v_f64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m8_t vloxei64(const double * op0, vuint64m8_t op1, size_t op2){
return vloxei64_v_f64m8(op0, op1, op2);
}
__rvv_overloaded vfloat64m8_t vloxei64(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint64m8_t op3, size_t op4){
return vloxei64_v_f64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m1_t vfadd(vfloat64m1_t op0, vfloat64m1_t op1, size_t op2){
return vfadd_vv_f64m1(op0, op1, op2);
}
__rvv_overloaded vfloat64m1_t vfadd(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, vfloat64m1_t op3, size_t op4){
return vfadd_vv_f64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m2_t vfadd(vfloat64m2_t op0, vfloat64m2_t op1, size_t op2){
return vfadd_vv_f64m2(op0, op1, op2);
}
__rvv_overloaded vfloat64m2_t vfadd(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, vfloat64m2_t op3, size_t op4){
return vfadd_vv_f64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m4_t vfadd(vfloat64m4_t op0, vfloat64m4_t op1, size_t op2){
return vfadd_vv_f64m4(op0, op1, op2);
}
__rvv_overloaded vfloat64m4_t vfadd(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, vfloat64m4_t op3, size_t op4){
return vfadd_vv_f64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m8_t vfadd(vfloat64m8_t op0, vfloat64m8_t op1, size_t op2){
return vfadd_vv_f64m8(op0, op1, op2);
}
__rvv_overloaded vfloat64m8_t vfadd(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, vfloat64m8_t op3, size_t op4){
return vfadd_vv_f64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m1_t vfadd(vfloat64m1_t op0, double op1, size_t op2){
return vfadd_vf_f64m1(op0, op1, op2);
}
__rvv_overloaded vfloat64m1_t vfadd(vbool64_t op0, vfloat64m1_t op1, vfloat64m1_t op2, double op3, size_t op4){
return vfadd_vf_f64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m2_t vfadd(vfloat64m2_t op0, double op1, size_t op2){
return vfadd_vf_f64m2(op0, op1, op2);
}
__rvv_overloaded vfloat64m2_t vfadd(vbool32_t op0, vfloat64m2_t op1, vfloat64m2_t op2, double op3, size_t op4){
return vfadd_vf_f64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m4_t vfadd(vfloat64m4_t op0, double op1, size_t op2){
return vfadd_vf_f64m4(op0, op1, op2);
}
__rvv_overloaded vfloat64m4_t vfadd(vbool16_t op0, vfloat64m4_t op1, vfloat64m4_t op2, double op3, size_t op4){
return vfadd_vf_f64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m8_t vfadd(vfloat64m8_t op0, double op1, size_t op2){
return vfadd_vf_f64m8(op0, op1, op2);
}
__rvv_overloaded vfloat64m8_t vfadd(vbool8_t op0, vfloat64m8_t op1, vfloat64m8_t op2, double op3, size_t op4){
return vfadd_vf_f64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m1_t vle64(vbool64_t op0, vfloat64m1_t op1, const double * op2, size_t op3){
return vle64_v_f64m1_m(op0, op1, op2, op3);
}
__rvv_overloaded vfloat64m2_t vle64(vbool32_t op0, vfloat64m2_t op1, const double * op2, size_t op3){
return vle64_v_f64m2_m(op0, op1, op2, op3);
}
__rvv_overloaded vfloat64m4_t vle64(vbool16_t op0, vfloat64m4_t op1, const double * op2, size_t op3){
return vle64_v_f64m4_m(op0, op1, op2, op3);
}
__rvv_overloaded vfloat64m8_t vle64(vbool8_t op0, vfloat64m8_t op1, const double * op2, size_t op3){
return vle64_v_f64m8_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse64(double * op0, vfloat64m1_t op1, size_t op2){
return vse64_v_f64m1(op0, op1, op2);
}
__rvv_overloaded void vse64(vbool64_t op0, double * op1, vfloat64m1_t op2, size_t op3){
return vse64_v_f64m1_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse64(double * op0, vfloat64m2_t op1, size_t op2){
return vse64_v_f64m2(op0, op1, op2);
}
__rvv_overloaded void vse64(vbool32_t op0, double * op1, vfloat64m2_t op2, size_t op3){
return vse64_v_f64m2_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse64(double * op0, vfloat64m4_t op1, size_t op2){
return vse64_v_f64m4(op0, op1, op2);
}
__rvv_overloaded void vse64(vbool16_t op0, double * op1, vfloat64m4_t op2, size_t op3){
return vse64_v_f64m4_m(op0, op1, op2, op3);
}
__rvv_overloaded void vse64(double * op0, vfloat64m8_t op1, size_t op2){
return vse64_v_f64m8(op0, op1, op2);
}
__rvv_overloaded void vse64(vbool8_t op0, double * op1, vfloat64m8_t op2, size_t op3){
return vse64_v_f64m8_m(op0, op1, op2, op3);
}
__rvv_overloaded vfloat64m1_t vluxei8(const double * op0, vuint8mf8_t op1, size_t op2){
return vluxei8_v_f64m1(op0, op1, op2);
}
__rvv_overloaded vfloat64m1_t vluxei8(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint8mf8_t op3, size_t op4){
return vluxei8_v_f64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m2_t vluxei8(const double * op0, vuint8mf4_t op1, size_t op2){
return vluxei8_v_f64m2(op0, op1, op2);
}
__rvv_overloaded vfloat64m2_t vluxei8(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint8mf4_t op3, size_t op4){
return vluxei8_v_f64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m4_t vluxei8(const double * op0, vuint8mf2_t op1, size_t op2){
return vluxei8_v_f64m4(op0, op1, op2);
}
__rvv_overloaded vfloat64m4_t vluxei8(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint8mf2_t op3, size_t op4){
return vluxei8_v_f64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m8_t vluxei8(const double * op0, vuint8m1_t op1, size_t op2){
return vluxei8_v_f64m8(op0, op1, op2);
}
__rvv_overloaded vfloat64m8_t vluxei8(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint8m1_t op3, size_t op4){
return vluxei8_v_f64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m1_t vluxei16(const double * op0, vuint16mf4_t op1, size_t op2){
return vluxei16_v_f64m1(op0, op1, op2);
}
__rvv_overloaded vfloat64m1_t vluxei16(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint16mf4_t op3, size_t op4){
return vluxei16_v_f64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m2_t vluxei16(const double * op0, vuint16mf2_t op1, size_t op2){
return vluxei16_v_f64m2(op0, op1, op2);
}
__rvv_overloaded vfloat64m2_t vluxei16(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint16mf2_t op3, size_t op4){
return vluxei16_v_f64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m4_t vluxei16(const double * op0, vuint16m1_t op1, size_t op2){
return vluxei16_v_f64m4(op0, op1, op2);
}
__rvv_overloaded vfloat64m4_t vluxei16(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint16m1_t op3, size_t op4){
return vluxei16_v_f64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m8_t vluxei16(const double * op0, vuint16m2_t op1, size_t op2){
return vluxei16_v_f64m8(op0, op1, op2);
}
__rvv_overloaded vfloat64m8_t vluxei16(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint16m2_t op3, size_t op4){
return vluxei16_v_f64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m1_t vluxei32(const double * op0, vuint32mf2_t op1, size_t op2){
return vluxei32_v_f64m1(op0, op1, op2);
}
__rvv_overloaded vfloat64m1_t vluxei32(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint32mf2_t op3, size_t op4){
return vluxei32_v_f64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m2_t vluxei32(const double * op0, vuint32m1_t op1, size_t op2){
return vluxei32_v_f64m2(op0, op1, op2);
}
__rvv_overloaded vfloat64m2_t vluxei32(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint32m1_t op3, size_t op4){
return vluxei32_v_f64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m4_t vluxei32(const double * op0, vuint32m2_t op1, size_t op2){
return vluxei32_v_f64m4(op0, op1, op2);
}
__rvv_overloaded vfloat64m4_t vluxei32(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint32m2_t op3, size_t op4){
return vluxei32_v_f64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m8_t vluxei32(const double * op0, vuint32m4_t op1, size_t op2){
return vluxei32_v_f64m8(op0, op1, op2);
}
__rvv_overloaded vfloat64m8_t vluxei32(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint32m4_t op3, size_t op4){
return vluxei32_v_f64m8_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m1_t vluxei64(const double * op0, vuint64m1_t op1, size_t op2){
return vluxei64_v_f64m1(op0, op1, op2);
}
__rvv_overloaded vfloat64m1_t vluxei64(vbool64_t op0, vfloat64m1_t op1, const double * op2, vuint64m1_t op3, size_t op4){
return vluxei64_v_f64m1_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m2_t vluxei64(const double * op0, vuint64m2_t op1, size_t op2){
return vluxei64_v_f64m2(op0, op1, op2);
}
__rvv_overloaded vfloat64m2_t vluxei64(vbool32_t op0, vfloat64m2_t op1, const double * op2, vuint64m2_t op3, size_t op4){
return vluxei64_v_f64m2_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m4_t vluxei64(const double * op0, vuint64m4_t op1, size_t op2){
return vluxei64_v_f64m4(op0, op1, op2);
}
__rvv_overloaded vfloat64m4_t vluxei64(vbool16_t op0, vfloat64m4_t op1, const double * op2, vuint64m4_t op3, size_t op4){
return vluxei64_v_f64m4_m(op0, op1, op2, op3, op4);
}
__rvv_overloaded vfloat64m8_t vluxei64(const double * op0, vuint64m8_t op1, size_t op2){
return vluxei64_v_f64m8(op0, op1, op2);
}
__rvv_overloaded vfloat64m8_t vluxei64(vbool8_t op0, vfloat64m8_t op1, const double * op2, vuint64m8_t op3, size_t op4){
return vluxei64_v_f64m8_m(op0, op1, op2, op3, op4);
}
#endif
#ifdef __cplusplus
}
#endif // __riscv_vector
#endif // __RISCV_VECTOR_H
| 202,915 |
4,184 | <filename>src/views_vtab.cc
/**
* Copyright (c) 2015, <NAME>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of Timothy Stack nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include <assert.h>
#include <unistd.h>
#include <string.h>
#include "lnav.hh"
#include "base/injector.bind.hh"
#include "base/lnav_log.hh"
#include "sql_util.hh"
#include "views_vtab.hh"
#include "view_curses.hh"
using namespace std;
template<>
struct from_sqlite<lnav_view_t> {
inline lnav_view_t operator()(int argc, sqlite3_value **val, int argi) {
const char *view_name = (const char *) sqlite3_value_text(val[argi]);
auto view_index_opt = view_from_string(view_name);
if (!view_index_opt) {
throw from_sqlite_conversion_error("lnav view name", argi);
}
return view_index_opt.value();
}
};
template<>
struct from_sqlite<text_filter::type_t> {
inline text_filter::type_t operator()(int argc, sqlite3_value **val, int argi) {
const char *type_name = (const char *) sqlite3_value_text(val[argi]);
if (strcasecmp(type_name, "in") == 0) {
return text_filter::INCLUDE;
} else if (strcasecmp(type_name, "out") == 0) {
return text_filter::EXCLUDE;
}
throw from_sqlite_conversion_error("filter type", argi);
}
};
template<>
struct from_sqlite<pair<string, auto_mem<pcre>>> {
inline pair<string, auto_mem<pcre>> operator()(int argc, sqlite3_value **val, int argi) {
const char *pattern = (const char *) sqlite3_value_text(val[argi]);
const char *errptr;
auto_mem<pcre> code;
int eoff;
if (pattern == nullptr || pattern[0] == '\0') {
throw from_sqlite_conversion_error("non-empty pattern", argi);
}
code = pcre_compile(pattern,
PCRE_CASELESS,
&errptr,
&eoff,
nullptr);
if (code == nullptr) {
throw sqlite_func_error(
"Invalid regular expression in column {}: {} at offset {}",
argi, errptr, eoff);
}
return make_pair(string(pattern), std::move(code));
}
};
struct lnav_views : public tvt_iterator_cursor<lnav_views> {
static constexpr const char *NAME = "lnav_views";
static constexpr const char *CREATE_STMT = R"(
-- Access lnav's views through this table.
CREATE TABLE lnav_views (
name TEXT PRIMARY KEY, -- The name of the view.
top INTEGER, -- The number of the line at the top of the view, starting from zero.
left INTEGER, -- The left position of the viewport.
height INTEGER, -- The height of the viewport.
inner_height INTEGER, -- The number of lines in the view.
top_time DATETIME, -- The time of the top line in the view, if the content is time-based.
top_file TEXT, -- The file the top line is from.
paused INTEGER, -- Indicates if the view is paused and will not load new data.
search TEXT, -- The text to search for in the view.
filtering INTEGER -- Indicates if the view is applying filters.
);
)";
using iterator = textview_curses *;
iterator begin() {
return std::begin(lnav_data.ld_views);
}
iterator end() {
return std::end(lnav_data.ld_views);
}
int get_column(cursor &vc, sqlite3_context *ctx, int col) {
lnav_view_t view_index = (lnav_view_t) distance(std::begin(lnav_data.ld_views), vc.iter);
textview_curses &tc = *vc.iter;
unsigned long width;
vis_line_t height;
tc.get_dimensions(height, width);
switch (col) {
case 0:
sqlite3_result_text(ctx,
lnav_view_strings[view_index], -1,
SQLITE_STATIC);
break;
case 1:
sqlite3_result_int(ctx, (int) tc.get_top());
break;
case 2:
sqlite3_result_int(ctx, tc.get_left());
break;
case 3:
sqlite3_result_int(ctx, height);
break;
case 4:
sqlite3_result_int(ctx, tc.get_inner_height());
break;
case 5: {
auto *time_source = dynamic_cast<text_time_translator *>(tc.get_sub_source());
if (time_source != nullptr && tc.get_inner_height() > 0) {
auto top_time_opt = time_source->time_for_row(tc.get_top());
if (top_time_opt) {
char timestamp[64];
sql_strftime(timestamp, sizeof(timestamp), top_time_opt.value(), 'T');
sqlite3_result_text(ctx, timestamp, -1, SQLITE_TRANSIENT);
} else {
sqlite3_result_null(ctx);
}
} else {
sqlite3_result_null(ctx);
}
break;
}
case 6: {
to_sqlite(ctx, tc.map_top_row([](const auto& al) {
return get_string_attr(al.get_attrs(), &logline::L_FILE) | [](const auto* sa) {
auto lf = (logfile *) sa->sa_value.sav_ptr;
return nonstd::make_optional(lf->get_filename());
};
}));
break;
}
case 7:
sqlite3_result_int(ctx, tc.is_paused());
break;
case 8:
to_sqlite(ctx, tc.get_current_search());
break;
case 9: {
auto tss = tc.get_sub_source();
if (tss != nullptr && tss->tss_supports_filtering) {
sqlite3_result_int(ctx, tss->tss_apply_filters);
} else {
sqlite3_result_int(ctx, 0);
}
break;
}
}
return SQLITE_OK;
}
int delete_row(sqlite3_vtab *tab, sqlite3_int64 rowid) {
tab->zErrMsg = sqlite3_mprintf(
"Rows cannot be deleted from the lnav_views table");
return SQLITE_ERROR;
}
int insert_row(sqlite3_vtab *tab, sqlite3_int64 &rowid_out) {
tab->zErrMsg = sqlite3_mprintf(
"Rows cannot be inserted into the lnav_views table");
return SQLITE_ERROR;
};
int update_row(sqlite3_vtab *tab,
sqlite3_int64 &index,
const char *name,
int64_t top_row,
int64_t left,
int64_t height,
int64_t inner_height,
const char *top_time,
const char *top_file,
bool is_paused,
const char *search,
bool do_filtering) {
textview_curses &tc = lnav_data.ld_views[index];
text_time_translator *time_source = dynamic_cast<text_time_translator *>(tc.get_sub_source());
if (tc.get_top() != top_row) {
tc.set_top(vis_line_t(top_row));
} else if (top_time != nullptr && time_source != nullptr) {
date_time_scanner dts;
struct timeval tv;
if (dts.convert_to_timeval(top_time, -1, nullptr, tv)) {
auto last_time_opt = time_source->time_for_row(tc.get_top());
if (last_time_opt) {
auto last_time = last_time_opt.value();
if (tv != last_time) {
time_source->row_for_time(tv) | [&tc](auto row) {
tc.set_top(row);
};
}
}
} else {
tab->zErrMsg = sqlite3_mprintf("Invalid time: %s", top_time);
return SQLITE_ERROR;
}
}
tc.set_left(left);
tc.set_paused(is_paused);
tc.execute_search(search);
auto tss = tc.get_sub_source();
if (tss != nullptr &&
tss->tss_supports_filtering &&
tss->tss_apply_filters != do_filtering) {
tss->tss_apply_filters = do_filtering;
tss->text_filters_changed();
}
return SQLITE_OK;
};
};
struct lnav_view_stack : public tvt_iterator_cursor<lnav_view_stack> {
using iterator = vector<textview_curses *>::iterator;
static constexpr const char *NAME = "lnav_view_stack";
static constexpr const char *CREATE_STMT = R"(
-- Access lnav's view stack through this table.
CREATE TABLE lnav_view_stack (
name TEXT
);
)";
iterator begin() {
return lnav_data.ld_view_stack.begin();
}
iterator end() {
return lnav_data.ld_view_stack.end();
}
int get_column(cursor &vc, sqlite3_context *ctx, int col) {
textview_curses *tc = *vc.iter;
auto view = lnav_view_t(tc - lnav_data.ld_views);
switch (col) {
case 0:
sqlite3_result_text(ctx,
lnav_view_strings[view], -1,
SQLITE_STATIC);
break;
}
return SQLITE_OK;
};
int delete_row(sqlite3_vtab *tab, sqlite3_int64 rowid) {
if ((size_t)rowid != lnav_data.ld_view_stack.size() - 1) {
tab->zErrMsg = sqlite3_mprintf(
"Only the top view in the stack can be deleted");
return SQLITE_ERROR;
}
lnav_data.ld_last_view = *lnav_data.ld_view_stack.top();
lnav_data.ld_view_stack.pop_back();
return SQLITE_OK;
};
int insert_row(sqlite3_vtab *tab,
sqlite3_int64 &rowid_out,
lnav_view_t view_index) {
textview_curses *tc = &lnav_data.ld_views[view_index];
ensure_view(tc);
rowid_out = lnav_data.ld_view_stack.size() - 1;
return SQLITE_OK;
};
int update_row(sqlite3_vtab *tab, sqlite3_int64 &index) {
tab->zErrMsg = sqlite3_mprintf(
"The lnav_view_stack table cannot be updated");
return SQLITE_ERROR;
};
};
struct lnav_view_filter_base {
struct iterator {
using difference_type = int;
using value_type = text_filter;
using pointer = text_filter *;
using reference = text_filter &;
using iterator_category = forward_iterator_tag;
lnav_view_t i_view_index;
int i_filter_index;
iterator(lnav_view_t view = LNV_LOG, int filter = -1)
: i_view_index(view), i_filter_index(filter) {
}
iterator &operator++() {
while (this->i_view_index < LNV__MAX) {
textview_curses &tc = lnav_data.ld_views[this->i_view_index];
text_sub_source *tss = tc.get_sub_source();
if (tss == nullptr) {
this->i_view_index = lnav_view_t(this->i_view_index + 1);
continue;
}
filter_stack &fs = tss->get_filters();
this->i_filter_index += 1;
if (this->i_filter_index >= (ssize_t) fs.size()) {
this->i_filter_index = -1;
this->i_view_index = lnav_view_t(this->i_view_index + 1);
} else {
break;
}
}
return *this;
}
bool operator==(const iterator &other) const {
return this->i_view_index == other.i_view_index &&
this->i_filter_index == other.i_filter_index;
}
bool operator!=(const iterator &other) const {
return !(*this == other);
}
};
iterator begin() {
iterator retval = iterator();
return ++retval;
}
iterator end() {
return {LNV__MAX, -1};
}
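    // The 64-bit rowid returned here packs the view index into the upper 32
    // bits and the filter index into the lower 32 bits; delete_row() and
    // update_row() in the table implementations below undo this with
    // `rowid >> 32` and `rowid & 0xffffffff`. For example (hypothetical
    // values), view index 1 with filter index 3 yields rowid 0x100000003.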
sqlite_int64 get_rowid(iterator iter) {
textview_curses &tc = lnav_data.ld_views[iter.i_view_index];
text_sub_source *tss = tc.get_sub_source();
filter_stack &fs = tss->get_filters();
auto &tf = *(fs.begin() + iter.i_filter_index);
sqlite_int64 retval = iter.i_view_index;
retval = retval << 32;
retval = retval | tf->get_index();
return retval;
}
};
struct lnav_view_filters : public tvt_iterator_cursor<lnav_view_filters>,
public lnav_view_filter_base {
static constexpr const char *NAME = "lnav_view_filters";
static constexpr const char *CREATE_STMT = R"(
-- Access lnav's filters through this table.
CREATE TABLE lnav_view_filters (
view_name TEXT, -- The name of the view.
filter_id INTEGER DEFAULT 0, -- The filter identifier.
enabled INTEGER DEFAULT 1, -- Indicates if the filter is enabled/disabled.
type TEXT DEFAULT 'out', -- The type of filter (i.e. in/out).
pattern TEXT -- The filter pattern.
);
)";
int get_column(cursor &vc, sqlite3_context *ctx, int col) {
textview_curses &tc = lnav_data.ld_views[vc.iter.i_view_index];
text_sub_source *tss = tc.get_sub_source();
filter_stack &fs = tss->get_filters();
auto tf = *(fs.begin() + vc.iter.i_filter_index);
switch (col) {
case 0:
sqlite3_result_text(ctx,
lnav_view_strings[vc.iter.i_view_index], -1,
SQLITE_STATIC);
break;
case 1:
to_sqlite(ctx, tf->get_index());
break;
case 2:
sqlite3_result_int(ctx, tf->is_enabled());
break;
case 3:
switch (tf->get_type()) {
case text_filter::INCLUDE:
sqlite3_result_text(ctx, "in", 2, SQLITE_STATIC);
break;
case text_filter::EXCLUDE:
sqlite3_result_text(ctx, "out", 3, SQLITE_STATIC);
break;
default:
ensure(0);
}
break;
case 4:
sqlite3_result_text(ctx,
tf->get_id().c_str(),
-1,
SQLITE_TRANSIENT);
break;
}
return SQLITE_OK;
}
int insert_row(sqlite3_vtab *tab,
sqlite3_int64 &rowid_out,
lnav_view_t view_index,
nonstd::optional<int64_t> _filter_id,
nonstd::optional<bool> enabled,
nonstd::optional<text_filter::type_t> type,
pair<string, auto_mem<pcre>> pattern) {
textview_curses &tc = lnav_data.ld_views[view_index];
text_sub_source *tss = tc.get_sub_source();
filter_stack &fs = tss->get_filters();
auto filter_index = fs.next_index();
if (!filter_index) {
throw sqlite_func_error("Too many filters");
}
auto pf = make_shared<pcre_filter>(
type.value_or(text_filter::type_t::EXCLUDE),
pattern.first,
*filter_index,
pattern.second.release());
fs.add_filter(pf);
if (!enabled.value_or(true)) {
pf->disable();
}
tss->text_filters_changed();
tc.set_needs_update();
return SQLITE_OK;
}
int delete_row(sqlite3_vtab *tab, sqlite3_int64 rowid) {
auto view_index = lnav_view_t(rowid >> 32);
size_t filter_index = rowid & 0xffffffffLL;
textview_curses &tc = lnav_data.ld_views[view_index];
text_sub_source *tss = tc.get_sub_source();
filter_stack &fs = tss->get_filters();
for (const auto &iter : fs) {
if (iter->get_index() == filter_index) {
fs.delete_filter(iter->get_id());
tss->text_filters_changed();
break;
}
}
tc.set_needs_update();
return SQLITE_OK;
}
int update_row(sqlite3_vtab *tab,
sqlite3_int64 &rowid,
lnav_view_t new_view_index,
int64_t new_filter_id,
bool enabled,
text_filter::type_t type,
pair<string, auto_mem<pcre>> pattern) {
auto view_index = lnav_view_t(rowid >> 32);
auto filter_index = rowid & 0xffffffffLL;
textview_curses &tc = lnav_data.ld_views[view_index];
text_sub_source *tss = tc.get_sub_source();
filter_stack &fs = tss->get_filters();
auto iter = fs.begin();
for (; iter != fs.end(); ++iter) {
if ((*iter)->get_index() == (size_t) filter_index) {
break;
}
}
shared_ptr<text_filter> tf = *iter;
if (new_view_index != view_index) {
tab->zErrMsg = sqlite3_mprintf(
"The view for a filter cannot be changed");
return SQLITE_ERROR;
}
tf->lf_deleted = true;
tss->text_filters_changed();
auto pf = make_shared<pcre_filter>(type,
pattern.first,
tf->get_index(),
pattern.second.release());
if (!enabled) {
pf->disable();
}
*iter = pf;
tss->text_filters_changed();
tc.set_needs_update();
return SQLITE_OK;
};
};
struct lnav_view_filter_stats : public tvt_iterator_cursor<lnav_view_filter_stats>,
public lnav_view_filter_base {
static constexpr const char *NAME = "lnav_view_filter_stats";
static constexpr const char *CREATE_STMT = R"(
-- Access statistics for filters through this table.
CREATE TABLE lnav_view_filter_stats (
view_name TEXT, -- The name of the view.
filter_id INTEGER, -- The filter identifier.
hits INTEGER -- The number of lines that matched this filter.
);
)";
int get_column(cursor &vc, sqlite3_context *ctx, int col) {
textview_curses &tc = lnav_data.ld_views[vc.iter.i_view_index];
text_sub_source *tss = tc.get_sub_source();
filter_stack &fs = tss->get_filters();
auto tf = *(fs.begin() + vc.iter.i_filter_index);
switch (col) {
case 0:
sqlite3_result_text(ctx,
lnav_view_strings[vc.iter.i_view_index], -1,
SQLITE_STATIC);
break;
case 1:
to_sqlite(ctx, tf->get_index());
break;
case 2:
to_sqlite(ctx, tss->get_filtered_count_for(tf->get_index()));
break;
}
return SQLITE_OK;
}
};
struct lnav_view_files : public tvt_iterator_cursor<lnav_view_files> {
static constexpr const char *NAME = "lnav_view_files";
static constexpr const char *CREATE_STMT = R"(
-- Access the files loaded into lnav's views through this table.
CREATE TABLE lnav_view_files (
view_name TEXT, -- The name of the view.
filepath TEXT, -- The path to the file.
visible INTEGER -- Indicates whether or not the file is shown.
);
)";
using iterator = logfile_sub_source::iterator;
iterator begin() {
return lnav_data.ld_log_source.begin();
}
iterator end() {
return lnav_data.ld_log_source.end();
}
int get_column(cursor &vc, sqlite3_context *ctx, int col) {
auto& ld = *vc.iter;
switch (col) {
case 0:
sqlite3_result_text(ctx,
lnav_view_strings[LNV_LOG], -1,
SQLITE_STATIC);
break;
case 1:
to_sqlite(ctx, ld->ld_filter_state.lfo_filter_state
.tfs_logfile->get_filename());
break;
case 2:
to_sqlite(ctx, ld->ld_visible);
break;
}
return SQLITE_OK;
}
int delete_row(sqlite3_vtab *tab, sqlite3_int64 rowid) {
tab->zErrMsg = sqlite3_mprintf(
"Rows cannot be deleted from the lnav_view_files table");
return SQLITE_ERROR;
}
int insert_row(sqlite3_vtab *tab, sqlite3_int64 &rowid_out) {
tab->zErrMsg = sqlite3_mprintf(
"Rows cannot be inserted into the lnav_view_files table");
return SQLITE_ERROR;
};
int update_row(sqlite3_vtab *tab,
sqlite3_int64 &rowid,
const char *view_name,
const char *file_path,
bool visible) {
auto &lss = lnav_data.ld_log_source;
auto iter = this->begin();
std::advance(iter, rowid);
auto& ld = *iter;
if (ld->ld_visible != visible) {
ld->set_visibility(visible);
lss.text_filters_changed();
}
return SQLITE_OK;
}
};
static const char *CREATE_FILTER_VIEW = R"(
CREATE VIEW lnav_view_filters_and_stats AS
SELECT * FROM lnav_view_filters LEFT NATURAL JOIN lnav_view_filter_stats
)";
static auto a = injector::bind_multiple<vtab_module_base>()
.add<vtab_module<lnav_views>>()
.add<vtab_module<lnav_view_stack>>()
.add<vtab_module<lnav_view_filters>>()
.add<vtab_module<tvt_no_update<lnav_view_filter_stats>>>()
.add<vtab_module<lnav_view_files>>();
int register_views_vtab(sqlite3 *db)
{
char *errmsg;
if (sqlite3_exec(db, CREATE_FILTER_VIEW, nullptr, nullptr, &errmsg) != SQLITE_OK) {
log_error("Unable to create filter view: %s", errmsg);
}
return 0;
}
| 11,934 |
5,169 | {
"name": "CJComplexUIKit",
"version": "0.0.4",
"summary": "自定义的稍微复杂的UI",
"homepage": "https://github.com/dvlproad/CJUIKit",
"description": "*、CJDataScrollView:带数据的列表视图或集合视图(常用于搜索、图片选择)\n\n A longer description of CJPopupAction in Markdown format.\n\n * Think: Why did you write this? What is the focus? What does it do?\n * CocoaPods will be using this to generate tags, and improve search results.\n * Try to keep it short, snappy and to the point.\n * Finally, don't worry about the indent, CocoaPods strips it!",
"license": "MIT",
"authors": {
"dvlproad": ""
},
"platforms": {
"ios": "7.0"
},
"source": {
"git": "https://github.com/dvlproad/CJUIKit.git",
"tag": "CJComplexUIKit_0.0.4"
},
"source_files": "CJComplexUIKit/*.{h,m}",
"frameworks": "UIKit",
"requires_arc": true,
"subspecs": [
{
"name": "CJDataScrollView",
"subspecs": [
{
"name": "SearchScrollView",
"source_files": "CJComplexUIKit/CJDataScrollView/SearchScrollView/**/*.{h,m}",
"dependencies": {
"CJBaseUIKit/CJCollectionView/MyEqualCellSizeCollectionView": [
],
"CJBaseUtil/CJDataUtil": [
],
"NSOperationQueueUtil": [
]
}
},
{
"name": "ImagePickerCollectionlView",
"source_files": "CJComplexUIKit/CJDataScrollView/ImagePickerCollectionlView/**/*.{h,m}",
"resources": "CJComplexUIKit/CJDataScrollView/ImagePickerCollectionlView/**/*.{png,xib,bundle}",
"frameworks": "MediaPlayer",
"dependencies": {
"JGActionSheet": [
],
"CJBaseUIKit/UIImage+CJCategory": [
],
"CJBaseUIKit/CJCollectionView/MyEqualCellSizeCollectionView": [
],
"CJBaseUIKit/CJCollectionView/CJBaseCollectionViewCell": [
],
"CJMedia/CJValidateAuthorizationUtil": [
"~> 0.0.4"
],
"CJMedia/MySingleImagePickerController": [
"~> 0.0.4"
],
"CJMedia/CJPhotoBrowser": [
"~> 0.0.4"
],
"CJFile/CJFileManager": [
"~> 0.0.7"
],
"CJNetwork/AFNetworkingUploadComponent": [
"~> 0.1.3"
]
}
}
]
}
]
}
| 1,303 |
1,738 | <filename>dev/Gems/CloudGemFramework/v1/ResourceManager/resource_manager/test/test_integration_service_api_with_custom_domain_name.py<gh_stars>1000+
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import json
import subprocess
import os
import time
import warnings
from resource_manager.test import lmbr_aws_test_support
from resource_manager.test import service_api_common_support
import resource_management
import cgf_service_client
from resource_manager.test import base_stack_test
class IntegrationTest_CloudGemFramework_ServiceApiWithCustomDomainName(
service_api_common_support.BaseServiceApiTestCase):
""" Integration tests for service API created with custom domain name.
To run these tests:
    (1) Contact ly-infra to create a sub-domain name under your own AWS account
(2) Follow all the steps mentioned in the following document to set up a custom domain name for API Gateway:
https://aws.amazon.com/premiumsupport/knowledge-center/custom-domain-name-amazon-api-gateway/
"""
# Replace the value of CUSTOM_DOMAIN_NAME with your own custom domain name.
CUSTOM_DOMAIN_NAME = 'example.com'
STAGE = 'api'
def __init__(self, *args, **kwargs):
super(IntegrationTest_CloudGemFramework_ServiceApiWithCustomDomainName, self).__init__(*args, **kwargs)
def setUp(self):
self.set_deployment_name(lmbr_aws_test_support.unique_name())
self.set_resource_group_name(lmbr_aws_test_support.unique_name('customdomain'))
self.prepare_test_environment("cloud_gem_framework_service_api_with_custom_domain_name_test")
self.register_for_shared_resources()
# Ignore warnings based on https://github.com/boto/boto3/issues/454 for now
        # Needs to be set per test as it's reset
warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning)
def __000_create_stacks(self):
if not self.has_project_stack():
self.lmbr_aws(
'project', 'create',
'--stack-name', self.TEST_PROJECT_STACK_NAME,
'--confirm-aws-usage',
'--confirm-security-change',
'--admin-roles',
'--region', lmbr_aws_test_support.REGION,
'--custom-domain-name', self.CUSTOM_DOMAIN_NAME
)
self.setup_deployment_stacks()
def __010_add_service_api_resources(self):
self.add_service_api_resources()
def __020_create_resources(self):
self.create_resources()
def __030_verify_service_api_resources(self):
self.verify_service_api_resources()
def __040_verify_service_url(self):
self.__verify_service_url(self.CUSTOM_DOMAIN_NAME)
def __050_call_simple_api(self):
self.call_simple_api()
def __060_call_simple_api_with_no_credentials(self):
self.call_simple_api_with_no_credentials()
def __100_add_complex_apis(self):
self.add_complex_apis()
def __110_add_apis_with_optional_parameters(self):
self.add_apis_with_optional_parameters()
def __120_add_interfaces(self):
self.add_interfaces()
def __130_add_legacy_plugin(self):
self.add_legacy_plugin()
def __200_update_deployment(self):
self.update_deployment()
def __210_verify_service_api_mapping(self):
self.verify_service_api_mapping()
def __300_call_complex_apis(self):
self.call_complex_apis()
def __301_call_complex_api_using_player_credentials(self):
self.call_complex_api_using_player_credentials()
def __302_call_complex_api_using_project_admin_credentials(self):
self.call_complex_api_using_project_admin_credentials()
def __303_call_complex_api_using_deployment_admin_credentials(self):
self.call_complex_api_using_deployment_admin_credentials()
def __304_call_complex_api_with_missing_string_pathparam(self):
self.call_complex_api_with_missing_string_pathparam()
def __305_call_complex_api_with_missing_string_queryparam(self):
self.call_complex_api_with_missing_string_queryparam()
def __306_call_complex_api_with_missing_string_bodyparam(self):
self.call_complex_api_with_missing_string_bodyparam()
def __307_call_api_with_both_parameters(self):
self.call_api_with_both_parameters()
def __308_call_api_without_bodyparam(self):
self.call_api_without_bodyparam()
def __309_call_api_without_queryparam(self):
self.call_api_without_queryparam()
def __400_call_test_plugin(self):
self.call_test_plugin()
def __500_call_interface_directly(self):
self.call_interface_directly()
def __501_call_interface_directly_with_player_credential(self):
self.call_interface_directly_with_player_credential()
def __502_call_interface_directly_without_credential(self):
self.call_interface_directly_without_credential()
def __503_invoke_interface_caller(self):
self.invoke_interface_caller()
def __700_run_cpp_tests(self):
self.run_cpp_tests()
def __800_set_gem_service_api_custom_resource_version(self):
self.__set_gem_service_api_custom_resource_version()
def __810_remove_custom_domain_name(self):
self.lmbr_aws(
'project', 'update',
'--confirm-aws-usage',
'--confirm-security-change'
)
self.lmbr_aws('resource-group', 'upload-resources', '--resource-group', self.TEST_RESOURCE_GROUP_NAME, '--deployment', self.TEST_DEPLOYMENT_NAME,
'--confirm-aws-usage', '--confirm-security-change', '--confirm-resource-deletion')
self.refresh_stack_description(self.get_resource_group_stack_arn(self.TEST_DEPLOYMENT_NAME, self.TEST_RESOURCE_GROUP_NAME))
self.lmbr_aws('mappings', 'update', '-d', self.TEST_DEPLOYMENT_NAME, '--ignore-cache')
def __820_verify_default_service_url(self):
self.__verify_service_url()
def __830_call_simple_api_using_default_service_url(self):
self.call_simple_api()
def __840_verify_service_api_mapping_with_default_service_url(self):
self.verify_service_api_mapping()
def __850_update_custom_domain_name(self):
self.lmbr_aws(
'project', 'update',
'--custom-domain-name', self.CUSTOM_DOMAIN_NAME,
'--confirm-aws-usage',
'--confirm-security-change'
)
self.lmbr_aws('resource-group', 'upload-resources', '--resource-group', self.TEST_RESOURCE_GROUP_NAME, '--deployment', self.TEST_DEPLOYMENT_NAME,
'--confirm-aws-usage', '--confirm-security-change', '--confirm-resource-deletion')
self.refresh_stack_description(self.get_resource_group_stack_arn(self.TEST_DEPLOYMENT_NAME, self.TEST_RESOURCE_GROUP_NAME))
self.lmbr_aws('mappings', 'update', '-d', self.TEST_DEPLOYMENT_NAME, '--ignore-cache')
def __860_verify_alternative_service_url(self):
self.__verify_service_url(self.CUSTOM_DOMAIN_NAME)
def __870_call_simple_api_using_alternative_service_url(self):
self.call_simple_api()
def __880_verify_service_api_mapping_with_alternative_service_url(self):
self.verify_service_api_mapping()
def __900_remove_service_api_resources(self):
self.remove_service_api_resources()
def __910_delete_resources(self):
self.delete_resources()
def __999_cleanup(self):
self.cleanup()
def __verify_service_url(self, custom_domain_name=''):
url = self.get_service_url(stack_id=self.get_resource_group_stack_arn(self.TEST_DEPLOYMENT_NAME, self.TEST_RESOURCE_GROUP_NAME))
slash_parts = url.split('/')
self.assertEqual(len(slash_parts), 4)
if custom_domain_name:
            # The alternative service URL has the format https://{custom_domain_name}/{region}.{stage_name}.{rest_api_id}
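            # e.g. https://example.com/us-west-2.api.a1b2c3d4e5, where the
            # region and rest_api_id shown are hypothetical placeholders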
self.assertEqual(slash_parts[2], custom_domain_name)
dot_parts = slash_parts[3].split('.')
self.assertEqual(len(dot_parts), 3)
self.assertEqual(dot_parts[0], lmbr_aws_test_support.REGION)
self.assertEqual(dot_parts[1], self.STAGE)
else:
            # The default service URL has the format https://{rest_api_id}.execute-api.{region}.amazonaws.com/{stage_name}
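            # e.g. https://a1b2c3d4e5.execute-api.us-west-2.amazonaws.com/api,
            # where the rest_api_id and region shown are hypothetical placeholders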
self.assertEqual(slash_parts[3], self.STAGE)
dot_parts = slash_parts[2].split('.')
self.assertEqual(len(dot_parts), 5)
self.assertEqual(dot_parts[1], 'execute-api')
self.assertEqual(dot_parts[2], lmbr_aws_test_support.REGION)
self.assertEqual(dot_parts[3], 'amazonaws')
self.assertEqual(dot_parts[4], 'com')
def __set_gem_service_api_custom_resource_version(self):
# Set CustomResourceVersion to $LATEST so that we can use the latest version of the handler lambda
file_path = self.get_gem_aws_path(self.TEST_RESOURCE_GROUP_NAME, 'resource-template.json')
with open(file_path, 'r') as file:
template = json.load(file)
if not template['Resources']['ServiceApi']['Metadata']:
template['Resources']['ServiceApi']['Metadata'] = {}
if not template['Resources']['ServiceApi']['Metadata']['CloudCanvas']:
template['Resources']['ServiceApi']['Metadata']['CloudCanvas'] = {}
template['Resources']['ServiceApi']['Metadata']['CloudCanvas'].update(
{'CustomResourceVersion': '$LATEST'})
with open(file_path, 'w') as file:
json.dump(template, file, indent=4, sort_keys=True)
| 4,194 |
418 | import segm.utils.torch as ptu
from segm.data import ImagenetDataset
from segm.data import ADE20KSegmentation
from segm.data import PascalContextDataset
from segm.data import CityscapesDataset
from segm.data import Loader
def create_dataset(dataset_kwargs):
dataset_kwargs = dataset_kwargs.copy()
dataset_name = dataset_kwargs.pop("dataset")
batch_size = dataset_kwargs.pop("batch_size")
num_workers = dataset_kwargs.pop("num_workers")
split = dataset_kwargs.pop("split")
# load dataset_name
if dataset_name == "imagenet":
dataset_kwargs.pop("patch_size")
dataset = ImagenetDataset(split=split, **dataset_kwargs)
elif dataset_name == "ade20k":
dataset = ADE20KSegmentation(split=split, **dataset_kwargs)
elif dataset_name == "pascal_context":
dataset = PascalContextDataset(split=split, **dataset_kwargs)
elif dataset_name == "cityscapes":
dataset = CityscapesDataset(split=split, **dataset_kwargs)
else:
raise ValueError(f"Dataset {dataset_name} is unknown.")
dataset = Loader(
dataset=dataset,
batch_size=batch_size,
num_workers=num_workers,
distributed=ptu.distributed,
split=split,
)
return dataset
| 507 |
2,151 | package org.junit.tests.running.methods;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.io.Writer;
import java.util.concurrent.TimeUnit;
import junit.framework.JUnit4TestAdapter;
import junit.framework.TestResult;
import org.junit.After;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestRule;
import org.junit.rules.Timeout;
import org.junit.runner.JUnitCore;
import org.junit.runner.Result;
public class TimeoutTest {
public static class FailureWithTimeoutTest {
@Test(timeout = 1000)
public void failure() {
fail();
}
}
@Test
public void failureWithTimeout() throws Exception {
JUnitCore core = new JUnitCore();
Result result = core.run(FailureWithTimeoutTest.class);
assertEquals(1, result.getRunCount());
assertEquals(1, result.getFailureCount());
assertEquals(AssertionError.class, result.getFailures().get(0).getException().getClass());
}
public static class FailureWithTimeoutRunTimeExceptionTest {
@Test(timeout = 1000)
public void failure() {
throw new NullPointerException();
}
}
@Test
public void failureWithTimeoutRunTimeException() throws Exception {
JUnitCore core = new JUnitCore();
Result result = core.run(FailureWithTimeoutRunTimeExceptionTest.class);
assertEquals(1, result.getRunCount());
assertEquals(1, result.getFailureCount());
assertEquals(NullPointerException.class, result.getFailures().get(0).getException().getClass());
}
public static class SuccessWithTimeoutTest {
@Test(timeout = 1000)
public void success() {
}
}
@Test
public void successWithTimeout() throws Exception {
JUnitCore core = new JUnitCore();
Result result = core.run(SuccessWithTimeoutTest.class);
assertEquals(1, result.getRunCount());
assertEquals(0, result.getFailureCount());
}
public static class TimeoutFailureTest {
@Test(timeout = 100)
public void success() throws InterruptedException {
Thread.sleep(40000);
}
}
@Ignore("was breaking gump")
@Test
public void timeoutFailure() throws Exception {
JUnitCore core = new JUnitCore();
Result result = core.run(TimeoutFailureTest.class);
assertEquals(1, result.getRunCount());
assertEquals(1, result.getFailureCount());
assertEquals(InterruptedException.class, result.getFailures().get(0).getException().getClass());
}
public static class InfiniteLoopTest {
@Test(timeout = 100)
public void failure() {
infiniteLoop();
}
private void infiniteLoop() {
for (; ; ) {
try {
Thread.sleep(10);
} catch (InterruptedException e) {
}
}
}
}
@Test
public void infiniteLoop() throws Exception {
JUnitCore core = new JUnitCore();
Result result = core.run(InfiniteLoopTest.class);
assertEquals(1, result.getRunCount());
assertEquals(1, result.getFailureCount());
Throwable exception = result.getFailures().get(0).getException();
assertTrue(exception.getMessage().contains("test timed out after 100 milliseconds"));
}
public static class ImpatientLoopTest {
@Test(timeout = 1)
public void failure() {
infiniteLoop();
}
private void infiniteLoop() {
for (; ; ) ;
}
}
@Ignore("This breaks sporadically with time differences just slightly more than 200ms")
@Test
public void infiniteLoopRunsForApproximatelyLengthOfTimeout() throws Exception {
// "prime the pump": running these beforehand makes the runtimes more predictable
// (because of class loading?)
JUnitCore.runClasses(InfiniteLoopTest.class, ImpatientLoopTest.class);
long longTime = runAndTime(InfiniteLoopTest.class);
long shortTime = runAndTime(ImpatientLoopTest.class);
long difference = longTime - shortTime;
assertTrue(String.format("Difference was %sms", difference), difference < 200);
}
private long runAndTime(Class<?> clazz) {
JUnitCore core = new JUnitCore();
long startTime = System.currentTimeMillis();
core.run(clazz);
long totalTime = System.currentTimeMillis() - startTime;
return totalTime;
}
private String stackForException(Throwable exception) {
Writer buffer = new StringWriter();
PrintWriter writer = new PrintWriter(buffer);
exception.printStackTrace(writer);
return buffer.toString();
}
@Test
public void stalledThreadAppearsInStackTrace() throws Exception {
JUnitCore core = new JUnitCore();
Result result = core.run(InfiniteLoopTest.class);
assertEquals(1, result.getRunCount());
assertEquals(1, result.getFailureCount());
Throwable exception = result.getFailures().get(0).getException();
assertThat(stackForException(exception), containsString("infiniteLoop")); // Make sure we have the stalled frame on the stack somewhere
}
public static class InfiniteLoopMultithreaded {
private static class ThreadTest implements Runnable {
private boolean fStall;
public ThreadTest(boolean stall) {
fStall = stall;
}
public void run() {
if (fStall)
for (; ; ) ;
try {
Thread.sleep(500);
} catch (InterruptedException e) {
}
}
}
public void failure(boolean mainThreadStalls) throws Exception {
Thread t1 = new Thread(new ThreadTest(false), "timeout-thr1");
Thread t2 = new Thread(new ThreadTest(!mainThreadStalls), "timeout-thr2");
Thread t3 = new Thread(new ThreadTest(false), "timeout-thr3");
t1.start();
t2.start();
t3.start();
if (mainThreadStalls)
for (; ; ) ;
t1.join();
t2.join();
t3.join();
}
}
public static class InfiniteLoopWithStuckThreadTest {
@Rule
public TestRule globalTimeout = Timeout.builder()
.withTimeout(100, TimeUnit.MILLISECONDS)
.withLookingForStuckThread(true)
.build();
@Test
public void failure() throws Exception {
(new InfiniteLoopMultithreaded()).failure(false);
}
}
public static class InfiniteLoopStuckInMainThreadTest {
@Rule
public TestRule globalTimeout = Timeout.builder()
.withTimeout(100, TimeUnit.MILLISECONDS)
.withLookingForStuckThread(true)
.build();
@Test
public void failure() throws Exception {
(new InfiniteLoopMultithreaded()).failure(true);
}
}
@Test
public void timeoutFailureMultithreaded() throws Exception {
JUnitCore core = new JUnitCore();
Result result = core.run(InfiniteLoopWithStuckThreadTest.class);
assertEquals(1, result.getRunCount());
assertEquals(2, result.getFailureCount());
Throwable exception[] = new Throwable[2];
for (int i = 0; i < 2; i++)
exception[i] = result.getFailures().get(i).getException();
assertThat(exception[0].getMessage(), containsString("test timed out after 100 milliseconds"));
assertThat(stackForException(exception[0]), containsString("Thread.join"));
assertThat(exception[1].getMessage(), containsString("Appears to be stuck in thread timeout-thr2"));
}
@Test
public void timeoutFailureMultithreadedStuckInMain() throws Exception {
JUnitCore core = new JUnitCore();
Result result = core.run(InfiniteLoopStuckInMainThreadTest.class);
assertEquals(1, result.getRunCount());
assertEquals(1, result.getFailureCount());
Throwable exception = result.getFailures().get(0).getException();
assertThat(exception.getMessage(), containsString("test timed out after 100 milliseconds"));
assertThat(exception.getMessage(), not(containsString("Appears to be stuck")));
}
@Test
public void compatibility() {
TestResult result = new TestResult();
new JUnit4TestAdapter(InfiniteLoopTest.class).run(result);
assertEquals(1, result.errorCount());
}
public static class WillTimeOut {
static boolean afterWasCalled = false;
@Test(timeout = 1)
public void test() {
for (; ; ) {
try {
Thread.sleep(10000);
} catch (InterruptedException e) {
// ok, tests are over
}
}
}
@After
public void after() {
afterWasCalled = true;
}
}
@Test
public void makeSureAfterIsCalledAfterATimeout() {
JUnitCore.runClasses(WillTimeOut.class);
assertThat(WillTimeOut.afterWasCalled, is(true));
}
public static class TimeOutZero {
@Rule
public Timeout timeout = Timeout.seconds(0);
@Test
public void test() {
try {
Thread.sleep(200); // long enough to suspend thread execution
} catch (InterruptedException e) {
// Don't care
}
}
}
@Test
public void testZeroTimeoutIsIgnored() {
JUnitCore core = new JUnitCore();
Result result = core.run(TimeOutZero.class);
assertEquals("Should run the test", 1, result.getRunCount());
assertEquals("Test should not have failed", 0, result.getFailureCount());
}
private static class TimeoutSubclass extends Timeout {
public TimeoutSubclass(long timeout, TimeUnit timeUnit) {
super(timeout, timeUnit);
}
public long getTimeoutFromSuperclass(TimeUnit unit) {
return super.getTimeout(unit);
}
}
public static class TimeOutOneSecond {
@Rule
public TimeoutSubclass timeout = new TimeoutSubclass(1, TimeUnit.SECONDS);
@Test
public void test() {
assertEquals(1000, timeout.getTimeoutFromSuperclass(TimeUnit.MILLISECONDS));
}
}
@Test
public void testGetTimeout() {
JUnitCore core = new JUnitCore();
Result result = core.run(TimeOutOneSecond.class);
assertEquals("Should run the test", 1, result.getRunCount());
assertEquals("Test should not have failed", 0, result.getFailureCount());
}
}
| 4,645 |
565 |
#pragma once
#include "quantities/dimensions.hpp"
#include "base/not_constructible.hpp"
namespace principia {
namespace quantities {
namespace internal_dimensions {
using base::not_constructible;
class ExponentSerializer : not_constructible {
public:
// Returns true if the exponent is in the range that we can serialize.
static constexpr bool IsSerializable(std::int64_t exponent);
// Returns the serialized representation of the exponent. |position| is the
// 0-based position of the dimension in the representation.
static constexpr std::int64_t Representation(
std::int64_t exponent,
std::int64_t position);
private:
static constexpr std::int64_t min_exponent = -24;
static constexpr std::int64_t max_exponent = 7;
static constexpr std::int64_t exponent_mask = 0x1F;
static constexpr std::int64_t exponent_bits = 5;
};
constexpr bool ExponentSerializer::IsSerializable(
std::int64_t const exponent) {
return exponent >= min_exponent && exponent <= max_exponent;
}
constexpr std::int64_t ExponentSerializer::Representation(
std::int64_t const exponent,
std::int64_t const position) {
// For exponents in [-16, 7] this returns the representations
// 0x10, 0x11, ... 0x00, ... 0x07. For exponents in [-24, -17] this returns
// the representations 0x08, 0x09, ... 0x0F. The latter used to be reserved
// for exponents in the range [8, 15] but we believe that we never used them,
// and with polynomials in the monomial basis we need large negative
// exponents.
return (exponent & exponent_mask) << (position * exponent_bits);
}
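// Worked examples (illustrative, derived from the encoding above): with 5 bits per
// exponent, Representation(7, 0) == 0x07, Representation(-1, 2) == 0x1F << 10 == 0x7C00,
// and Representation(-24, 0) == 0x08, matching the ranges described in the comment.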
template<std::int64_t LengthExponent,
std::int64_t MassExponent,
std::int64_t TimeExponent,
std::int64_t CurrentExponent,
std::int64_t TemperatureExponent,
std::int64_t AmountExponent,
std::int64_t LuminousIntensityExponent,
std::int64_t AngleExponent>
struct Dimensions : not_constructible {
enum {
Length = LengthExponent,
Mass = MassExponent,
Time = TimeExponent,
Current = CurrentExponent,
Temperature = TemperatureExponent,
Amount = AmountExponent,
LuminousIntensity = LuminousIntensityExponent,
Angle = AngleExponent,
};
static std::int64_t constexpr representation =
ExponentSerializer::Representation(LengthExponent, 0) |
ExponentSerializer::Representation(MassExponent, 1) |
ExponentSerializer::Representation(TimeExponent, 2) |
ExponentSerializer::Representation(CurrentExponent, 3) |
ExponentSerializer::Representation(TemperatureExponent, 4) |
ExponentSerializer::Representation(AmountExponent, 5) |
ExponentSerializer::Representation(LuminousIntensityExponent, 6) |
ExponentSerializer::Representation(AngleExponent, 7);
};
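// Worked example (illustrative): an acceleration-like Dimensions<1, 0, -2, 0, 0, 0, 0, 0>
// has representation Representation(1, 0) | Representation(-2, 2), i.e.
// 0x01 | (0x1E << 10) == 0x7801.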
template<typename Dimensions>
struct DimensionsAreSerializable : std::true_type {
static_assert(ExponentSerializer::IsSerializable(Dimensions::Length),
"Invalid length exponent");
static_assert(ExponentSerializer::IsSerializable(Dimensions::Mass),
"Invalid mass exponent");
static_assert(ExponentSerializer::IsSerializable(Dimensions::Time),
"Invalid time exponent");
static_assert(ExponentSerializer::IsSerializable(Dimensions::Current),
"Invalid current exponent");
static_assert(ExponentSerializer::IsSerializable(Dimensions::Temperature),
"Invalid temperature exponent");
static_assert(ExponentSerializer::IsSerializable(Dimensions::Amount),
"Invalid amount exponent");
static_assert(ExponentSerializer::IsSerializable(
Dimensions::LuminousIntensity),
"Invalid luminous intensity exponent");
static_assert(ExponentSerializer::IsSerializable(Dimensions::Angle),
"Invalid angle exponent");
};
template<typename Dimensions, int n>
struct DimensionsExponentiationGenerator : not_constructible {
using Type =
internal_dimensions::Dimensions<Dimensions::Length * n,
Dimensions::Mass * n,
Dimensions::Time * n,
Dimensions::Current * n,
Dimensions::Temperature * n,
Dimensions::Amount * n,
Dimensions::LuminousIntensity * n,
Dimensions::Angle * n>;
};
template<typename Dimensions, int n>
struct DimensionsNthRootGenerator : not_constructible {
static_assert((Dimensions::Length % n) == 0 &&
(Dimensions::Mass % n) == 0 &&
(Dimensions::Time % n) == 0 &&
(Dimensions::Current % n) == 0 &&
(Dimensions::Temperature % n) == 0 &&
(Dimensions::Amount % n) == 0 &&
(Dimensions::LuminousIntensity % n) == 0 &&
(Dimensions::Angle % n) == 0,
"Dimensions not suitable for Nth root");
using Type =
internal_dimensions::Dimensions<Dimensions::Length / n,
Dimensions::Mass / n,
Dimensions::Time / n,
Dimensions::Current / n,
Dimensions::Temperature / n,
Dimensions::Amount / n,
Dimensions::LuminousIntensity / n,
Dimensions::Angle / n>;
};
template<typename LDimensions, typename RDimensions>
struct DimensionsProductGenerator : not_constructible {
using Type = Dimensions<LDimensions::Length + RDimensions::Length,
LDimensions::Mass + RDimensions::Mass,
LDimensions::Time + RDimensions::Time,
LDimensions::Current + RDimensions::Current,
LDimensions::Temperature + RDimensions::Temperature,
LDimensions::Amount + RDimensions::Amount,
LDimensions::LuminousIntensity +
RDimensions::LuminousIntensity,
LDimensions::Angle + RDimensions::Angle>;
};
template<typename LDimensions, typename RDimensions>
struct DimensionsQuotientGenerator : not_constructible {
using Type = Dimensions<LDimensions::Length - RDimensions::Length,
LDimensions::Mass - RDimensions::Mass,
LDimensions::Time - RDimensions::Time,
LDimensions::Current - RDimensions::Current,
LDimensions::Temperature - RDimensions::Temperature,
LDimensions::Amount - RDimensions::Amount,
LDimensions::LuminousIntensity -
RDimensions::LuminousIntensity,
LDimensions::Angle - RDimensions::Angle>;
};
} // namespace internal_dimensions
} // namespace quantities
} // namespace principia
| 3,175 |
344 | <filename>src/Examples/Ch08-GeometryColoring/Geometry-02-BasicDrawing/Player.h
//===============================================================================
// @ Player.h
// ------------------------------------------------------------------------------
// Main player
//
// Copyright (C) 2008-2015 by <NAME> and <NAME>.
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
//
//
//===============================================================================
#ifndef __PlayerDefs__
#define __PlayerDefs__
//-------------------------------------------------------------------------------
//-- Dependencies ---------------------------------------------------------------
//-------------------------------------------------------------------------------
#include <IvMatrix33.h>
#include <IvVector2.h>
#include <IvVector3.h>
//-------------------------------------------------------------------------------
//-- Classes --------------------------------------------------------------------
//-------------------------------------------------------------------------------
class IvIndexBuffer;
class IvVertexBuffer;
class Player
{
public:
Player();
~Player();
void Update( float dt );
void Render();
private:
void CreateSphere();
void DrawSphere();
float mScale;
IvMatrix33 mRotate;
IvVector3 mTranslate;
float mRadius;
IvIndexBuffer* mSphereIndices;
IvVertexBuffer* mSphereVerts;
};
#endif
| 350 |
1,707 | <gh_stars>1000+
//------------------------------------------------------------------------------
// ResourcePoolTest.cc
//------------------------------------------------------------------------------
#include "Pre.h"
#include "UnitTest++/src/UnitTest++.h"
#include "Resource/ResourcePool.h"
#include "Resource/ResourceBase.h"
using namespace Oryol;
class myResource : public ResourceBase {
public:
int blub = 0;
};
class myResourcePool : public ResourcePool<myResource> { };
TEST(ResourcePoolTest) {
const uint16_t myResourceType = 12;
const int poolSize = 256;
myResourcePool resourcePool;
resourcePool.Setup(myResourceType, poolSize);
CHECK(resourcePool.IsValid());
CHECK(resourcePool.GetNumSlots() == 256);
CHECK(resourcePool.GetNumFreeSlots() == 256);
CHECK(resourcePool.GetNumUsedSlots() == 0);
CHECK(resourcePool.LastAllocSlot == 0);
Id resId = resourcePool.AllocId();
CHECK(resId.IsValid());
CHECK(resId.Type == 12);
CHECK(resId.SlotIndex == 0);
CHECK(ResourceState::InvalidState == resourcePool.QueryState(resId));
CHECK(resourcePool.LastAllocSlot == 0);
resourcePool.Assign(resId, ResourceState::Valid);
CHECK(resourcePool.GetNumFreeSlots() == 255);
CHECK(resourcePool.GetNumUsedSlots() == 1);
CHECK(ResourceState::Valid == resourcePool.QueryState(resId));
const myResource* res = resourcePool.Lookup(resId);
CHECK(nullptr != res);
CHECK(res->Id == resId);
CHECK(resourcePool.QueryResourceInfo(resId).State == ResourceState::Valid);
Id resId1 = resourcePool.AllocId();
CHECK(resId1.IsValid());
CHECK(resId1.Type == 12);
CHECK(resId1.SlotIndex == 1);
CHECK(ResourceState::InvalidState == resourcePool.QueryState(resId1));
CHECK(resourcePool.LastAllocSlot == 1);
resourcePool.Assign(resId1, ResourceState::Valid);
CHECK(resourcePool.GetNumFreeSlots() == 254);
CHECK(resourcePool.GetNumUsedSlots() == 2);
CHECK(ResourceState::Valid == resourcePool.QueryState(resId));
const myResource* res1 = resourcePool.Lookup(resId1);
CHECK(nullptr != res1);
CHECK(res1->Id == resId1);
CHECK(resourcePool.QueryResourceInfo(resId1).State == ResourceState::Valid);
const ResourcePoolInfo poolInfo = resourcePool.QueryPoolInfo();
CHECK(poolInfo.ResourceType == myResourceType);
CHECK(poolInfo.NumSlots == 256);
CHECK(poolInfo.NumUsedSlots == 2);
CHECK(poolInfo.NumFreeSlots == 254);
resourcePool.Unassign(resId);
CHECK(resourcePool.GetNumFreeSlots() == 255);
CHECK(resourcePool.GetNumUsedSlots() == 1);
CHECK(resourcePool.QueryState(resId) == ResourceState::InvalidState);
CHECK(resourcePool.LastAllocSlot == 1);
resourcePool.Unassign(resId1);
CHECK(resourcePool.GetNumFreeSlots() == 256);
CHECK(resourcePool.GetNumUsedSlots() == 0);
CHECK(resourcePool.QueryState(resId1) == ResourceState::InvalidState);
CHECK(resourcePool.LastAllocSlot == 0);
resourcePool.Discard();
CHECK(!resourcePool.IsValid());
}
| 1,090 |
3,183 | <reponame>DeathGOD7/pythonnet<gh_stars>1000+
"""
Implements collections.abc for common .NET types
https://docs.python.org/3.6/library/collections.abc.html
"""
import collections.abc as col
class IteratorMixin(col.Iterator):
def close(self):
self.Dispose()
class IterableMixin(col.Iterable):
pass
class SizedMixin(col.Sized):
def __len__(self): return self.Count
class ContainerMixin(col.Container):
def __contains__(self, item): return self.Contains(item)
try:
abc_Collection = col.Collection
except AttributeError:
# Python 3.5- does not have collections.abc.Collection
abc_Collection = col.Container
class CollectionMixin(SizedMixin, IterableMixin, ContainerMixin, abc_Collection):
pass
class SequenceMixin(CollectionMixin, col.Sequence):
pass
class MutableSequenceMixin(SequenceMixin, col.MutableSequence):
pass
class MappingMixin(CollectionMixin, col.Mapping):
def __contains__(self, item): return self.ContainsKey(item)
def keys(self): return self.Keys
def items(self): return [(k,self.get(k)) for k in self.Keys]
def values(self): return self.Values
def __iter__(self): return self.Keys.__iter__()
def get(self, key, default=None):
existed, item = self.TryGetValue(key, None)
return item if existed else default
class MutableMappingMixin(MappingMixin, col.MutableMapping):
_UNSET_ = object()
def __delitem__(self, key):
self.Remove(key)
def clear(self):
self.Clear()
def pop(self, key, default=_UNSET_):
existed, item = self.TryGetValue(key, None)
if existed:
self.Remove(key)
return item
        elif default is self._UNSET_:
raise KeyError(key)
else:
return default
def setdefault(self, key, value=None):
existed, item = self.TryGetValue(key, None)
if existed:
return item
else:
self[key] = value
return value
def update(self, items, **kwargs):
if isinstance(items, col.Mapping):
for key, value in items.items():
self[key] = value
else:
for key, value in items:
self[key] = value
for key, value in kwargs.items():
self[key] = value
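# Illustrative usage sketch (hypothetical; assumes the runtime attaches these mixins
# to the matching .NET collection interfaces, e.g. IDictionary -> MutableMappingMixin):
#
#   from System.Collections.Generic import Dictionary
#   d = Dictionary[str, int]()
#   d["answer"] = 42
#   d.get("answer")            # MappingMixin: 42
#   d.items()                  # MappingMixin: [("answer", 42)]
#   d.pop("answer")            # MutableMappingMixin: 42, and removes the key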
| 967 |
1,160 | #
# Copyright 2014 ARM Limited and Contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of ARM Limited nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ARM LIMITED AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
'''change the copyright year
there are 4 cases:
1 2014    => 2014
2 20xx    => 20xx-14
3 20xx-bb => 20xx-14
4 20xx-14 => 20xx-14
'''
import re, os, sys
import fileinput
import argparse
def extend_copyright(line, year):
'''year is the year which you want to extend to'''
#match the format like 'Copyright 2014 ARM Limited' or 'Copyright 2011-14 ARM Limited'
p2014 = re.compile(r'.*{}.*'.format(year))
if p2014.match(line):
return line
#match the format like 'Copyright 2011-12 ARM Limited'
p20xx_bb = re.compile(r'(.*)(20\d\d)(-)(\d\d)(.*)')
m = p20xx_bb.match(line)
if m:
return p20xx_bb.sub(r'\g<1>\g<2>\g<3>{}\g<5>'.format(year), line)
#match the format like 'Copyright 2012 ARM Limited'
p20xx = re.compile(r'(.*)(20\d\d)(.*)')
m = p20xx.match(line)
if m:
return p20xx.sub(r'\g<1>\g<2>-{}\g<3>'.format(year), line)
def replace_line(file,search_exp,replace_exp):
for line in fileinput.input(file, inplace=1):
if search_exp in line:
line = line.replace(search_exp, replace_exp)
sys.stdout.write(line)
def test():
year = '14'
if extend_copyright('Copyright 2011-12 ARM Limited', year) != 'Copyright 2011-14 ARM Limited':
print "test failed"
return
if extend_copyright('Copyright 2013-14 ARM Limited', year) != 'Copyright 2013-14 ARM Limited':
print "test failed"
return
if extend_copyright('Copyright 2012 ARM Limited', year) != 'Copyright 2012-14 ARM Limited':
print "test failed"
return
if extend_copyright('Copyright 2014 ARM Limited', year) != 'Copyright 2014 ARM Limited':
print "test failed"
return
print "test success."
def extend_copyright_all(extend_to_year):
all_files = []
for root, dirs, files in os.walk(os.getcwd()):
for f in files:
#exclude this script file
if f != os.path.basename(sys.argv[0]):
all_files.append(os.path.join(root, f))
pcopy_right = re.compile(r'.*Copyright [0-9-]* ARM Limited.*')
    for f in all_files:
        fd = open(f, 'r')
        lines = fd.readlines()
        fd.close()
        for line in lines:
            m = pcopy_right.match(line)
            if m:
                old_line = m.group(0)
                new_line = extend_copyright(old_line, extend_to_year)
                replace_line(f, old_line, new_line)
def main():
parser = argparse.ArgumentParser(description='Extend copyright year to the year you specified.')
parser.add_argument('year', nargs='?', help='year you want to extend, only 2 digitals, e.g.\'14\'')
parser.add_argument('-t', '--test', action='store_true', help='run the test')
args = parser.parse_args()
if args.test:
test()
return
else:
#check input year includes 2 digitals
pdigital2 = re.compile(r'^\d\d$')
if args.year and pdigital2.search(args.year):
extend_copyright_all(args.year)
else:
parser.print_help()
if __name__ == '__main__':
main()
| 1,800 |
376 | // Copyright (C) 2017 <NAME>
// This file is part of the "Nazara Engine - Graphics module"
// For conditions of distribution and use, see copyright notice in Config.hpp
#pragma once
#ifndef NAZARA_UBER_SHADER_HPP
#define NAZARA_UBER_SHADER_HPP
#include <Nazara/Prerequisites.hpp>
#include <Nazara/Core/Algorithm.hpp>
#include <Nazara/Core/Bitset.hpp>
#include <Nazara/Graphics/Config.hpp>
#include <Nazara/Renderer/RenderPipeline.hpp>
#include <Nazara/Shader/Ast/Nodes.hpp>
#include <unordered_map>
namespace Nz
{
class ShaderModule;
class NAZARA_GRAPHICS_API UberShader
{
public:
struct Config;
struct Option;
using ConfigCallback = std::function<void(Config& config, const std::vector<RenderPipelineInfo::VertexBufferData>& vertexBuffers)>;
UberShader(ShaderStageTypeFlags shaderStages, const ShaderAst::StatementPtr& shaderAst);
~UberShader() = default;
inline ShaderStageTypeFlags GetSupportedStages() const;
const std::shared_ptr<ShaderModule>& Get(const Config& config);
inline bool HasOption(const std::string& optionName, Pointer<const Option>* option = nullptr) const;
inline void UpdateConfig(Config& config, const std::vector<RenderPipelineInfo::VertexBufferData>& vertexBuffers);
inline void UpdateConfigCallback(ConfigCallback callback);
static constexpr std::size_t MaximumOptionValue = 32;
struct Config
{
std::array<ShaderAst::ConstantValue, MaximumOptionValue> optionValues;
};
struct ConfigEqual
{
inline bool operator()(const Config& lhs, const Config& rhs) const;
};
struct ConfigHasher
{
inline std::size_t operator()(const Config& config) const;
};
struct Option
{
std::size_t index;
ShaderAst::ExpressionType type;
};
private:
std::unordered_map<Config, std::shared_ptr<ShaderModule>, ConfigHasher, ConfigEqual> m_combinations;
std::unordered_map<std::string, Option> m_optionIndexByName;
ShaderAst::StatementPtr m_shaderAst;
ConfigCallback m_configCallback;
ShaderStageTypeFlags m_shaderStages;
};
}
#include <Nazara/Graphics/UberShader.inl>
#endif // NAZARA_UBER_SHADER_HPP
| 771 |
824 | """
Auto graph classification using cross validation methods proposed in
paper `A Fair Comparison of Graph Neural Networks for Graph Classification`
"""
import sys
import random
import torch
import numpy as np
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
sys.path.append("../")
from autogl.datasets import build_dataset_from_name, utils
from autogl.solver import AutoGraphClassifier
from autogl.module import Acc
if __name__ == "__main__":
parser = ArgumentParser(
"auto graph classification", formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--dataset",
default="mutag",
type=str,
help="graph classification dataset",
choices=["mutag", "imdb-b", "imdb-m", "proteins", "collab"],
)
parser.add_argument(
"--configs", default="../configs/graphclf_full.yml", help="config files"
)
parser.add_argument("--device", type=int, default=0, help="device to run on")
parser.add_argument("--seed", type=int, default=0, help="random seed")
parser.add_argument("--folds", type=int, default=10, help="fold number")
args = parser.parse_args()
if torch.cuda.is_available():
torch.cuda.set_device(args.device)
seed = args.seed
# set random seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
print("begin processing dataset", args.dataset, "into", args.folds, "folds.")
dataset = build_dataset_from_name(args.dataset)
if args.dataset.startswith("imdb"):
from autogl.module.feature.generators import PYGOneHotDegree
# get max degree
from torch_geometric.utils import degree
max_degree = 0
for data in dataset:
deg_max = int(degree(data.edge_index[0], data.num_nodes).max().item())
max_degree = max(max_degree, deg_max)
dataset = PYGOneHotDegree(max_degree).fit_transform(dataset, inplace=False)
elif args.dataset == "collab":
from autogl.module.feature.auto_feature import Onlyconst
dataset = Onlyconst().fit_transform(dataset, inplace=False)
utils.graph_cross_validation(dataset, args.folds, random_seed=args.seed)
accs = []
for fold in range(args.folds):
print("evaluating on fold number:", fold)
utils.graph_set_fold_id(dataset, fold)
train_dataset = utils.graph_get_split(dataset, "train", False)
autoClassifier = AutoGraphClassifier.from_config(args.configs)
autoClassifier.fit(
train_dataset,
train_split=0.9,
val_split=0.1,
seed=args.seed,
evaluation_method=[Acc],
)
predict_result = autoClassifier.predict_proba(dataset, mask="val")
acc = Acc.evaluate(
predict_result, dataset.data.y[dataset.val_index].cpu().detach().numpy()
)
print(
"test acc %.4f"
% (
Acc.evaluate(
predict_result,
dataset.data.y[dataset.val_index].cpu().detach().numpy(),
)
)
)
accs.append(acc)
print("Average acc on", args.dataset, ":", np.mean(accs), "~", np.std(accs))
| 1,468 |
918 | <reponame>gatarelib/LiquidCore
/*
* Copyright (c) 2018 <NAME>
*
* Distributed under the MIT License. See LICENSE.md at
* https://github.com/LiquidPlayer/LiquidCore for terms and conditions.
*/
#include "V82JSC.h"
#include "JSCPrivate.h"
#include "ObjectTemplate.h"
#include "Object.h"
using namespace V82JSC;
using namespace v8;
V82JSC::TrackedObject* V82JSC::TrackedObject::makePrivateInstance(IsolateImpl* iso, JSContextRef ctx)
{
TrackedObject* impl = static_cast<TrackedObject*>(HeapAllocator::Alloc(iso, iso->m_tracked_object_map));
impl->m_hash = 1 + rand();
return impl;
}
void V82JSC::TrackedObject::setPrivateInstance(IsolateImpl* iso, JSContextRef ctx, TrackedObject* impl, JSObjectRef object)
{
HandleScope scope(ToIsolate(iso));
// Keep only a weak reference to m_security to avoid cyclical references
impl->m_security = object;
Local<TrackedObject> to = CreateLocal<TrackedObject>(&iso->ii, impl);
void * data = PersistentData<TrackedObject>(ToIsolate(iso), to);
JSClassDefinition def = kJSClassDefinitionEmpty;
def.attributes = kJSClassAttributeNoAutomaticPrototype;
def.finalize = [](JSObjectRef object) {
void * persistent = JSObjectGetPrivate(object);
assert(persistent);
auto location = (v8::internal::Object **)persistent;
IsolateImpl *iso = IsolateImpl::getIsolateFromGlobalHandle(location);
if (!iso) return;
HandleScope scope(ToIsolate(iso));
Local<TrackedObject> local = FromPersistentData<TrackedObject>(ToIsolate(iso), persistent);
assert(!local.IsEmpty());
TrackedObject *impl = ToImpl<TrackedObject>(local);
JSGlobalContextRef gctx = JSContextGetGlobalContext(ToContextRef(iso->m_nullContext.Get(ToIsolate(iso))));
iso->weakJSObjectFinalized(gctx, (JSObjectRef) impl->m_security);
ReleasePersistentData<TrackedObject>(persistent);
};
JSClassRef klass = JSClassCreate(&def);
JSObjectRef private_object = JSObjectMake(ctx, klass, data);
JSClassRelease(klass);
JSValueRef args[] = {
object, private_object, iso->m_private_symbol
};
exec(JSContextGetGlobalContext(ctx),
"Object.defineProperty(_1, _3, {value: _2, enumerable: false, configurable: true, writable: true})",
3, args);
}
V82JSC::TrackedObject* V82JSC::TrackedObject::makePrivateInstance(IsolateImpl* iso, JSContextRef ctx, JSObjectRef object)
{
auto impl = TrackedObject::getPrivateInstance(ctx, object);
if (impl) return impl;
impl = makePrivateInstance(iso, ctx);
TrackedObject::setPrivateInstance(iso, ctx, impl, object);
return impl;
}
V82JSC::TrackedObject* V82JSC::TrackedObject::getPrivateInstance(JSContextRef ctx, JSObjectRef object)
{
IsolateImpl *iso = IsolateFromCtx(ctx);
HandleScope scope(ToIsolate(iso));
//JSObjectRef maybe_proxy_object = JSObjectGetProxyTarget(object);
JSValueRef args[] = {
/*maybe_proxy_object ? maybe_proxy_object :*/ object,
iso->m_private_symbol
};
JSObjectRef private_object = (JSObjectRef) exec(ctx, "return Reflect.get(_1,_2)", 2, args);
if (JSValueIsObject(ctx, private_object)) {
void * persistent = JSObjectGetPrivate(private_object);
if (!persistent) return nullptr;
Local<TrackedObject> local = FromPersistentData<TrackedObject>(ToIsolate(iso), persistent);
assert(!local.IsEmpty());
TrackedObject *impl = ToImpl<TrackedObject>(local);
JSValueRef hasargs[] = {
impl->m_access_proxies,
impl->m_global_object_access_proxies,
object
};
if ((JSValueIsStrictEqual(ctx, object, impl->m_security) ||
(impl->m_proxy_security && JSValueIsStrictEqual(ctx, object, impl->m_proxy_security)) ||
(impl->m_hidden_proxy_security && JSValueIsStrictEqual(ctx, object, impl->m_hidden_proxy_security)) ||
(impl->m_access_proxies && exec(ctx, "return _1.includes(_3)", 3, hasargs)) ||
(impl->m_global_object_access_proxies && exec(ctx, "return _2.includes(_3)", 3, hasargs)) )) {
return impl;
} else if (impl->m_isGlobalObject) {
JSObjectRef proto = object;
while (JSValueIsObject(ctx, proto)) {
if (JSValueIsStrictEqual(ctx, proto, impl->m_security) || JSValueIsStrictEqual(ctx, proto, impl->m_proxy_security)) {
return impl;
}
proto = (JSObjectRef) JSObjectGetPrototype(ctx, proto);
}
}
}
return nullptr;
}
Local<v8::Value> V82JSC::TrackedObject::SecureValue(Local<v8::Value> in, Local<v8::Context> toContext)
{
Isolate* isolate = ToIsolate(*in);
EscapableHandleScope scope(isolate);
#if USE_JAVASCRIPTCORE_PRIVATE_API
/*
V8::AccessControl::ALL_CAN_READ = 1; (1)
V8::AccessControl::ALL_CAN_WRITE = 1 << 1; (2)
*/
static const char *security_proxy =
"var handler = {"
" get : (t, p, r) => { "
" try { return Reflect.get(_2('get',_1,1,p), p); }"
" catch(e) {"
" if (p===Symbol.isConcatSpreadable || p===Symbol.toStringTag) "
" return undefined;"
" else throw e; } },"
" set : (t, p, v, r) => (_2('set',_1,2,p,r)[p] = v) == v,"
" setPrototypeOf : (t, p) => {_2('setPrototypeOf',_1); return Object.setPrototypeOf(_1,p); },"
" getPrototypeOf : (t) => { try{_2('getPrototypeOf',_1); return Object.getPrototypeOf(_1);} catch(e) {return null;}},"
" getOwnPropertyDescriptor : (t, p) => _2('getOwnPropertyDescriptor',_1,1,p,null,true),"
" defineProperty : (t,p,d) => Object.defineProperty(_2('defineProperty',_1),p,d),"
" has : (t, p) => p in _2('has',_1,1,p),"
" deleteProperty : (t, p) => Reflect.deleteProperty(_2('deleteProperty',_1),p),"
" ownKeys : (t) => _2('ownKeys',_1),"
" apply : (t, z, a) => Reflect.apply(_2('apply',_1),z,a),"
" construct : (t, a, n) => Reflect.construct(_2('construct',_1),a,n),"
" preventExtensions : (t) => { Object.preventExtensions(_2('preventExtensions',_1)); return Object.preventExtensions(t); },"
" isExtensible : (t) => { var is = Object.isExtensible(/*_2('isExtensible',_1)*/_1); return is; }"
"};"
"var o = (typeof _1 === 'function') ? function(){} : {};"
"return new Proxy(o, handler);";
Local<v8::Context> context = isolate->GetCurrentContext();
JSContextRef ctx = ToContextRef(context);
JSGlobalContextRef gctx = JSContextGetGlobalContext(ctx);
JSValueRef in_value = ToJSValueRef(in, context);
if (!JSValueIsObject(ctx, in_value)) return in;
JSGlobalContextRef orig_context = JSCPrivate::JSObjectGetGlobalContext((JSObjectRef)in_value);
IsolateImpl* iso = ToIsolateImpl(isolate);
if (orig_context == ToImpl<Context>(iso->m_nullContext.Get(isolate))->m_ctxRef) {
return in;
}
JSGlobalContextRef toGlobalContext = 0;
if (!toContext.IsEmpty()) {
toGlobalContext = JSContextGetGlobalContext(ToContextImpl(toContext)->m_ctxRef);
}
TrackedObject *wrap = getPrivateInstance(ctx, (JSObjectRef)in_value);
JSObjectRef check_proxies = 0;
JSObjectRef global_object = JSContextGetGlobalObject(orig_context);
bool isActualGlobalObject = JSValueIsStrictEqual(ctx, global_object, in_value);
if (isActualGlobalObject && !wrap) {
wrap = makePrivateInstance(iso, ctx, (JSObjectRef)in_value);
wrap->m_isGlobalObject = true;
}
if (!isActualGlobalObject && wrap && wrap->m_global_object_access_proxies) {
JSValueRef args[] = {
wrap->m_global_object_access_proxies,
in_value
};
isActualGlobalObject = JSValueToBoolean(ctx, exec(ctx, "return _1.includes(_2)", 2, args));
}
// If we are putting the global object back into its own context, remove the proxy
if (isActualGlobalObject && orig_context == toGlobalContext) {
return Value::New(ToContextImpl(toContext), JSContextGetGlobalObject(orig_context));
}
if (wrap && wrap->m_isGlobalObject && wrap->m_global_object_access_proxies && isActualGlobalObject) {
check_proxies = wrap->m_global_object_access_proxies;
} else if (wrap && wrap->m_access_proxies && !isActualGlobalObject) {
check_proxies = wrap->m_access_proxies;
}
if (check_proxies) {
int length = static_cast<int>(JSValueToNumber(ctx, exec(ctx, "return _1.length", 1, &check_proxies), 0));
for (int i=0; i<length; i++) {
JSObjectRef maybe_proxy = (JSObjectRef) JSObjectGetPropertyAtIndex(ctx, check_proxies, i, 0);
if (JSCPrivate::JSObjectGetGlobalContext(maybe_proxy) == gctx) {
return scope.Escape(Value::New(ToContextImpl(context), maybe_proxy));
}
}
}
bool install_proxy = orig_context != gctx;
if (!install_proxy) {
if (wrap && !wrap->m_object_template.IsEmpty()) {
Local<v8::ObjectTemplate> ot = wrap->m_object_template.Get(isolate);
if (!ot.IsEmpty()) {
auto oi = ToImpl<ObjectTemplate>(ot);
install_proxy = oi->m_access_check != nullptr;
}
}
}
if (!install_proxy) {
if (!wrap || !wrap->m_isGlobalObject) {
TrackedObject *global_wrap = getPrivateInstance(ctx, JSContextGetGlobalObject(ctx));
install_proxy = global_wrap && (global_wrap->m_isDetached || global_wrap->m_reattached_global);
}
}
if (install_proxy) {
JSClassDefinition def = kJSClassDefinitionEmpty;
def.attributes = kJSClassAttributeNoAutomaticPrototype;
def.callAsFunction = [](JSContextRef ctx, JSObjectRef proxy_function, JSObjectRef thisObject,
size_t argumentCount, const JSValueRef *arguments, JSValueRef *exception) ->JSValueRef
{
IsolateImpl* iso = IsolateFromCtx(ctx);
Isolate* isolate = ToIsolate(iso);
HandleScope scope(isolate);
JSStringRef method = JSValueToStringCopy(ctx, arguments[0], 0);
char buffer[JSStringGetMaximumUTF8CStringSize(method)];
JSStringGetUTF8CString(method, buffer, JSStringGetMaximumUTF8CStringSize(method));
JSStringRelease(method);
/* DEBUG
if (argumentCount > 3) {
JSStringRef prop = JSValueToStringCopy(ctx, arguments[3], 0);
if (!prop) { prop = JSStringCreateWithUTF8CString("[symbol]"); }
char prop_buf[JSStringGetMaximumUTF8CStringSize(prop)];
JSStringGetUTF8CString(prop, prop_buf, JSStringGetMaximumUTF8CStringSize(prop));
JSStringRelease(prop);
printf ("method: %s, property: %s\n", buffer, prop_buf);
} else {
printf ("method: %s\n", buffer);
}
*/
// Always let the private symbol pass through without restriction
if (!strcmp(buffer,"get") && JSValueIsStrictEqual(ctx, arguments[3], iso->m_private_symbol)) {
return arguments[1];
}
if (JSContextGetGlobalContext(ctx) == ToContextRef(iso->m_nullContext.Get(isolate))) {
return arguments[1];
}
JSObjectRef in_value = (JSObjectRef)arguments[1];
JSGlobalContextRef orig_context = JSCPrivate::JSObjectGetGlobalContext(in_value);
Local<v8::Context> accessing_context = LocalContext::New(isolate, ctx);
Local<Object> accessing_object = Value::New(ToContextImpl(accessing_context), in_value).As<Object>();
bool allow = false;
bool detached_behavior = false;
if (argumentCount>4 && !JSValueIsNull(ctx,arguments[4])) {
JSObjectRef target = JSCPrivate::JSObjectGetProxyTarget(accessing_context,(JSObjectRef)arguments[4]);
/* FIXME! Not sure this will always work correctly
if (!JSValueIsStrictEqual(ctx, target, in_value)) {
return in_value;
}
*/
if (target == 0) {
return in_value;
}
}
ObjectTemplate* oi = nullptr;
TrackedObject *wrap = getPrivateInstance(ctx, in_value);
AccessControl access_granted = DEFAULT;
if (wrap) {
Local<v8::ObjectTemplate> ot = wrap->m_object_template.Get(isolate);
oi = ot.IsEmpty() ? nullptr : ToImpl<ObjectTemplate>(ot);
}
if (wrap && wrap->m_isDetached) {
allow = false;
detached_behavior = true;
} else if (wrap && wrap->m_reattached_global) {
Local<v8::Value> reattached = SecureValue(Value::New(ToContextImpl(accessing_context),
wrap->m_reattached_global));
in_value = (JSObjectRef) ToJSValueRef(reattached, accessing_context);
allow = true;
} else {
if (argumentCount > 3) {
auto access_requested = static_cast<v8::AccessControl>(JSValueToNumber(ctx, arguments[2], 0));
JSObjectRef proto = in_value;
TrackedObject *proto_wrap = wrap;
while (JSValueIsObject(ctx, proto)) {
if (proto_wrap && proto_wrap->m_access_control) {
JSValueRef args[] = {
proto_wrap->m_access_control,
arguments[3]
};
JSValueRef access_granted_value = exec(ctx, "return _1[_2]", 2, args);
if (JSValueIsNumber(ctx, access_granted_value)) {
access_granted = static_cast<AccessControl>(JSValueToNumber(ctx, access_granted_value, 0));
break;
}
}
proto = (JSObjectRef) GetRealPrototype(accessing_context, proto);
if (JSValueIsObject(ctx, proto)) {
proto_wrap = getPrivateInstance(ctx, proto);
}
}
allow = (access_granted & access_requested) != 0;
}
// If we don't have our own access check, inherit from our global object
TrackedObject *global_wrap = nullptr;
AccessCheckCallback access_check = oi ? oi->m_access_check : nullptr;
JSValueRef access_check_data = oi ? oi->m_access_check_data : 0;
if (!access_check && (!wrap || !wrap->m_isGlobalObject)) {
JSObjectRef global = JSContextGetGlobalObject(orig_context);
global_wrap = getPrivateInstance(orig_context, global);
if (global_wrap && !global_wrap->m_object_template.IsEmpty()) {
auto otmpl = ToImpl<ObjectTemplate>(global_wrap->m_object_template.Get(isolate));
access_check = otmpl->m_access_check;
access_check_data = otmpl->m_access_check_data;
}
}
if (global_wrap && global_wrap->m_isDetached) {
allow = !(JSContextGetGlobalContext(ctx) == orig_context);
detached_behavior = true;
} else {
// STEP 1: If there is an access check callback, use that to decide
if (!allow && access_check) {
if (wrap->m_isGlobalObject && (JSContextGetGlobalContext(ctx) == orig_context)) {
allow = true;
} else {
Local<v8::Value> accessing_data = Value::New(ToContextImpl(accessing_context), access_check_data);
allow = access_check(accessing_context, accessing_object, accessing_data);
}
// STEP 2: If no access check callback, see if security tokens match
} else if (!allow) {
Local<v8::Value> accessing_token = accessing_context->GetSecurityToken();
Local<v8::Context> orig_global_context = iso->m_global_contexts[orig_context].Get(isolate);
Local<v8::Value> orig_token = orig_global_context->GetSecurityToken();
if (orig_token.IsEmpty() && (!wrap || !wrap->m_isGlobalObject)) {
allow = true;
} else if (orig_token.IsEmpty()) {
allow = false;
} else if (accessing_token.IsEmpty()) {
allow = orig_token.IsEmpty();
} else {
allow = accessing_token->StrictEquals(orig_token);
}
}
}
}
if (allow && !strcmp(buffer, "apply") && detached_behavior) {
return exec(ctx, "return function(){}", 0, nullptr);
}
if (allow && !strcmp(buffer, "getOwnPropertyDescriptor")) {
JSValueRef proto = JSObjectGetPrototype(ctx, in_value);
if (wrap && wrap->m_isGlobalObject && JSValueIsObject(ctx, proto)) {
proto = JSObjectGetPrototype(ctx, (JSObjectRef)proto);
}
JSValueRef args[] = {
in_value,
arguments[3],
proto,
JSValueMakeNumber(ctx, access_granted)
};
JSValueRef desc = 0;
if (wrap && wrap->m_isGlobalObject) {
desc = exec(ctx,
"var d = Object.getOwnPropertyDescriptor(_1, _2); "
"if (d!==undefined) { d.configurable = true; return d; }"
"d = Object.getOwnPropertyDescriptor(_3, _2); "
"if (d!==undefined) {return {"
" value: _1[_2],"
" writable: (_4&2)==2,"
" enumerable: d.enumerable,"
" configurable: true"
"}} else { return undefined; }",
4, args);
} else {
desc = exec(ctx, "return Object.getOwnPropertyDescriptor(_1, _2)", 2, args);
}
return desc;
}
if (!strcmp(buffer, "ownKeys")) {
JSValueRef keys;
if (allow) {
keys = exec(ctx, "return Reflect.ownKeys(_1)", 1, &in_value);
} else if (wrap && wrap->m_access_control) {
keys = exec(ctx, "var keys=[]; for (e in _1) { if ((_1[e] & 1)==1) keys.push(e); } return keys;", 1, &wrap->m_access_control);
} else {
keys = JSObjectMakeArray(ctx, 0, nullptr, 0);
}
return keys;
}
if (!allow) {
int at =
!strcmp(buffer, "get") ? AccessType::ACCESS_GET:
!strcmp(buffer, "set") ? AccessType::ACCESS_SET:
!strcmp(buffer, "has") ? AccessType::ACCESS_HAS:
!strcmp(buffer, "getOwnPropertyDescriptor") ? AccessType::ACCESS_HAS:
!strcmp(buffer, "ownKeys") ? AccessType::ACCESS_KEYS:
!strcmp(buffer, "deleteProperty") ? AccessType::ACCESS_DELETE: -1;
if (at>=0) {
if (iso->m_failed_access_check_callback) {
iso->m_failed_access_check_callback(accessing_object, static_cast<AccessType>(at), Local<v8::Value>());
}
}
*exception = ToJSValueRef(Exception::TypeError
(v8::String::NewFromUtf8(isolate, "access denied",
NewStringType::kNormal).ToLocalChecked()), accessing_context);
return NULL;
}
return in_value;
};
JSClassRef klass = JSClassCreate(&def);
JSObjectRef accessor_func = JSObjectMake(ctx, klass, 0);
JSValueRef args[] = {
in_value,
accessor_func
};
JSValueRef out_value = exec(ctx, security_proxy, 2, args);
wrap = makePrivateInstance(ToIsolateImpl(isolate), ctx, (JSObjectRef)in_value);
if (isActualGlobalObject) {
if (!wrap->m_global_object_access_proxies) {
wrap->m_global_object_access_proxies = JSObjectMakeArray(ctx, 1, &out_value, 0);
JSValueProtect(ctx, wrap->m_global_object_access_proxies);
} else {
JSValueRef args[] = {
wrap->m_global_object_access_proxies,
out_value
};
exec(ctx, "_1.push(_2)", 2, args);
}
} else {
if (!wrap->m_access_proxies) {
wrap->m_access_proxies = JSObjectMakeArray(ctx, 1, &out_value, 0);
JSValueProtect(ctx, wrap->m_access_proxies);
} else {
JSValueRef args[] = {
wrap->m_access_proxies,
out_value
};
exec(ctx, "_1.push(_2)", 2, args);
}
if (!wrap->m_proxy_security) {
wrap->m_proxy_security = in_value;
}
}
Local<v8::Value> out = Value::New(ToContextImpl(context), out_value);
return scope.Escape(out);
}
#endif
return scope.Escape(in);
}
| 10,829 |
652 | from datetime import datetime
from functools import wraps
import flask
import requests
from flask import Blueprint
from flask_babel import gettext
from flask_login import current_user, login_required, logout_user
from flask_weasyprint import HTML, render_pdf
from loguru import logger
from sqlalchemy import and_
from server.api.blueprints.login import create_user_from_data
from server.api.database.models import (
Day,
Appointment,
Payment,
PaymentType,
Report,
ReportType,
Student,
Teacher,
User,
WorkDay,
Kilometer,
Car,
CarType,
)
from server.api.push_notifications import FCM
from server.api.utils import jsonify_response, paginate
from server.consts import RECEIPT_URL, RECEIPTS_DEVELOPER_EMAIL, WORKDAY_DATE_FORMAT
from server.error_handling import NotificationError, RouteError
teacher_routes = Blueprint("teacher", __name__, url_prefix="/teacher")
def init_app(app):
app.register_blueprint(teacher_routes)
def teacher_required(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
if not current_user.teacher:
raise RouteError("User is not a teacher.", 401)
return func(*args, **kwargs)
return func_wrapper
def like_filter(model, key, value):
return getattr(model, key).like(f"%{value}%")
@teacher_routes.route("/", methods=["GET"])
@jsonify_response
@paginate
def teachers():
try:
extra_filters = {User: {"name": like_filter}}
query = Teacher.query.filter_by(is_approved=True)
return Teacher.filter_and_sort(
flask.request.args,
extra_filters=extra_filters,
query=query,
with_pagination=True,
)
except ValueError:
raise RouteError("Wrong parameters passed.")
@teacher_routes.route("/work_days", methods=["GET"])
@jsonify_response
@login_required
@teacher_required
def work_days():
""" return work days with filter - only on a specific date,
or with no date at all"""
try:
return {
"data": [
day.to_dict()
for day in current_user.teacher.filter_work_days(flask.request.args)
]
}
except ValueError:
raise RouteError("Wrong parameters passed.")
@teacher_routes.route("/work_days", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def update_work_days():
    """ example data:
    0: [{from_hour: 8, from_minutes: 0, to_hour: 14}], 1: {}....
    OR
    "03-15-2019": [{from_hour: 8}], "03-16-2019": []....
    """
    data = flask.request.get_json()
logger.debug(f"WORK DAYS - got the following data")
logger.debug(data)
for day, hours_list in data.items():
# first, let's delete all current data with this date
# TODO better algorithm for that
try:
day = int(day)
params = dict(day=day, teacher=current_user.teacher)
WorkDay.query.filter_by(**params).delete()
except ValueError:
# probably a date
params = dict(
on_date=datetime.strptime(day, WORKDAY_DATE_FORMAT),
teacher=current_user.teacher,
)
WorkDay.query.filter_by(**params).delete()
for hours in hours_list:
from_hour = max(min(int(hours.get("from_hour")), 24), 0)
to_hour = max(min(int(hours.get("to_hour")), 24), 0)
from_minutes = max(min(int(hours.get("from_minutes")), 60), 0)
to_minutes = max(min(int(hours.get("to_minutes")), 60), 0)
car = current_user.teacher.cars.filter_by(id=hours.get("car_id")).first()
if not car:
car = current_user.teacher.cars.first()
if from_hour >= to_hour:
raise RouteError(
"There must be a bigger difference between the two times."
)
current_user.teacher.work_days.append(
WorkDay(
from_hour=from_hour,
from_minutes=from_minutes,
to_hour=to_hour,
to_minutes=to_minutes,
car=car,
**params,
)
)
current_user.save()
return {"message": "Days updated."}
@teacher_routes.route("/work_days/<int:day_id>", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def edit_work_day(day_id):
day = current_user.teacher.work_days.filter_by(id=day_id).first()
if not day:
raise RouteError("Day does not exist", 404)
data = flask.request.get_json()
from_hour = data.get("from_hour", day.from_hour)
to_hour = data.get("to_hour", day.to_hour)
day.update(from_hour=from_hour, to_hour=to_hour)
return {"message": "Day updated successfully."}
@teacher_routes.route("/work_days/<int:day_id>", methods=["DELETE"])
@jsonify_response
@login_required
@teacher_required
def delete_work_day(day_id):
day = current_user.teacher.work_days.filter_by(id=day_id).first()
if not day:
raise RouteError("Day does not exist", 404)
day.delete()
return {"message": "Day deleted."}
@teacher_routes.route("/<int:teacher_id>/available_hours", methods=["POST"])
@jsonify_response
@login_required
def available_hours(teacher_id):
data = flask.request.get_json()
teacher = Teacher.get_by_id(teacher_id)
duration = data.get("duration")
if duration:
duration = int(duration)
only_approved = False
student = None
if current_user.teacher:
only_approved = True
else:
student = current_user.student
places = (data.get("meetup_place_id", None), data.get("dropoff_place_id", None))
return {
"data": list(
teacher.available_hours(
datetime.strptime(data.get("date"), WORKDAY_DATE_FORMAT),
student=student,
duration=duration,
only_approved=only_approved,
places=places,
)
)
}
@teacher_routes.route("/add_payment", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def add_payment():
data = flask.request.get_json()
student = Student.get_by_id(data.get("student_id"))
amount = data.get("amount")
details = data.get("details")
if not student:
raise RouteError("Student does not exist.")
if not amount:
raise RouteError("Amount must not be empty.")
if not details:
raise RouteError("Details must not be empty.")
payment = Payment.create(
teacher=current_user.teacher,
student=student,
amount=amount,
payment_type=getattr(PaymentType, data.get("payment_type", ""), 1),
details=details,
crn=int(data.get("crn")) if data.get("crn") else None,
)
# send notification to student
if student.user.firebase_token:
logger.debug(f"sending fcm to {student.user} for new payment")
try:
FCM.notify(
token=student.user.firebase_token,
title=gettext("New Payment"),
body=gettext(
"%(user)s charged you for %(amount)s",
user=current_user.name,
amount=amount,
),
)
except NotificationError:
pass
return {"data": payment.to_dict()}, 201
@teacher_routes.route("/students", methods=["GET"])
@jsonify_response
@login_required
@teacher_required
@paginate
def students():
"""allow filtering by name / area of student, and sort by balance,
lesson number"""
try:
query = current_user.teacher.students
args = flask.request.args
extra_filters = {User: {"name": like_filter, "area": like_filter}}
return Student.filter_and_sort(
args, query, extra_filters=extra_filters, with_pagination=True
)
except ValueError:
raise RouteError("Wrong parameters passed.")
@teacher_routes.route("/edit_data", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def edit_data():
post_data = flask.request.get_json()
teacher = current_user.teacher
fields = ("price", "lesson_duration")
for field in fields:
if post_data.get(field):
setattr(teacher, field, post_data.get(field))
teacher.save()
return {"data": current_user.to_dict()}
@teacher_routes.route("/<int:teacher_id>/approve", methods=["GET"])
@jsonify_response
@login_required
def approve(teacher_id):
if not current_user.is_admin:
raise RouteError("Not authorized.", 401)
teacher = Teacher.get_by_id(teacher_id)
teacher.update(is_approved=True)
return {"data": teacher.to_dict()}
@teacher_routes.route("/ezcount_user", methods=["GET"])
@jsonify_response
@login_required
@teacher_required
def create_ezcount_user():
# https://docs.google.com/document/d/1me6u9CpJtydTIEdMkY3OH1dresZkrPRCK0_xw5Rn0Do/edit#
teacher = current_user.teacher
if not teacher.crn:
return
if teacher.invoice_api_key:
raise RouteError("Teacher already has an invoice account.")
api_key = flask.current_app.config.get("RECEIPTS_API_KEY")
payload = {
"api_key": api_key,
"api_email": RECEIPTS_DEVELOPER_EMAIL,
"developer_email": RECEIPTS_DEVELOPER_EMAIL,
"create_signature": 1,
"company_crn": teacher.crn,
"company_email": current_user.email,
"user_key": str(current_user.id),
"company_name": current_user.name,
"company_type": 1,
}
resp = requests.post(RECEIPT_URL + "api/user/create", json=payload)
resp_json = resp.json()
if resp_json["success"]:
teacher.update(
invoice_api_key=resp_json["u_api_key"], invoice_api_uid=resp_json["u_uuid"]
)
return {"message": "EZCount user created successfully."}
raise RouteError(resp_json["errMsg"])
@teacher_routes.route("/payments/<int:payment_id>/receipt", methods=["GET"])
@jsonify_response
@login_required
@teacher_required
def add_receipt(payment_id):
# https://docs.google.com/document/d/1_kSH5xViiZi5Y1tZtWpNrkKiq4Htym7V23TuhL7KlSU/edit#
payment = Payment.get_by_id(payment_id)
if not payment or payment.teacher != current_user.teacher:
raise RouteError("Payment not found.", 404)
if not payment.teacher.invoice_api_key:
raise RouteError("Teacher does not have an invoice account.")
api_key = flask.current_app.config.get("RECEIPTS_API_KEY")
payload = {
"api_key": payment.teacher.invoice_api_key,
"developer_email": RECEIPTS_DEVELOPER_EMAIL,
"created_by_api_key": api_key,
"transaction_id": payment.id,
"type": 320,
"customer_name": payment.student.user.name,
"customer_email": payment.student.user.email,
"customer_crn": payment.crn,
"item": {
1: {
"details": payment.details,
"amount": "1",
"price": payment.amount,
"price_inc_vat": 1, # this price include the VAT
}
},
"payment": {
1: {"payment_type": payment.payment_type.value, "payment": payment.amount}
},
"price_total": payment.amount, # /*THIS IS A MUST ONLY IN INVOICE RECIEPT*/
}
resp = requests.post(RECEIPT_URL + "api/createDoc", json=payload)
resp_json = resp.json()
if resp_json["success"]:
payment.update(pdf_link=resp_json["pdf_link"])
return {"pdf_link": resp_json["pdf_link"]}
raise RouteError(resp_json["errMsg"])
@teacher_routes.route("/ezcount", methods=["GET"])
@jsonify_response
@login_required
@teacher_required
def login_to_ezcount():
# https://docs.google.com/document/d/1me6u9CpJtydTIEdMkY3OH1dresZkrPRCK0_xw5Rn0Do/edit#
if not current_user.teacher.invoice_api_key:
raise RouteError("Teacher does not have an invoice account.")
redirect = flask.request.args.get("redirect", "")
resp = requests.post(
RECEIPT_URL + f"api/getClientSafeUrl/login?redirectTo={redirect}",
json={
"api_key": current_user.teacher.invoice_api_key,
"api_email": current_user.email,
"developer_email": RECEIPTS_DEVELOPER_EMAIL,
},
)
return {"url": resp.json()["url"]}
@teacher_routes.route("/reports", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def create_report():
post_data = flask.request.get_json()
try:
report_type = ReportType[post_data.get("report_type")]
except KeyError:
raise RouteError("Report type was not found.")
car = current_user.teacher.cars.filter_by(id=post_data.get("car")).first()
dates = dict()
if report_type.name in Report.DATES_REQUIRED:
dates["since"] = post_data.get("since")
dates["until"] = post_data.get("until")
try:
dates["since"] = datetime.strptime(
dates["since"], WORKDAY_DATE_FORMAT
).replace(second=0, microsecond=0)
dates["until"] = datetime.strptime(
dates["until"], WORKDAY_DATE_FORMAT
).replace(second=0, microsecond=0)
except (ValueError, TypeError):
raise RouteError("Dates are not valid.")
report = Report.create(
report_type=report_type.value, teacher=current_user.teacher, car=car, **dates
)
return {"data": report.to_dict()}
@teacher_routes.route("/reports/<uuid>", methods=["GET"])
def show_report(uuid):
REPORTS = {
"students": lambda report: report.teacher.students.filter_by(is_active=True)
.join(User, Student.user)
.order_by(User.name.asc()),
"lessons": lambda report: report.teacher.lessons.filter(
and_(
Appointment.is_approved == True,
Appointment.date < report.until,
Appointment.date > report.since,
)
),
"kilometers": lambda report: report.teacher.kilometers.filter(
and_(
Kilometer.date < report.until,
Kilometer.date > report.since,
Kilometer.car == report.car,
)
),
}
report = Report.query.filter_by(uuid=uuid).first()
if not report:
raise RouteError("Report was not found.")
report_data = REPORTS.get(report.report_type.name)
html = flask.render_template(
f"reports/{report.report_type.name}.html",
data=report_data(report).all(),
teacher=report.teacher,
report=report,
)
return render_pdf(HTML(string=html))
# return html
@teacher_routes.route("/create_student", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def create_bot_student():
teacher = current_user.teacher
data = flask.request.values
user = create_user_from_data(data, required=["email", "name", "phone"])
car = teacher.cars.filter_by(id=data.get("car_id")).first()
if not car:
raise RouteError("Car does not exist.")
try:
price = int(data.get("price", ""))
except ValueError:
price = None
student = Student.create(
user=user,
teacher=teacher,
creator=current_user,
price=price,
car=car,
is_approved=True,
)
return {"data": student.user.to_dict()}, 201
@teacher_routes.route("/<int:teacher_id>/cars", methods=["GET"])
@jsonify_response
@login_required
def cars(teacher_id):
teacher = Teacher.get_by_id(teacher_id)
if not teacher:
raise RouteError("Teacher not found.")
return {
"data": [car.to_dict() for car in Car.query.filter_by(teacher=teacher).all()]
}
@teacher_routes.route("/cars", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def register_car():
"""register a new car for a teacher"""
data = flask.request.get_json()
number = data.get("number")
if not number:
raise RouteError("Car number is required.")
# if this number already exist, raise error
exists = current_user.teacher.cars.filter_by(number=number).first()
if exists:
raise RouteError("Car already exists.")
try:
type_ = CarType[data.get("type", "")]
except KeyError:
type_ = CarType.manual
color = data.get("color")
car = Car.create(
name=data.get("name"),
type=type_.value,
number=number,
teacher=current_user.teacher,
color=color[:6] if color else None,
)
return {"data": car.to_dict()}, 201
@teacher_routes.route("/cars/<int:id_>", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def update_car(id_):
data = flask.request.get_json()
car = current_user.teacher.cars.filter_by(id=id_).first()
if not car:
raise RouteError("Car does not exist.")
number = data.get("number")
if not number:
raise RouteError("Car number is required.")
try:
type_ = CarType[data.get("type", "")]
except KeyError:
type_ = CarType.manual
color = data.get("color")
car.update(
name=data.get("name"),
type=type_.value,
number=number,
color=color[:6] if color else None,
)
return {"data": car.to_dict()}
@teacher_routes.route("/cars/<int:id_>", methods=["DELETE"])
@jsonify_response
@login_required
@teacher_required
def delete_car(id_):
car = current_user.teacher.cars.filter_by(id=id_).first()
if not car:
raise RouteError("Car does not exist.")
car.delete()
return {"message": "Car deleted."}
@teacher_routes.route("/cars/<int:id_>/kilometer", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def update_kilometer(id_):
"""update kilometer for a specific date"""
car = current_user.teacher.cars.filter_by(id=id_).first()
if not car:
raise RouteError("Car does not exist.")
data = flask.request.get_json()
try:
date = datetime.strptime(data.get("date"), WORKDAY_DATE_FORMAT)
except (ValueError, TypeError):
raise RouteError("Date is not valid.")
# if this date exist, delete it first
exists = current_user.teacher.kilometers.filter_by(date=date).first()
if exists:
exists.delete()
start, end = data.get("start"), data.get("end")
if not start or not end:
raise RouteError("All kilometer distances are required.")
if end < start:
raise RouteError("Ending value must be bigger than starting value.")
kilometer = Kilometer.create(
date=date,
personal=data.get("personal", 0),
start_of_day=start,
end_of_day=end,
car=car,
teacher=current_user.teacher,
)
return {"data": kilometer.to_dict()}, 201
| 8,223 |
2,571 | // ***************************************************************
// Copyright (c) 2021 Jittor. All Rights Reserved.
// Maintainers:
// <NAME> <<EMAIL>>
// <NAME> <<EMAIL>>.
//
// This file is subject to the terms and conditions defined in
// file 'LICENSE.txt', which is part of this source code package.
// ***************************************************************
#pragma once
#include "op.h"
namespace jittor {
struct MklConvOp : Op {
Var* x, * w, * y;
int strideh, stridew, paddingh, paddingw, dilationh, dilationw, groups;
string xformat, wformat, yformat;
/* MklConvOp: xformat abcd represents nchw */
MklConvOp(Var* x, Var* w, int strideh, int stridew, int paddingh, int paddingw, int dilationh=1, int dilationw=1, int groups=1, string xformat="abcd", string wformat="oihw", string yformat="");
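    /* Illustrative reading (assumed from the comment above): with xformat "abcd" == NCHW
       the letters map positionally a->N, b->C, c->H, d->W, so an NHWC input would be
       described by xformat "acdb". */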
const char* name() const override { return "mkl_conv"; }
void infer_shape() override;
DECLARE_jit_run;
};
} // jittor | 337 |
6,457 | <gh_stars>1000+
// License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2020 Intel Corporation. All Rights Reserved.
#include <numeric>
#include "reflectivity.h"
#include <types.h>
using namespace rs2;
static const size_t N_STD_FRAMES = 100;
static const int NINETY_FIVE_PERCENT_OF_STD_PERIOD = static_cast< int >( 0.95 * N_STD_FRAMES );
static const float MAX_RANGE_IN_UNIT = 65536.0f;
static const float LONG_THERMAL = 74.5f;
static const float INDOOR_MAX_RANGE = 9.0f;
// TODO try to read from LRS
static const float FOV_H = 0.610865f;
static const float FOV_V = 0.479966f;
static const int VGA_WIDTH = 640;
static const int VGA_HEIGHT = 480;
static const int VGA_HALF_WIDTH = VGA_WIDTH / 2;
static const int VGA_HALF_HEIGHT = VGA_HEIGHT / 2;
static bool is_close_to_zero( float x )
{
return ( std::abs( x ) < std::numeric_limits< float >::epsilon() );
}
reflectivity::reflectivity()
: _history_size( 0 )
{
_dist_queue.assign( N_STD_FRAMES,
0 ); // Allocate size for all samples in advance to minimize runtime.
}
// Reflectivity analysis is based on STD and normalized IR. Final reflectivity values are a weighted
// average between them
float reflectivity::get_reflectivity( float raw_noise_estimation,
float max_usable_range,
float ir_val ) const
{
    // Calculating STD over time (temporal noise); when more than 5% of the samples are invalid,
    // return a high value
std::vector< float > _filt_dist_arr( _dist_queue.size() );
// Copy non zero (or close to zero) elements in history
auto new_end = std::copy_if( _dist_queue.begin(),
_dist_queue.end(),
_filt_dist_arr.begin(),
[]( float elem ) { return ! is_close_to_zero( elem ); } );
_filt_dist_arr.resize( new_end - _filt_dist_arr.begin() );
if( 0 == _filt_dist_arr.size() )
throw std::logic_error( "reflectivity N/A, not enough data was collected" );
float sum = std::accumulate( _filt_dist_arr.begin(), _filt_dist_arr.end(), 0.0f );
float mean = sum / _filt_dist_arr.size();
float standard_deviation = 9999.0f;
if( _filt_dist_arr.size() >= NINETY_FIVE_PERCENT_OF_STD_PERIOD )
{
float variance = 0.0f;
for( auto val : _filt_dist_arr )
variance += std::pow( val - mean, 2.0f );
variance = variance / _filt_dist_arr.size();
standard_deviation = std::sqrt( variance );
}
    // Range is calculated based on position in the map (assuming 0 tilt); distance is just based on plane distance
auto range = mean / 1000.0f;
    // Normalized IR is used instead of raw IR to be robust to high ambient light; RSS is assumed between echo and noise
auto nest = raw_noise_estimation / 16.0f;
auto normalized_ir = 0.0f;
if( ir_val > nest )
normalized_ir = std::pow( std::pow( ir_val, 2.0f ) - std::pow( nest, 2.0f ), 0.5f );
auto ref_from_ir = 0.0f;
auto ref_from_std = 0.0f;
auto i_ref = 0.0f;
auto s_ref = 0.0f;
auto i_dist_85 = 0.0f;
auto s_dist_85 = 0.0f;
auto ref = 0.0f;
if( nest <= LONG_THERMAL * 2.0f )
{
// We can hold reflectivity information from IR
if( normalized_ir < 70.0f )
ref_from_ir = 0.5f; // Low confidence as IR values are low
else
ref_from_ir = 1.0f; // High confidence as IR values hold good SNR
// Analyzing reflectivity based on 85 % reflectivity data
i_dist_85 = 6.75f * std::exp( -0.012f * normalized_ir );
i_dist_85 = i_dist_85 * max_usable_range / INDOOR_MAX_RANGE;
        if( 0.0f < i_dist_85 ) // protect division by zero
{
i_ref = static_cast<float>(0.85f * std::pow( ( range / i_dist_85 ), 2 ));
if( i_ref > 0.95f )
{
ref_from_ir = 0.75f;
i_ref = 0.95f;
}
}
else
ref_from_ir = 0.0f;
}
// STD based analysis
    // At the extremes of the STD range it is hard to get reliable data
if( standard_deviation >= 1.5f || standard_deviation <= 10.f )
{
if( standard_deviation > 2.5f )
ref_from_std = 0.5f; // Low confidence data due to low STD values
else
ref_from_std = 1.0f; // High confidence data
// STD based analysis based on 85% reflectivity data
if( range > 0.0f )
{
s_dist_85 = ( 2.25f * ( std::log( standard_deviation ) ) + 1.56f ) * max_usable_range
/ INDOOR_MAX_RANGE;
            if( 0 < s_dist_85 ) // protect division by zero
{
s_ref = 0.85f * std::pow( ( range / s_dist_85 ), 2.0f );
if( s_ref > 0.95f )
{
ref_from_std = ref_from_std * 0.75f;
s_ref = 0.95f;
}
}
else
ref_from_std = 0.0f;
}
else
{
ref_from_std = 0.0f;
}
}
// Calculating Final reflectivity
if( is_close_to_zero( ref_from_ir + ref_from_std ) )
throw std::logic_error( "reflectivity N/A" );
ref = ( i_ref * ref_from_ir + s_ref * ref_from_std ) / ( ref_from_ir + ref_from_std );
// Force 15% resolution
if( ref >= 0.85f )
ref = 0.85f;
else if( ref >= 0.7f )
ref = 0.7f;
else if( ref >= 0.55f )
ref = 0.55f;
else if( ref >= 0.4f )
ref = 0.4f;
else if( ref >= 0.25f )
ref = 0.25f;
else
ref = 0.1f;
return ref;
}
void reflectivity::add_depth_sample( float depth_val, int x_in_image, int y_in_image )
{
if( x_in_image >= 0 && x_in_image < VGA_WIDTH && y_in_image >= 0 && y_in_image < VGA_HEIGHT )
{
auto dist_z = round( depth_val * 16000.f / MAX_RANGE_IN_UNIT ); // convert to mm units
float x_ang = FOV_H * std::abs( VGA_HALF_WIDTH - x_in_image ) / VGA_HALF_WIDTH;
float y_ang = FOV_V * std::abs( VGA_HALF_HEIGHT - y_in_image ) / VGA_HALF_HEIGHT;
auto dist_r = dist_z * std::sqrt( 1.0f + (std::pow( 2.0f * std::tan( x_ang ), 2.0f ) + std::pow( 2.0f * std::tan( y_ang ), 2.0f ) ) / 4.0f );
if( _dist_queue.size() >= N_STD_FRAMES ) // Keep queue as N_STD_FRAMES size queue
_dist_queue.pop_front();
_dist_queue.push_back( dist_r );
if( _history_size < N_STD_FRAMES )
_history_size++;
}
}
void rs2::reflectivity::reset_history()
{
if( _history_size > 0 )
{
_dist_queue.assign( N_STD_FRAMES, 0 );
_history_size = 0;
}
}
float rs2::reflectivity::get_samples_ratio() const
{
return static_cast< float >( history_size() ) / history_capacity();
}
bool rs2::reflectivity::is_history_full() const
{
return history_size() == history_capacity();
}
// Return the history queue capacity
size_t rs2::reflectivity::history_capacity() const
{
return N_STD_FRAMES;
}
// Return the history queue current size
size_t rs2::reflectivity::history_size() const
{
return _history_size;
}
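// Usage sketch (added; not part of the original file). Variable names are
// placeholders, and the per-frame inputs (depth at the pixel of interest,
// raw noise estimation, max usable range, IR value) are assumed to come from
// the caller's depth pipeline:
//
//   rs2::reflectivity refl;
//   // each frame:
//   refl.add_depth_sample( depth_value, pixel_x, pixel_y );
//   // once enough history has been collected:
//   if( refl.is_history_full() )
//   {
//       float r = refl.get_reflectivity( raw_noise_estimation,
//                                        max_usable_range,
//                                        ir_value );
//       // r is quantized to one of 0.1, 0.25, 0.4, 0.55, 0.7, 0.85;
//       // get_reflectivity() throws std::logic_error when the collected
//       // data is insufficient, so callers may want a try/catch here.
//   }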
| 3,376 |
416 | <filename>sfm-jdbc/src/test/java/org/simpleflatmapper/jdbc/test/NamedSqlQueryTest.java<gh_stars>100-1000
package org.simpleflatmapper.jdbc.test;
import org.junit.Test;
import org.simpleflatmapper.jdbc.named.NamedSqlQuery;
import static org.junit.Assert.assertNotNull;
public class NamedSqlQueryTest {
@Test
public void testParse() throws Exception {
NamedSqlQuery namedSqlQuery = NamedSqlQuery.parse("INSERT INTO TABLE VALUES(:id, :name, :email);");
assertNotNull(namedSqlQuery);
}
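    // Added note (not part of the original test): the parsed query records the
    // named placeholders (:id, :name and :email above) so that values can later
    // be bound by name rather than by position. Only successful parsing is
    // asserted here; the binding API itself is not exercised.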
} | 201 |
376 | <reponame>leemgs/node-mariasql
/*
Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#include <my_global.h>
#include <my_rnd.h>
#include <m_string.h>
#if defined(HAVE_YASSL)
#if defined(YASSL_PREFIX)
#define RAND_bytes yaRAND_bytes
#endif /* YASSL_PREFIX */
#include <openssl/ssl.h>
#elif defined(HAVE_OPENSSL)
#include <openssl/rand.h>
#endif /* HAVE_YASSL */
/*
A wrapper to use OpenSSL/yaSSL PRNGs.
*/
extern "C" {
/*
Initialize random generator
NOTES
MySQL's password checks depends on this, so don't do any changes
that changes the random numbers that are generated!
*/
void my_rnd_init(struct my_rnd_struct *rand_st, ulong seed1, ulong seed2)
{
#ifdef HAVE_valgrind
  bzero((char*) rand_st,sizeof(*rand_st)); /* Avoid UMC warnings */
#endif
rand_st->max_value= 0x3FFFFFFFL;
rand_st->max_value_dbl=(double) rand_st->max_value;
rand_st->seed1=seed1%rand_st->max_value ;
rand_st->seed2=seed2%rand_st->max_value;
}
/**
Generate random number.
@param rand_st [INOUT] Structure used for number generation.
@retval Generated pseudo random number.
*/
double my_rnd(struct my_rnd_struct *rand_st)
{
rand_st->seed1= (rand_st->seed1*3+rand_st->seed2) % rand_st->max_value;
rand_st->seed2= (rand_st->seed1+rand_st->seed2+33) % rand_st->max_value;
return (((double) rand_st->seed1) / rand_st->max_value_dbl);
}
/**
Generate a random number using the OpenSSL/yaSSL supplied
random number generator if available.
@param rand_st [INOUT] Structure used for number generation
only if none of the SSL libraries are
available.
@retval Generated random number.
*/
double my_rnd_ssl(struct my_rnd_struct *rand_st)
{
#if defined(HAVE_YASSL) || defined(HAVE_OPENSSL)
int rc;
unsigned int res;
#if defined(HAVE_YASSL)
rc= yaSSL::RAND_bytes((unsigned char *) &res, sizeof (unsigned int));
#else
rc= RAND_bytes((unsigned char *) &res, sizeof (unsigned int));
#endif /* HAVE_YASSL */
if (rc)
return (double)res / (double)UINT_MAX;
#endif /* defined(HAVE_YASSL) || defined(HAVE_OPENSSL) */
return my_rnd(rand_st);
}
}
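/*
  Usage sketch (added; not part of the original file, placeholders only):

    struct my_rnd_struct rand_st;
    my_rnd_init(&rand_st, (ulong) seed1, (ulong) seed2);
    double r = my_rnd(&rand_st);      (pseudo-random value in [0, 1))
    double s = my_rnd_ssl(&rand_st);  (prefers the SSL RNG when available)

  seed1 and seed2 are caller-supplied seeds; as noted above, MySQL's password
  checks depend on the exact generated sequence, so the seeds must match
  whatever the server would have used.
*/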
| 1,063 |
2,542 | <gh_stars>1000+
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
using namespace Common;
using namespace std;
using namespace Naming;
using namespace ServiceModel;
using namespace Store;
using namespace Management::ClusterManager;
StringLiteral const TraceComponent("ApplicationUpdateContext");
RolloutContextType::Enum const ApplicationUpdateContextType(RolloutContextType::ApplicationUpdate);
ApplicationUpdateContext::ApplicationUpdateContext(
Common::ComponentRoot const & replica,
ClientRequestSPtr const & clientRequest,
Common::NamingUri const & applicationName,
ServiceModelApplicationId const & applicationId,
Reliability::ApplicationCapacityDescription const & currentCapacity,
Reliability::ApplicationCapacityDescription const & updatedCapacity)
: RolloutContext(ApplicationUpdateContextType, replica, clientRequest)
, applicationName_(applicationName)
, applicationId_(applicationId)
, currentCapacities_(currentCapacity)
, updatedCapacities_(updatedCapacity)
{
}
ApplicationUpdateContext::ApplicationUpdateContext(ApplicationUpdateContext && other)
: RolloutContext(move(other))
, applicationName_(move(other.applicationName_))
, applicationId_(move(other.applicationId_))
, currentCapacities_(move(other.currentCapacities_))
, updatedCapacities_(move(other.updatedCapacities_))
{
}
ApplicationUpdateContext::ApplicationUpdateContext(
Common::NamingUri const & appName)
: RolloutContext(ApplicationUpdateContextType)
, applicationName_(appName)
, applicationId_(0)
, currentCapacities_()
, updatedCapacities_()
{
}
ApplicationUpdateContext & ApplicationUpdateContext::operator=(
ApplicationUpdateContext && other)
{
if (this != &other)
{
__super::operator = (move(other));
this->applicationName_ = move(other.applicationName_);
this->applicationId_ = move(other.applicationId_);
this->currentCapacities_ = move(other.currentCapacities_);
this->updatedCapacities_ = move(other.updatedCapacities_);
}
return *this;
}
std::wstring const & ApplicationUpdateContext::get_Type() const
{
return Constants::StoreType_ApplicationUpdateContext;
}
std::wstring ApplicationUpdateContext::ConstructKey() const
{
wstring temp;
StringWriter writer(temp);
writer.Write("{0}", applicationName_);
return temp;
}
void ApplicationUpdateContext::WriteTo(Common::TextWriter & w, Common::FormatOptions const &) const
{
w.Write(
"ApplicationUpdateContext({0})[name={1}, applicationId={2}, currentCapacities=[{3}], updatedCapacities=[{4}]]",
this->Status,
applicationName_,
applicationId_,
currentCapacities_,
updatedCapacities_);
}
| 1,003 |
5,169 | <filename>Specs/0/3/8/AlleeSDK/1.3.2/AlleeSDK.podspec.json
{
"name": "AlleeSDK",
"swift_version": "4.2",
"version": "1.3.2",
"platforms": {
"ios": "8"
},
"license": {
"type": "MIT",
"file": "LICENSE"
},
"summary": "AlleeSDK help you to integrate your POS with our KDS",
"homepage": "https://github.com/Bematechus/AlleeSDK",
"authors": {
"Logic Controls": "<EMAIL>"
},
"source": {
"git": "https://github.com/Bematechus/AlleeSDK.git",
"tag": "1.3.2",
"submodules": true
},
"source_files": [
"AlleeSDK/AlleeSDK.h",
"AlleeSDK/AlleeSDK.swift",
"Frameworks/AlleeCommon/Models/*.swift",
"Frameworks/AlleeCommon/Messages/*.swift"
],
"vendored_frameworks": "Frameworks/BSocketHelper.framework"
}
| 349 |
342 | /*
* tkWin32Dll.c --
*
* This file contains a stub dll entry point.
*
* Copyright (c) 1995 Sun Microsystems, Inc.
*
* See the file "license.terms" for information on usage and redistribution of
* this file, and for a DISCLAIMER OF ALL WARRANTIES.
*/
#include "tkWinInt.h"
#ifndef STATIC_BUILD
#ifdef HAVE_NO_SEH
/*
* Unlike Borland and Microsoft, we don't register exception handlers by
* pushing registration records onto the runtime stack. Instead, we register
 * them by creating a TCLEXCEPTION_REGISTRATION within the activation record.
*/
typedef struct TCLEXCEPTION_REGISTRATION {
struct TCLEXCEPTION_REGISTRATION *link;
EXCEPTION_DISPOSITION (*handler)(
struct _EXCEPTION_RECORD*, void*, struct _CONTEXT*, void*);
void *ebp;
void *esp;
int status;
} TCLEXCEPTION_REGISTRATION;
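/*
 * Added note: on 32-bit Windows the head of the structured-exception-handler
 * chain is reached through FS:[0]; the inline assembly later in this file
 * links and unlinks the registration above by reading and writing that slot
 * (the __WIN64 branch of the same code uses GS:[0] instead).
 */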
/*
* Need to add noinline flag to DllMain declaration so that gcc -O3 does not
* inline asm code into DllEntryPoint and cause a compile time error because
* of redefined local labels.
*/
BOOL APIENTRY DllMain(HINSTANCE hInst, DWORD reason,
LPVOID reserved) __attribute__ ((noinline));
#else /* !HAVE_NO_SEH */
/*
* The following declaration is for the VC++ DLL entry point.
*/
BOOL APIENTRY DllMain(HINSTANCE hInst, DWORD reason,
LPVOID reserved);
#endif /* HAVE_NO_SEH */
/*
*----------------------------------------------------------------------
*
* DllEntryPoint --
*
* This wrapper function is used by Borland to invoke the initialization
* code for Tk. It simply calls the DllMain routine.
*
* Results:
* See DllMain.
*
* Side effects:
* See DllMain.
*
*----------------------------------------------------------------------
*/
BOOL APIENTRY
DllEntryPoint(
HINSTANCE hInst, /* Library instance handle. */
DWORD reason, /* Reason this function is being called. */
LPVOID reserved) /* Not used. */
{
return DllMain(hInst, reason, reserved);
}
/*
*----------------------------------------------------------------------
*
* DllMain --
*
* DLL entry point. It is only necessary to specify our dll here so that
* resources are found correctly. Otherwise Tk will initialize and clean
* up after itself through other methods, in order to be consistent
* whether the build is static or dynamic.
*
* Results:
* Always TRUE.
*
* Side effects:
* This might call some synchronization functions, but MSDN documentation
* states: "Waiting on synchronization objects in DllMain can cause a
* deadlock."
*
*----------------------------------------------------------------------
*/
BOOL APIENTRY
DllMain(
HINSTANCE hInstance,
DWORD reason,
LPVOID reserved)
{
#ifdef HAVE_NO_SEH
TCLEXCEPTION_REGISTRATION registration;
#endif
/*
* If we are attaching to the DLL from a new process, tell Tk about the
* hInstance to use.
*/
switch (reason) {
case DLL_PROCESS_ATTACH:
DisableThreadLibraryCalls(hInstance);
TkWinSetHINSTANCE(hInstance);
break;
case DLL_PROCESS_DETACH:
/*
* Protect the call to TkFinalize in an SEH block. We can't be
	 * guaranteed Tk is always being unloaded from a stable condition.
*/
#ifdef HAVE_NO_SEH
# ifdef __WIN64
__asm__ __volatile__ (
/*
	 * Construct a TCLEXCEPTION_REGISTRATION to protect the call to
* TkFinalize
*/
"leaq %[registration], %%rdx" "\n\t"
"movq %%gs:0, %%rax" "\n\t"
"movq %%rax, 0x0(%%rdx)" "\n\t" /* link */
"leaq 1f, %%rax" "\n\t"
"movq %%rax, 0x8(%%rdx)" "\n\t" /* handler */
"movq %%rbp, 0x10(%%rdx)" "\n\t" /* rbp */
"movq %%rsp, 0x18(%%rdx)" "\n\t" /* rsp */
"movl %[error], 0x20(%%rdx)" "\n\t" /* status */
/*
* Link the TCLEXCEPTION_REGISTRATION on the chain
*/
"movq %%rdx, %%gs:0" "\n\t"
/*
* Call TkFinalize
*/
"movq $0x0, 0x0(%%esp)" "\n\t"
"call TkFinalize" "\n\t"
/*
* Come here on a normal exit. Recover the TCLEXCEPTION_REGISTRATION
* and store a TCL_OK status
*/
"movq %%gs:0, %%rdx" "\n\t"
"movl %[ok], %%eax" "\n\t"
"movl %%eax, 0x20(%%rdx)" "\n\t"
"jmp 2f" "\n"
/*
* Come here on an exception. Get the TCLEXCEPTION_REGISTRATION that
* we previously put on the chain.
*/
"1:" "\t"
"movq %%gs:0, %%rdx" "\n\t"
"movq 0x10(%%rdx), %%rdx" "\n\t"
/*
* Come here however we exited. Restore context from the
* TCLEXCEPTION_REGISTRATION in case the stack is unbalanced.
*/
"2:" "\t"
"movq 0x18(%%rdx), %%rsp" "\n\t"
"movq 0x10(%%rdx), %%rbp" "\n\t"
"movq 0x0(%%rdx), %%rax" "\n\t"
"movq %%rax, %%gs:0" "\n\t"
:
/* No outputs */
:
[registration] "m" (registration),
[ok] "i" (TCL_OK),
[error] "i" (TCL_ERROR)
:
"%rax", "%rbx", "%rcx", "%rdx", "%rsi", "%rdi", "memory"
);
# else
__asm__ __volatile__ (
/*
	 * Construct a TCLEXCEPTION_REGISTRATION to protect the call to
* TkFinalize
*/
"leal %[registration], %%edx" "\n\t"
"movl %%fs:0, %%eax" "\n\t"
"movl %%eax, 0x0(%%edx)" "\n\t" /* link */
"leal 1f, %%eax" "\n\t"
"movl %%eax, 0x4(%%edx)" "\n\t" /* handler */
"movl %%ebp, 0x8(%%edx)" "\n\t" /* ebp */
"movl %%esp, 0xc(%%edx)" "\n\t" /* esp */
"movl %[error], 0x10(%%edx)" "\n\t" /* status */
/*
* Link the TCLEXCEPTION_REGISTRATION on the chain
*/
"movl %%edx, %%fs:0" "\n\t"
/*
* Call TkFinalize
*/
"movl $0x0, 0x0(%%esp)" "\n\t"
"call _TkFinalize" "\n\t"
/*
* Come here on a normal exit. Recover the TCLEXCEPTION_REGISTRATION
* and store a TCL_OK status
*/
"movl %%fs:0, %%edx" "\n\t"
"movl %[ok], %%eax" "\n\t"
"movl %%eax, 0x10(%%edx)" "\n\t"
"jmp 2f" "\n"
/*
* Come here on an exception. Get the TCLEXCEPTION_REGISTRATION that
* we previously put on the chain.
*/
"1:" "\t"
"movl %%fs:0, %%edx" "\n\t"
"movl 0x8(%%edx), %%edx" "\n"
/*
* Come here however we exited. Restore context from the
* TCLEXCEPTION_REGISTRATION in case the stack is unbalanced.
*/
"2:" "\t"
"movl 0xc(%%edx), %%esp" "\n\t"
"movl 0x8(%%edx), %%ebp" "\n\t"
"movl 0x0(%%edx), %%eax" "\n\t"
"movl %%eax, %%fs:0" "\n\t"
:
/* No outputs */
:
[registration] "m" (registration),
[ok] "i" (TCL_OK),
[error] "i" (TCL_ERROR)
:
"%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"
);
# endif
#else /* HAVE_NO_SEH */
__try {
/*
* Run and remove our exit handlers, if they haven't already been
* run. Just in case we are being unloaded prior to Tcl (it can
* happen), we won't leave any dangling pointers hanging around
* for when Tcl gets unloaded later.
*/
TkFinalize(NULL);
} __except (EXCEPTION_EXECUTE_HANDLER) {
/* empty handler body. */
}
#endif
break;
}
return TRUE;
}
#endif /* !STATIC_BUILD */
/*
* Local Variables:
* mode: c
* c-basic-offset: 4
* fill-column: 78
* End:
*/
| 3,354 |
2,151 | /*
* Copyright (C) 2015 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package android.content.pm;
import android.annotation.NonNull;
import android.annotation.Nullable;
import android.graphics.drawable.Drawable;
import android.os.Parcel;
import android.os.Parcelable;
/**
* This class represents the state of an ephemeral app.
*
* @hide
*/
public final class EphemeralApplicationInfo implements Parcelable {
private final ApplicationInfo mApplicationInfo;
private final String mPackageName;
private final CharSequence mLabelText;
private final String[] mRequestedPermissions;
private final String[] mGrantedPermissions;
public EphemeralApplicationInfo(ApplicationInfo appInfo,
String[] requestedPermissions, String[] grantedPermissions) {
mApplicationInfo = appInfo;
mPackageName = null;
mLabelText = null;
mRequestedPermissions = requestedPermissions;
mGrantedPermissions = grantedPermissions;
}
public EphemeralApplicationInfo(String packageName, CharSequence label,
String[] requestedPermissions, String[] grantedPermissions) {
mApplicationInfo = null;
mPackageName = packageName;
mLabelText = label;
mRequestedPermissions = requestedPermissions;
mGrantedPermissions = grantedPermissions;
}
private EphemeralApplicationInfo(Parcel parcel) {
mPackageName = parcel.readString();
mLabelText = parcel.readCharSequence();
mRequestedPermissions = parcel.readStringArray();
mGrantedPermissions = parcel.createStringArray();
mApplicationInfo = parcel.readParcelable(null);
}
public @NonNull String getPackageName() {
if (mApplicationInfo != null) {
return mApplicationInfo.packageName;
}
return mPackageName;
}
public @NonNull CharSequence loadLabel(@NonNull PackageManager packageManager) {
if (mApplicationInfo != null) {
return mApplicationInfo.loadLabel(packageManager);
}
return mLabelText;
}
public @NonNull Drawable loadIcon(@NonNull PackageManager packageManager) {
if (mApplicationInfo != null) {
return mApplicationInfo.loadIcon(packageManager);
}
return packageManager.getEphemeralApplicationIcon(mPackageName);
}
public @Nullable String[] getRequestedPermissions() {
return mRequestedPermissions;
}
public @Nullable String[] getGrantedPermissions() {
return mGrantedPermissions;
}
@Override
public int describeContents() {
return 0;
}
@Override
public void writeToParcel(Parcel parcel, int flags) {
parcel.writeString(mPackageName);
parcel.writeCharSequence(mLabelText);
parcel.writeStringArray(mRequestedPermissions);
parcel.writeStringArray(mGrantedPermissions);
parcel.writeParcelable(mApplicationInfo, flags);
}
public static final Creator<EphemeralApplicationInfo> CREATOR =
new Creator<EphemeralApplicationInfo>() {
@Override
public EphemeralApplicationInfo createFromParcel(Parcel parcel) {
return new EphemeralApplicationInfo(parcel);
}
@Override
public EphemeralApplicationInfo[] newArray(int size) {
            return new EphemeralApplicationInfo[size];
}
};
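    // Illustrative round trip (added; not part of the original class), using the
    // standard android.os.Parcel API with placeholder values:
    //
    //   EphemeralApplicationInfo info = new EphemeralApplicationInfo(
    //           "com.example.app", "Example",
    //           new String[] {"android.permission.INTERNET"}, new String[0]);
    //   Parcel p = Parcel.obtain();
    //   info.writeToParcel(p, 0);
    //   p.setDataPosition(0);
    //   EphemeralApplicationInfo copy = CREATOR.createFromParcel(p);
    //   p.recycle();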
}
| 1,373 |
678 | <gh_stars>100-1000
/**
* This header is generated by class-dump-z 0.2b.
*
* Source: /System/Library/PrivateFrameworks/ToneLibrary.framework/ToneLibrary
*/
#import <ToneLibrary/XXUnknownSuperclass.h>
#import <ToneLibrary/ToneLibrary-Structs.h>
#import <ToneLibrary/UITextFieldDelegate.h>
#import <ToneLibrary/TLVibrationRecorderViewDelegate.h>
#import <ToneLibrary/UIAlertViewDelegate.h>
@class TLVibrationPattern, TLVibratorController, UIBarButtonItem, UIAlertView, NSDictionary;
@protocol TLVibrationRecorderViewControllerDelegate;
@interface TLVibrationRecorderViewController : XXUnknownSuperclass <UITextFieldDelegate, TLVibrationRecorderViewDelegate, UIAlertViewDelegate> {
id<TLVibrationRecorderViewControllerDelegate> _delegate; // 152 = 0x98
TLVibratorController *_vibratorController; // 156 = 0x9c
NSDictionary *_indefiniteVibrationPattern; // 160 = 0xa0
unsigned _mode; // 164 = 0xa4
TLVibrationPattern *_recordedVibrationPattern; // 168 = 0xa8
double _currentVibrationComponentDidStartTime; // 172 = 0xac
BOOL _isWaitingForEndOfCurrentVibrationComponent; // 180 = 0xb4
double _currentVibrationProgressDidStartTimestamp; // 184 = 0xb8
UIBarButtonItem *_cancelButton; // 192 = 0xc0
UIBarButtonItem *_saveButton; // 196 = 0xc4
UIAlertView *_vibrationNameAlertView; // 200 = 0xc8
}
@property(retain, nonatomic, setter=_setVibrationNameAlertView:) UIAlertView *_vibrationNameAlertView; // G=0x11d99; S=0x11da9; @synthesize
@property(retain, nonatomic, setter=_setSaveButton:) UIBarButtonItem *_saveButton; // G=0x11d65; S=0x11d75; @synthesize
@property(retain, nonatomic, setter=_setCancelButton:) UIBarButtonItem *_cancelButton; // G=0x11d31; S=0x11d41; @synthesize
@property(retain, nonatomic, setter=_setRecordedVibrationPattern:) TLVibrationPattern *_recordedVibrationPattern; // G=0x11cfd; S=0x11d0d; @synthesize
@property(retain, nonatomic, setter=_setIndefiniteVibrationPattern:) NSDictionary *_indefiniteVibrationPattern; // G=0x10a0d; S=0x11cd9; @synthesize
@property(assign, nonatomic) id<TLVibrationRecorderViewControllerDelegate> delegate; // G=0x11cb9; S=0x11cc9; @synthesize=_delegate
// declared property setter: - (void)_setVibrationNameAlertView:(id)view; // 0x11da9
// declared property getter: - (id)_vibrationNameAlertView; // 0x11d99
// declared property setter: - (void)_setSaveButton:(id)button; // 0x11d75
// declared property getter: - (id)_saveButton; // 0x11d65
// declared property setter: - (void)_setCancelButton:(id)button; // 0x11d41
// declared property getter: - (id)_cancelButton; // 0x11d31
// declared property setter: - (void)_setRecordedVibrationPattern:(id)pattern; // 0x11d0d
// declared property getter: - (id)_recordedVibrationPattern; // 0x11cfd
// declared property setter: - (void)_setIndefiniteVibrationPattern:(id)pattern; // 0x11cd9
// declared property setter: - (void)setDelegate:(id)delegate; // 0x11cc9
// declared property getter: - (id)delegate; // 0x11cb9
- (void)_accessibilityDidExitReplayMode; // 0x11c9d
- (void)_accessibilityDidEnterReplayMode; // 0x11c81
- (void)_accessibilityDidExitRecordingMode; // 0x11c65
- (void)_accessibilityDidEnterRecordingMode; // 0x11c49
- (void)_accessibilityMakeAnnouncementWithStringForLocalizationIdentifier:(id)localizationIdentifier; // 0x11bb5
- (void)_finishedWithRecorder; // 0x11b4d
- (void)vibrationRecorderViewDidReachVibrationRecordingMaximumDuration:(id)vibrationRecorderView; // 0x11b0d
- (void)vibrationRecorderView:(id)view didExitRecordingModeWithContextObject:(id)contextObject; // 0x11ac9
- (BOOL)vibrationRecorderViewDidEnterRecordingMode:(id)vibrationRecorderView; // 0x11a89
- (void)vibrationRecorderViewDidFinishReplayingVibration:(id)vibrationRecorderView; // 0x11a1d
- (void)_vibrationRecorderView:(id)view buttonTappedWithIdentifier:(unsigned)identifier animateButtonUpdate:(BOOL)update; // 0x11649
- (void)vibrationRecorderView:(id)view buttonTappedWithIdentifier:(unsigned)identifier; // 0x11625
- (void)vibrationComponentDidEndForVibrationRecorderView:(id)vibrationComponent; // 0x115e1
- (void)vibrationComponentDidStartForVibrationRecorderView:(id)vibrationComponent; // 0x114f1
- (void)_eraseCurrentVibrationComponentDidStartTime; // 0x114d9
- (void)_storeVibrationComponentOfTypePause:(BOOL)typePause; // 0x11441
- (void)_stopVibrating; // 0x11421
- (void)_startVibratingWithVibrationPattern:(id)vibrationPattern; // 0x113e1
- (void)alertView:(id)view clickedButtonAtIndex:(int)index; // 0x1129d
- (BOOL)textFieldShouldReturn:(id)textField; // 0x1120d
- (BOOL)textField:(id)field shouldChangeCharactersInRange:(NSRange)range replacementString:(id)string; // 0x111ad
- (void)_textFieldTextDidChange:(id)_textFieldText; // 0x11101
- (void)_saveButtonTapped:(id)tapped; // 0x10ee5
- (void)_cancelButtonTapped:(id)tapped; // 0x10eb5
- (BOOL)shouldAutorotateToInterfaceOrientation:(int)interfaceOrientation; // 0x10ea9
- (void)viewDidUnload; // 0x10e55
- (void)viewDidDisappear:(BOOL)view; // 0x10dd9
- (void)viewWillAppear:(BOOL)view; // 0x10c11
- (void)viewWillDisappear:(BOOL)view; // 0x10bb9
- (void)viewDidAppear:(BOOL)view; // 0x10b79
- (void)loadView; // 0x10a91
// declared property getter: - (id)_indefiniteVibrationPattern; // 0x10a0d
- (void)dealloc; // 0x1092d
- (id)initWithVibratorController:(id)vibratorController; // 0x108e9
@end
| 1,993 |
1,305 | <reponame>suxingjie99/JavaSource
/*
* Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved.
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*/
package java.awt;
import java.awt.event.*;
import java.awt.geom.Path2D;
import java.awt.geom.Point2D;
import java.awt.im.InputContext;
import java.awt.image.BufferStrategy;
import java.awt.image.BufferedImage;
import java.awt.peer.ComponentPeer;
import java.awt.peer.WindowPeer;
import java.beans.PropertyChangeListener;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OptionalDataException;
import java.io.Serializable;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.security.AccessController;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EventListener;
import java.util.Locale;
import java.util.ResourceBundle;
import java.util.Set;
import java.util.Vector;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.accessibility.*;
import sun.awt.AWTAccessor;
import sun.awt.AppContext;
import sun.awt.CausedFocusEvent;
import sun.awt.SunToolkit;
import sun.awt.util.IdentityArrayList;
import sun.java2d.Disposer;
import sun.java2d.pipe.Region;
import sun.security.action.GetPropertyAction;
import sun.security.util.SecurityConstants;
import sun.util.logging.PlatformLogger;
/**
* A {@code Window} object is a top-level window with no borders and no
* menubar.
* The default layout for a window is {@code BorderLayout}.
* <p>
* A window must have either a frame, dialog, or another window defined as its
* owner when it's constructed.
* <p>
* In a multi-screen environment, you can create a {@code Window}
* on a different screen device by constructing the {@code Window}
* with {@link #Window(Window, GraphicsConfiguration)}. The
* {@code GraphicsConfiguration} object is one of the
* {@code GraphicsConfiguration} objects of the target screen device.
* <p>
* In a virtual device multi-screen environment in which the desktop
* area could span multiple physical screen devices, the bounds of all
* configurations are relative to the virtual device coordinate system.
* The origin of the virtual-coordinate system is at the upper left-hand
* corner of the primary physical screen. Depending on the location of
* the primary screen in the virtual device, negative coordinates are
* possible, as shown in the following figure.
* <p>
* <img src="doc-files/MultiScreen.gif"
* alt="Diagram shows virtual device containing 4 physical screens. Primary physical screen shows coords (0,0), other screen shows (-80,-100)."
* style="float:center; margin: 7px 10px;">
* <p>
* In such an environment, when calling {@code setLocation},
* you must pass a virtual coordinate to this method. Similarly,
* calling {@code getLocationOnScreen} on a {@code Window} returns
* virtual device coordinates. Call the {@code getBounds} method
* of a {@code GraphicsConfiguration} to find its origin in the virtual
* coordinate system.
* <p>
* The following code sets the location of a {@code Window}
* at (10, 10) relative to the origin of the physical screen
* of the corresponding {@code GraphicsConfiguration}. If the
* bounds of the {@code GraphicsConfiguration} is not taken
* into account, the {@code Window} location would be set
* at (10, 10) relative to the virtual-coordinate system and would appear
* on the primary physical screen, which might be different from the
* physical screen of the specified {@code GraphicsConfiguration}.
*
* <pre>
* Window w = new Window(Window owner, GraphicsConfiguration gc);
* Rectangle bounds = gc.getBounds();
* w.setLocation(10 + bounds.x, 10 + bounds.y);
* </pre>
*
* <p>
* Note: the location and size of top-level windows (including
* {@code Window}s, {@code Frame}s, and {@code Dialog}s)
* are under the control of the desktop's window management system.
* Calls to {@code setLocation}, {@code setSize}, and
* {@code setBounds} are requests (not directives) which are
* forwarded to the window management system. Every effort will be
* made to honor such requests. However, in some cases the window
* management system may ignore such requests, or modify the requested
* geometry in order to place and size the {@code Window} in a way
* that more closely matches the desktop settings.
* <p>
* Due to the asynchronous nature of native event handling, the results
* returned by {@code getBounds}, {@code getLocation},
* {@code getLocationOnScreen}, and {@code getSize} might not
* reflect the actual geometry of the Window on screen until the last
* request has been processed. During the processing of subsequent
* requests these values might change accordingly while the window
* management system fulfills the requests.
* <p>
* An application may set the size and location of an invisible
* {@code Window} arbitrarily, but the window management system may
* subsequently change its size and/or location when the
* {@code Window} is made visible. One or more {@code ComponentEvent}s
* will be generated to indicate the new geometry.
* <p>
* Windows are capable of generating the following WindowEvents:
* WindowOpened, WindowClosed, WindowGainedFocus, WindowLostFocus.
*
* @author <NAME>
* @author <NAME>
* @see WindowEvent
* @see #addWindowListener
* @see java.awt.BorderLayout
* @since JDK1.0
*/
public class Window extends Container implements Accessible {
/**
* Enumeration of available <i>window types</i>.
*
* A window type defines the generic visual appearance and behavior of a
* top-level window. For example, the type may affect the kind of
* decorations of a decorated {@code Frame} or {@code Dialog} instance.
* <p>
* Some platforms may not fully support a certain window type. Depending on
* the level of support, some properties of the window type may be
* disobeyed.
*
* @see #getType
* @see #setType
* @since 1.7
*/
public static enum Type {
/**
* Represents a <i>normal</i> window.
*
* This is the default type for objects of the {@code Window} class or
* its descendants. Use this type for regular top-level windows.
*/
NORMAL,
/**
* Represents a <i>utility</i> window.
*
* A utility window is usually a small window such as a toolbar or a
* palette. The native system may render the window with smaller
* title-bar if the window is either a {@code Frame} or a {@code
* Dialog} object, and if it has its decorations enabled.
*/
UTILITY,
/**
* Represents a <i>popup</i> window.
*
* A popup window is a temporary window such as a drop-down menu or a
* tooltip. On some platforms, windows of that type may be forcibly
* made undecorated even if they are instances of the {@code Frame} or
* {@code Dialog} class, and have decorations enabled.
*/
POPUP
}
/**
* This represents the warning message that is
* to be displayed in a non secure window. ie :
* a window that has a security manager installed that denies
* {@code AWTPermission("showWindowWithoutWarningBanner")}.
* This message can be displayed anywhere in the window.
*
* @serial
* @see #getWarningString
*/
String warningString;
/**
* {@code icons} is the graphical way we can
* represent the frames and dialogs.
     * A {@code Window} can't display an icon itself, but the icon is
     * inherited by owned {@code Dialog}s.
*
* @serial
* @see #getIconImages
* @see #setIconImages
*/
transient java.util.List<Image> icons;
/**
* Holds the reference to the component which last had focus in this window
* before it lost focus.
*/
private transient Component temporaryLostComponent;
static boolean systemSyncLWRequests = false;
boolean syncLWRequests = false;
transient boolean beforeFirstShow = true;
private transient boolean disposing = false;
transient WindowDisposerRecord disposerRecord = null;
static final int OPENED = 0x01;
/**
* An Integer value representing the Window State.
*
* @serial
* @since 1.2
* @see #show
*/
int state;
/**
* A boolean value representing Window always-on-top state
* @since 1.5
* @serial
* @see #setAlwaysOnTop
* @see #isAlwaysOnTop
*/
private boolean alwaysOnTop;
/**
* Contains all the windows that have a peer object associated,
* i. e. between addNotify() and removeNotify() calls. The list
* of all Window instances can be obtained from AppContext object.
*
* @since 1.6
*/
private static final IdentityArrayList<Window> allWindows = new IdentityArrayList<Window>();
/**
* A vector containing all the windows this
* window currently owns.
* @since 1.2
* @see #getOwnedWindows
*/
transient Vector<WeakReference<Window>> ownedWindowList =
new Vector<WeakReference<Window>>();
/*
* We insert a weak reference into the Vector of all Windows in AppContext
* instead of 'this' so that garbage collection can still take place
* correctly.
*/
private transient WeakReference<Window> weakThis;
transient boolean showWithParent;
/**
* Contains the modal dialog that blocks this window, or null
* if the window is unblocked.
*
* @since 1.6
*/
transient Dialog modalBlocker;
/**
* @serial
*
* @see java.awt.Dialog.ModalExclusionType
* @see #getModalExclusionType
* @see #setModalExclusionType
*
* @since 1.6
*/
Dialog.ModalExclusionType modalExclusionType;
transient WindowListener windowListener;
transient WindowStateListener windowStateListener;
transient WindowFocusListener windowFocusListener;
transient InputContext inputContext;
private transient Object inputContextLock = new Object();
/**
* Unused. Maintained for serialization backward-compatibility.
*
* @serial
* @since 1.2
*/
private FocusManager focusMgr;
/**
* Indicates whether this Window can become the focused Window.
*
* @serial
* @see #getFocusableWindowState
* @see #setFocusableWindowState
* @since 1.4
*/
private boolean focusableWindowState = true;
/**
* Indicates whether this window should receive focus on
* subsequently being shown (with a call to {@code setVisible(true)}), or
* being moved to the front (with a call to {@code toFront()}).
*
* @serial
* @see #setAutoRequestFocus
* @see #isAutoRequestFocus
* @since 1.7
*/
private volatile boolean autoRequestFocus = true;
/*
* Indicates that this window is being shown. This flag is set to true at
* the beginning of show() and to false at the end of show().
*
* @see #show()
* @see Dialog#shouldBlock
*/
transient boolean isInShow = false;
/**
* The opacity level of the window
*
* @serial
* @see #setOpacity(float)
* @see #getOpacity()
* @since 1.7
*/
private volatile float opacity = 1.0f;
/**
* The shape assigned to this window. This field is set to {@code null} if
* no shape is set (rectangular window).
*
* @serial
* @see #getShape()
* @see #setShape(Shape)
* @since 1.7
*/
private Shape shape = null;
private static final String base = "win";
private static int nameCounter = 0;
/*
* JDK 1.1 serialVersionUID
*/
private static final long serialVersionUID = 4497834738069338734L;
private static final PlatformLogger log = PlatformLogger.getLogger("java.awt.Window");
private static final boolean locationByPlatformProp;
transient boolean isTrayIconWindow = false;
/**
* These fields are initialized in the native peer code
* or via AWTAccessor's WindowAccessor.
*/
private transient volatile int securityWarningWidth = 0;
private transient volatile int securityWarningHeight = 0;
/**
* These fields represent the desired location for the security
* warning if this window is untrusted.
* See com.sun.awt.SecurityWarning for more details.
*/
private transient double securityWarningPointX = 2.0;
private transient double securityWarningPointY = 0.0;
private transient float securityWarningAlignmentX = RIGHT_ALIGNMENT;
private transient float securityWarningAlignmentY = TOP_ALIGNMENT;
static {
/* ensure that the necessary native libraries are loaded */
Toolkit.loadLibraries();
if (!GraphicsEnvironment.isHeadless()) {
initIDs();
}
String s = java.security.AccessController.doPrivileged(
new GetPropertyAction("java.awt.syncLWRequests"));
systemSyncLWRequests = (s != null && s.equals("true"));
s = java.security.AccessController.doPrivileged(
new GetPropertyAction("java.awt.Window.locationByPlatform"));
locationByPlatformProp = (s != null && s.equals("true"));
}
/**
* Initialize JNI field and method IDs for fields that may be
     * accessed from C.
*/
private static native void initIDs();
/**
* Constructs a new, initially invisible window in default size with the
* specified {@code GraphicsConfiguration}.
* <p>
* If there is a security manager, then it is invoked to check
* {@code AWTPermission("showWindowWithoutWarningBanner")}
* to determine whether or not the window must be displayed with
* a warning banner.
*
* @param gc the {@code GraphicsConfiguration} of the target screen
* device. If {@code gc} is {@code null}, the system default
* {@code GraphicsConfiguration} is assumed
* @exception IllegalArgumentException if {@code gc}
* is not from a screen device
* @exception HeadlessException when
* {@code GraphicsEnvironment.isHeadless()} returns {@code true}
*
* @see java.awt.GraphicsEnvironment#isHeadless
*/
Window(GraphicsConfiguration gc) {
init(gc);
}
transient Object anchor = new Object();
static class WindowDisposerRecord implements sun.java2d.DisposerRecord {
WeakReference<Window> owner;
final WeakReference<Window> weakThis;
final WeakReference<AppContext> context;
WindowDisposerRecord(AppContext context, Window victim) {
weakThis = victim.weakThis;
this.context = new WeakReference<AppContext>(context);
}
public void updateOwner() {
Window victim = weakThis.get();
owner = (victim == null)
? null
: new WeakReference<Window>(victim.getOwner());
}
public void dispose() {
if (owner != null) {
Window parent = owner.get();
if (parent != null) {
parent.removeOwnedWindow(weakThis);
}
}
AppContext ac = context.get();
if (null != ac) {
Window.removeFromWindowList(ac, weakThis);
}
}
}
private GraphicsConfiguration initGC(GraphicsConfiguration gc) {
GraphicsEnvironment.checkHeadless();
if (gc == null) {
gc = GraphicsEnvironment.getLocalGraphicsEnvironment().
getDefaultScreenDevice().getDefaultConfiguration();
}
setGraphicsConfiguration(gc);
return gc;
}
private void init(GraphicsConfiguration gc) {
GraphicsEnvironment.checkHeadless();
syncLWRequests = systemSyncLWRequests;
weakThis = new WeakReference<Window>(this);
addToWindowList();
setWarningString();
this.cursor = Cursor.getPredefinedCursor(Cursor.DEFAULT_CURSOR);
this.visible = false;
gc = initGC(gc);
if (gc.getDevice().getType() !=
GraphicsDevice.TYPE_RASTER_SCREEN) {
throw new IllegalArgumentException("not a screen device");
}
setLayout(new BorderLayout());
/* offset the initial location with the original of the screen */
/* and any insets */
Rectangle screenBounds = gc.getBounds();
Insets screenInsets = getToolkit().getScreenInsets(gc);
int x = getX() + screenBounds.x + screenInsets.left;
int y = getY() + screenBounds.y + screenInsets.top;
if (x != this.x || y != this.y) {
setLocation(x, y);
/* reset after setLocation */
setLocationByPlatform(locationByPlatformProp);
}
modalExclusionType = Dialog.ModalExclusionType.NO_EXCLUDE;
disposerRecord = new WindowDisposerRecord(appContext, this);
sun.java2d.Disposer.addRecord(anchor, disposerRecord);
SunToolkit.checkAndSetPolicy(this);
}
/**
* Constructs a new, initially invisible window in the default size.
* <p>
* If there is a security manager set, it is invoked to check
* {@code AWTPermission("showWindowWithoutWarningBanner")}.
* If that check fails with a {@code SecurityException} then a warning
* banner is created.
*
* @exception HeadlessException when
* {@code GraphicsEnvironment.isHeadless()} returns {@code true}
*
* @see java.awt.GraphicsEnvironment#isHeadless
*/
Window() throws HeadlessException {
GraphicsEnvironment.checkHeadless();
init((GraphicsConfiguration)null);
}
/**
* Constructs a new, initially invisible window with the specified
* {@code Frame} as its owner. The window will not be focusable
* unless its owner is showing on the screen.
* <p>
* If there is a security manager set, it is invoked to check
* {@code AWTPermission("showWindowWithoutWarningBanner")}.
* If that check fails with a {@code SecurityException} then a warning
* banner is created.
*
* @param owner the {@code Frame} to act as owner or {@code null}
* if this window has no owner
* @exception IllegalArgumentException if the {@code owner}'s
* {@code GraphicsConfiguration} is not from a screen device
* @exception HeadlessException when
* {@code GraphicsEnvironment.isHeadless} returns {@code true}
*
* @see java.awt.GraphicsEnvironment#isHeadless
* @see #isShowing
*/
public Window(Frame owner) {
this(owner == null ? (GraphicsConfiguration)null :
owner.getGraphicsConfiguration());
ownedInit(owner);
}
/**
* Constructs a new, initially invisible window with the specified
* {@code Window} as its owner. This window will not be focusable
* unless its nearest owning {@code Frame} or {@code Dialog}
* is showing on the screen.
* <p>
* If there is a security manager set, it is invoked to check
* {@code AWTPermission("showWindowWithoutWarningBanner")}.
* If that check fails with a {@code SecurityException} then a
* warning banner is created.
*
* @param owner the {@code Window} to act as owner or
* {@code null} if this window has no owner
* @exception IllegalArgumentException if the {@code owner}'s
* {@code GraphicsConfiguration} is not from a screen device
* @exception HeadlessException when
* {@code GraphicsEnvironment.isHeadless()} returns
* {@code true}
*
* @see java.awt.GraphicsEnvironment#isHeadless
* @see #isShowing
*
* @since 1.2
*/
public Window(Window owner) {
this(owner == null ? (GraphicsConfiguration)null :
owner.getGraphicsConfiguration());
ownedInit(owner);
}
/**
* Constructs a new, initially invisible window with the specified owner
* {@code Window} and a {@code GraphicsConfiguration}
* of a screen device. The Window will not be focusable unless
* its nearest owning {@code Frame} or {@code Dialog}
* is showing on the screen.
* <p>
* If there is a security manager set, it is invoked to check
* {@code AWTPermission("showWindowWithoutWarningBanner")}. If that
* check fails with a {@code SecurityException} then a warning banner
* is created.
*
* @param owner the window to act as owner or {@code null}
* if this window has no owner
* @param gc the {@code GraphicsConfiguration} of the target
* screen device; if {@code gc} is {@code null},
* the system default {@code GraphicsConfiguration} is assumed
* @exception IllegalArgumentException if {@code gc}
* is not from a screen device
* @exception HeadlessException when
* {@code GraphicsEnvironment.isHeadless()} returns
* {@code true}
*
* @see java.awt.GraphicsEnvironment#isHeadless
* @see GraphicsConfiguration#getBounds
* @see #isShowing
* @since 1.3
*/
public Window(Window owner, GraphicsConfiguration gc) {
this(gc);
ownedInit(owner);
}
private void ownedInit(Window owner) {
this.parent = owner;
if (owner != null) {
owner.addOwnedWindow(weakThis);
if (owner.isAlwaysOnTop()) {
try {
setAlwaysOnTop(true);
} catch (SecurityException ignore) {
}
}
}
// WindowDisposerRecord requires a proper value of parent field.
disposerRecord.updateOwner();
}
/**
* Construct a name for this component. Called by getName() when the
* name is null.
*/
String constructComponentName() {
synchronized (Window.class) {
return base + nameCounter++;
}
}
/**
* Returns the sequence of images to be displayed as the icon for this window.
* <p>
* This method returns a copy of the internally stored list, so all operations
* on the returned object will not affect the window's behavior.
*
* @return the copy of icon images' list for this window, or
* empty list if this window doesn't have icon images.
* @see #setIconImages
* @see #setIconImage(Image)
* @since 1.6
*/
public java.util.List<Image> getIconImages() {
java.util.List<Image> icons = this.icons;
if (icons == null || icons.size() == 0) {
return new ArrayList<Image>();
}
return new ArrayList<Image>(icons);
}
/**
* Sets the sequence of images to be displayed as the icon
* for this window. Subsequent calls to {@code getIconImages} will
* always return a copy of the {@code icons} list.
* <p>
* Depending on the platform capabilities one or several images
* of different dimensions will be used as the window's icon.
* <p>
* The {@code icons} list is scanned for the images of most
* appropriate dimensions from the beginning. If the list contains
* several images of the same size, the first will be used.
* <p>
     * Ownerless windows with no icon specified use the platform-default icon.
* The icon of an owned window may be inherited from the owner
* unless explicitly overridden.
* Setting the icon to {@code null} or empty list restores
* the default behavior.
* <p>
     * Note: Native windowing systems may use different images of differing
* dimensions to represent a window, depending on the context (e.g.
* window decoration, window list, taskbar, etc.). They could also use
* just a single image for all contexts or no image at all.
*
* @param icons the list of icon images to be displayed.
* @see #getIconImages()
* @see #setIconImage(Image)
* @since 1.6
*/
public synchronized void setIconImages(java.util.List<? extends Image> icons) {
this.icons = (icons == null) ? new ArrayList<Image>() :
new ArrayList<Image>(icons);
WindowPeer peer = (WindowPeer)this.peer;
if (peer != null) {
peer.updateIconImages();
}
// Always send a property change event
firePropertyChange("iconImage", null, null);
}
/**
* Sets the image to be displayed as the icon for this window.
* <p>
* This method can be used instead of {@link #setIconImages setIconImages()}
* to specify a single image as a window's icon.
* <p>
* The following statement:
* <pre>
* setIconImage(image);
* </pre>
* is equivalent to:
* <pre>
* ArrayList<Image> imageList = new ArrayList<Image>();
* imageList.add(image);
* setIconImages(imageList);
* </pre>
* <p>
     * Note: Native windowing systems may use different images of differing
* dimensions to represent a window, depending on the context (e.g.
* window decoration, window list, taskbar, etc.). They could also use
* just a single image for all contexts or no image at all.
*
* @param image the icon image to be displayed.
* @see #setIconImages
* @see #getIconImages()
* @since 1.6
*/
public void setIconImage(Image image) {
ArrayList<Image> imageList = new ArrayList<Image>();
if (image != null) {
imageList.add(image);
}
setIconImages(imageList);
}
/**
* Makes this Window displayable by creating the connection to its
* native screen resource.
* This method is called internally by the toolkit and should
* not be called directly by programs.
* @see Component#isDisplayable
* @see Container#removeNotify
* @since JDK1.0
*/
public void addNotify() {
synchronized (getTreeLock()) {
Container parent = this.parent;
if (parent != null && parent.getPeer() == null) {
parent.addNotify();
}
if (peer == null) {
peer = getToolkit().createWindow(this);
}
synchronized (allWindows) {
allWindows.add(this);
}
super.addNotify();
}
}
/**
* {@inheritDoc}
*/
public void removeNotify() {
synchronized (getTreeLock()) {
synchronized (allWindows) {
allWindows.remove(this);
}
super.removeNotify();
}
}
/**
* Causes this Window to be sized to fit the preferred size
* and layouts of its subcomponents. The resulting width and
* height of the window are automatically enlarged if either
* of dimensions is less than the minimum size as specified
* by the previous call to the {@code setMinimumSize} method.
* <p>
* If the window and/or its owner are not displayable yet,
* both of them are made displayable before calculating
* the preferred size. The Window is validated after its
* size is being calculated.
*
* @see Component#isDisplayable
* @see #setMinimumSize
*/
public void pack() {
Container parent = this.parent;
if (parent != null && parent.getPeer() == null) {
parent.addNotify();
}
if (peer == null) {
addNotify();
}
Dimension newSize = getPreferredSize();
if (peer != null) {
setClientSize(newSize.width, newSize.height);
}
if(beforeFirstShow) {
isPacked = true;
}
validateUnconditionally();
}
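    // Typical usage sketch (added for illustration; not part of the original
    // class). A concrete subclass such as Frame is laid out and shown with:
    //
    //   Frame f = new Frame("Example");   // placeholder title
    //   f.add(new Button("OK"));
    //   f.pack();                         // sizes the window to its preferred layout size
    //   f.setVisible(true);
    //
    // pack() makes the window displayable first, so it may be called before the
    // window has ever been shown.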
/**
* Sets the minimum size of this window to a constant
* value. Subsequent calls to {@code getMinimumSize}
* will always return this value. If current window's
* size is less than {@code minimumSize} the size of the
* window is automatically enlarged to honor the minimum size.
* <p>
* If the {@code setSize} or {@code setBounds} methods
* are called afterwards with a width or height less than
* that was specified by the {@code setMinimumSize} method
* the window is automatically enlarged to meet
* the {@code minimumSize} value. The {@code minimumSize}
* value also affects the behaviour of the {@code pack} method.
* <p>
* The default behavior is restored by setting the minimum size
* parameter to the {@code null} value.
* <p>
* Resizing operation may be restricted if the user tries
* to resize window below the {@code minimumSize} value.
* This behaviour is platform-dependent.
*
* @param minimumSize the new minimum size of this window
* @see Component#setMinimumSize
* @see #getMinimumSize
* @see #isMinimumSizeSet
* @see #setSize(Dimension)
* @see #pack
* @since 1.6
*/
public void setMinimumSize(Dimension minimumSize) {
synchronized (getTreeLock()) {
super.setMinimumSize(minimumSize);
Dimension size = getSize();
if (isMinimumSizeSet()) {
if (size.width < minimumSize.width || size.height < minimumSize.height) {
int nw = Math.max(width, minimumSize.width);
int nh = Math.max(height, minimumSize.height);
setSize(nw, nh);
}
}
if (peer != null) {
((WindowPeer)peer).updateMinimumSize();
}
}
}
/**
* {@inheritDoc}
* <p>
* The {@code d.width} and {@code d.height} values
* are automatically enlarged if either is less than
* the minimum size as specified by previous call to
* {@code setMinimumSize}.
* <p>
* The method changes the geometry-related data. Therefore,
* the native windowing system may ignore such requests, or it may modify
* the requested data, so that the {@code Window} object is placed and sized
* in a way that corresponds closely to the desktop settings.
*
* @see #getSize
* @see #setBounds
* @see #setMinimumSize
* @since 1.6
*/
public void setSize(Dimension d) {
super.setSize(d);
}
/**
* {@inheritDoc}
* <p>
* The {@code width} and {@code height} values
* are automatically enlarged if either is less than
* the minimum size as specified by previous call to
* {@code setMinimumSize}.
* <p>
* The method changes the geometry-related data. Therefore,
* the native windowing system may ignore such requests, or it may modify
* the requested data, so that the {@code Window} object is placed and sized
* in a way that corresponds closely to the desktop settings.
*
* @see #getSize
* @see #setBounds
* @see #setMinimumSize
* @since 1.6
*/
public void setSize(int width, int height) {
super.setSize(width, height);
}
/**
* {@inheritDoc}
* <p>
* The method changes the geometry-related data. Therefore,
* the native windowing system may ignore such requests, or it may modify
* the requested data, so that the {@code Window} object is placed and sized
* in a way that corresponds closely to the desktop settings.
*/
@Override
public void setLocation(int x, int y) {
super.setLocation(x, y);
}
/**
* {@inheritDoc}
* <p>
* The method changes the geometry-related data. Therefore,
* the native windowing system may ignore such requests, or it may modify
* the requested data, so that the {@code Window} object is placed and sized
* in a way that corresponds closely to the desktop settings.
*/
@Override
public void setLocation(Point p) {
super.setLocation(p);
}
/**
* @deprecated As of JDK version 1.1,
* replaced by {@code setBounds(int, int, int, int)}.
*/
@Deprecated
public void reshape(int x, int y, int width, int height) {
if (isMinimumSizeSet()) {
Dimension minSize = getMinimumSize();
if (width < minSize.width) {
width = minSize.width;
}
if (height < minSize.height) {
height = minSize.height;
}
}
super.reshape(x, y, width, height);
}
void setClientSize(int w, int h) {
synchronized (getTreeLock()) {
setBoundsOp(ComponentPeer.SET_CLIENT_SIZE);
setBounds(x, y, w, h);
}
}
static private final AtomicBoolean
beforeFirstWindowShown = new AtomicBoolean(true);
final void closeSplashScreen() {
if (isTrayIconWindow) {
return;
}
if (beforeFirstWindowShown.getAndSet(false)) {
// We don't use SplashScreen.getSplashScreen() to avoid instantiating
// the object if it hasn't been requested by user code explicitly
SunToolkit.closeSplashScreen();
SplashScreen.markClosed();
}
}
/**
* Shows or hides this {@code Window} depending on the value of parameter
* {@code b}.
* <p>
* If the method shows the window then the window is also made
* focused under the following conditions:
* <ul>
* <li> The {@code Window} meets the requirements outlined in the
* {@link #isFocusableWindow} method.
* <li> The {@code Window}'s {@code autoRequestFocus} property is of the {@code true} value.
* <li> Native windowing system allows the {@code Window} to get focused.
* </ul>
* There is an exception for the second condition (the value of the
* {@code autoRequestFocus} property). The property is not taken into account if the
* window is a modal dialog, which blocks the currently focused window.
* <p>
* Developers must never assume that the window is the focused or active window
* until it receives a WINDOW_GAINED_FOCUS or WINDOW_ACTIVATED event.
* @param b if {@code true}, makes the {@code Window} visible,
* otherwise hides the {@code Window}.
* If the {@code Window} and/or its owner
* are not yet displayable, both are made displayable. The
* {@code Window} will be validated prior to being made visible.
* If the {@code Window} is already visible, this will bring the
* {@code Window} to the front.<p>
* If {@code false}, hides this {@code Window}, its subcomponents, and all
* of its owned children.
* The {@code Window} and its subcomponents can be made visible again
* with a call to {@code #setVisible(true)}.
* @see java.awt.Component#isDisplayable
* @see java.awt.Component#setVisible
* @see java.awt.Window#toFront
* @see java.awt.Window#dispose
* @see java.awt.Window#setAutoRequestFocus
* @see java.awt.Window#isFocusableWindow
*/
public void setVisible(boolean b) {
super.setVisible(b);
}
/**
* Makes the Window visible. If the Window and/or its owner
* are not yet displayable, both are made displayable. The
* Window will be validated prior to being made visible.
* If the Window is already visible, this will bring the Window
* to the front.
* @see Component#isDisplayable
* @see #toFront
* @deprecated As of JDK version 1.5, replaced by
* {@link #setVisible(boolean)}.
*/
@Deprecated
public void show() {
if (peer == null) {
addNotify();
}
validateUnconditionally();
isInShow = true;
if (visible) {
toFront();
} else {
beforeFirstShow = false;
closeSplashScreen();
Dialog.checkShouldBeBlocked(this);
super.show();
locationByPlatform = false;
for (int i = 0; i < ownedWindowList.size(); i++) {
Window child = ownedWindowList.elementAt(i).get();
if ((child != null) && child.showWithParent) {
child.show();
child.showWithParent = false;
} // endif
} // endfor
if (!isModalBlocked()) {
updateChildrenBlocking();
} else {
// fix for 6532736: after this window is shown, its blocker
// should be raised to front
modalBlocker.toFront_NoClientCode();
}
if (this instanceof Frame || this instanceof Dialog) {
updateChildFocusableWindowState(this);
}
}
isInShow = false;
// If first time shown, generate WindowOpened event
if ((state & OPENED) == 0) {
postWindowEvent(WindowEvent.WINDOW_OPENED);
state |= OPENED;
}
}
static void updateChildFocusableWindowState(Window w) {
if (w.getPeer() != null && w.isShowing()) {
((WindowPeer)w.getPeer()).updateFocusableWindowState();
}
for (int i = 0; i < w.ownedWindowList.size(); i++) {
Window child = w.ownedWindowList.elementAt(i).get();
if (child != null) {
updateChildFocusableWindowState(child);
}
}
}
synchronized void postWindowEvent(int id) {
if (windowListener != null
|| (eventMask & AWTEvent.WINDOW_EVENT_MASK) != 0
|| Toolkit.enabledOnToolkit(AWTEvent.WINDOW_EVENT_MASK)) {
WindowEvent e = new WindowEvent(this, id);
Toolkit.getEventQueue().postEvent(e);
}
}
/**
* Hide this Window, its subcomponents, and all of its owned children.
* The Window and its subcomponents can be made visible again
* with a call to {@code show}.
* @see #show
* @see #dispose
* @deprecated As of JDK version 1.5, replaced by
* {@link #setVisible(boolean)}.
*/
@Deprecated
public void hide() {
synchronized(ownedWindowList) {
for (int i = 0; i < ownedWindowList.size(); i++) {
Window child = ownedWindowList.elementAt(i).get();
if ((child != null) && child.visible) {
child.hide();
child.showWithParent = true;
}
}
}
if (isModalBlocked()) {
modalBlocker.unblockWindow(this);
}
super.hide();
locationByPlatform = false;
}
final void clearMostRecentFocusOwnerOnHide() {
/* do nothing */
}
/**
* Releases all of the native screen resources used by this
* {@code Window}, its subcomponents, and all of its owned
* children. That is, the resources for these {@code Component}s
* will be destroyed, any memory they consume will be returned to the
* OS, and they will be marked as undisplayable.
* <p>
* The {@code Window} and its subcomponents can be made displayable
* again by rebuilding the native resources with a subsequent call to
* {@code pack} or {@code show}. The states of the recreated
* {@code Window} and its subcomponents will be identical to the
* states of these objects at the point where the {@code Window}
* was disposed (not accounting for additional modifications between
* those actions).
* <p>
* <b>Note</b>: When the last displayable window
* within the Java virtual machine (VM) is disposed of, the VM may
* terminate. See <a href="doc-files/AWTThreadIssues.html#Autoshutdown">
* AWT Threading Issues</a> for more information.
* @see Component#isDisplayable
* @see #pack
* @see #show
*/
public void dispose() {
doDispose();
}
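    /*
     * A minimal sketch of the dispose/re-display cycle described above; the
     * name "f" is hypothetical. dispose() releases the native resources, and
     * a later pack() or setVisible(true) rebuilds them.
     *
     *   final Frame f = new Frame("Demo");
     *   f.setSize(300, 200);
     *   f.setVisible(true);
     *   // ... when the window is no longer needed ...
     *   f.dispose();          // releases native screen resources
     *   // ... showing it again recreates the peer ...
     *   f.setVisible(true);
     */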
/*
* Fix for 4872170.
 * If dispose() is called on the parent, then its children have to be disposed as well,
 * as reported in the javadoc. So we need to implement this functionality even if a
 * child overrides dispose() incorrectly, without calling super.dispose().
*/
void disposeImpl() {
dispose();
if (getPeer() != null) {
doDispose();
}
}
void doDispose() {
class DisposeAction implements Runnable {
public void run() {
disposing = true;
try {
// Check if this window is the fullscreen window for the
// device. Exit the fullscreen mode prior to disposing
// of the window if that's the case.
GraphicsDevice gd = getGraphicsConfiguration().getDevice();
if (gd.getFullScreenWindow() == Window.this) {
gd.setFullScreenWindow(null);
}
Object[] ownedWindowArray;
synchronized(ownedWindowList) {
ownedWindowArray = new Object[ownedWindowList.size()];
ownedWindowList.copyInto(ownedWindowArray);
}
for (int i = 0; i < ownedWindowArray.length; i++) {
Window child = (Window) (((WeakReference)
(ownedWindowArray[i])).get());
if (child != null) {
child.disposeImpl();
}
}
hide();
beforeFirstShow = true;
removeNotify();
synchronized (inputContextLock) {
if (inputContext != null) {
inputContext.dispose();
inputContext = null;
}
}
clearCurrentFocusCycleRootOnHide();
} finally {
disposing = false;
}
}
}
boolean fireWindowClosedEvent = isDisplayable();
DisposeAction action = new DisposeAction();
if (EventQueue.isDispatchThread()) {
action.run();
}
else {
try {
EventQueue.invokeAndWait(this, action);
}
catch (InterruptedException e) {
System.err.println("Disposal was interrupted:");
e.printStackTrace();
}
catch (InvocationTargetException e) {
System.err.println("Exception during disposal:");
e.printStackTrace();
}
}
// Execute outside the Runnable because postWindowEvent is
// synchronized on (this). We don't need to synchronize the call
// on the EventQueue anyways.
if (fireWindowClosedEvent) {
postWindowEvent(WindowEvent.WINDOW_CLOSED);
}
}
/*
* Should only be called while holding the tree lock.
* It's overridden here because parent == owner in Window,
* and we shouldn't adjust counter on owner
*/
void adjustListeningChildrenOnParent(long mask, int num) {
}
// Should only be called while holding tree lock
void adjustDecendantsOnParent(int num) {
// do nothing since parent == owner and we shouldn't
        // adjust counter on owner
}
/**
* If this Window is visible, brings this Window to the front and may make
* it the focused Window.
* <p>
* Places this Window at the top of the stacking order and shows it in
* front of any other Windows in this VM. No action will take place if this
* Window is not visible. Some platforms do not allow Windows which own
* other Windows to appear on top of those owned Windows. Some platforms
* may not permit this VM to place its Windows above windows of native
* applications, or Windows of other VMs. This permission may depend on
* whether a Window in this VM is already focused. Every attempt will be
* made to move this Window as high as possible in the stacking order;
* however, developers should not assume that this method will move this
* Window above all other windows in every situation.
* <p>
* Developers must never assume that this Window is the focused or active
* Window until this Window receives a WINDOW_GAINED_FOCUS or WINDOW_ACTIVATED
* event. On platforms where the top-most window is the focused window, this
* method will <b>probably</b> focus this Window (if it is not already focused)
* under the following conditions:
* <ul>
* <li> The window meets the requirements outlined in the
* {@link #isFocusableWindow} method.
* <li> The window's property {@code autoRequestFocus} is of the
* {@code true} value.
* <li> Native windowing system allows the window to get focused.
* </ul>
* On platforms where the stacking order does not typically affect the focused
* window, this method will <b>probably</b> leave the focused and active
* Windows unchanged.
* <p>
* If this method causes this Window to be focused, and this Window is a
* Frame or a Dialog, it will also become activated. If this Window is
* focused, but it is not a Frame or a Dialog, then the first Frame or
* Dialog that is an owner of this Window will be activated.
* <p>
* If this window is blocked by modal dialog, then the blocking dialog
* is brought to the front and remains above the blocked window.
*
* @see #toBack
* @see #setAutoRequestFocus
* @see #isFocusableWindow
*/
public void toFront() {
toFront_NoClientCode();
}
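    /*
     * An illustrative sketch of raising a window, subject to the platform
     * restrictions described above; "f" stands for an existing Frame and is
     * a hypothetical name.
     *
     *   if (f.isVisible()) {
     *       f.toFront();   // request the top-most position; may also focus the window
     *   }
     */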
// This functionality is implemented in a final package-private method
    // to ensure that it cannot be overridden by client subclasses.
final void toFront_NoClientCode() {
if (visible) {
WindowPeer peer = (WindowPeer)this.peer;
if (peer != null) {
peer.toFront();
}
if (isModalBlocked()) {
modalBlocker.toFront_NoClientCode();
}
}
}
/**
* If this Window is visible, sends this Window to the back and may cause
* it to lose focus or activation if it is the focused or active Window.
* <p>
* Places this Window at the bottom of the stacking order and shows it
 * behind any other Windows in this VM. No action will take place if this
* Window is not visible. Some platforms do not allow Windows which are
* owned by other Windows to appear below their owners. Every attempt will
* be made to move this Window as low as possible in the stacking order;
* however, developers should not assume that this method will move this
* Window below all other windows in every situation.
* <p>
* Because of variations in native windowing systems, no guarantees about
* changes to the focused and active Windows can be made. Developers must
* never assume that this Window is no longer the focused or active Window
* until this Window receives a WINDOW_LOST_FOCUS or WINDOW_DEACTIVATED
* event. On platforms where the top-most window is the focused window,
* this method will <b>probably</b> cause this Window to lose focus. In
* that case, the next highest, focusable Window in this VM will receive
* focus. On platforms where the stacking order does not typically affect
* the focused window, this method will <b>probably</b> leave the focused
* and active Windows unchanged.
*
* @see #toFront
*/
public void toBack() {
toBack_NoClientCode();
}
// This functionality is implemented in a final package-private method
    // to ensure that it cannot be overridden by client subclasses.
final void toBack_NoClientCode() {
if(isAlwaysOnTop()) {
try {
setAlwaysOnTop(false);
}catch(SecurityException e) {
}
}
if (visible) {
WindowPeer peer = (WindowPeer)this.peer;
if (peer != null) {
peer.toBack();
}
}
}
/**
 * Returns the toolkit of this window.
* @return the toolkit of this window.
* @see Toolkit
* @see Toolkit#getDefaultToolkit
* @see Component#getToolkit
*/
public Toolkit getToolkit() {
return Toolkit.getDefaultToolkit();
}
/**
* Gets the warning string that is displayed with this window.
* If this window is insecure, the warning string is displayed
* somewhere in the visible area of the window. A window is
* insecure if there is a security manager and the security
* manager denies
* {@code AWTPermission("showWindowWithoutWarningBanner")}.
* <p>
* If the window is secure, then {@code getWarningString}
* returns {@code null}. If the window is insecure, this
* method checks for the system property
* {@code awt.appletWarning}
* and returns the string value of that property.
* @return the warning string for this window.
*/
public final String getWarningString() {
return warningString;
}
private void setWarningString() {
warningString = null;
SecurityManager sm = System.getSecurityManager();
if (sm != null) {
try {
sm.checkPermission(SecurityConstants.AWT.TOPLEVEL_WINDOW_PERMISSION);
} catch (SecurityException se) {
// make sure the privileged action is only
// for getting the property! We don't want the
// above checkPermission call to always succeed!
warningString = AccessController.doPrivileged(
new GetPropertyAction("awt.appletWarning",
"Java Applet Window"));
}
}
}
/**
* Gets the {@code Locale} object that is associated
* with this window, if the locale has been set.
* If no locale has been set, then the default locale
* is returned.
* @return the locale that is set for this window.
* @see java.util.Locale
* @since JDK1.1
*/
public Locale getLocale() {
if (this.locale == null) {
return Locale.getDefault();
}
return this.locale;
}
/**
* Gets the input context for this window. A window always has an input context,
* which is shared by subcomponents unless they create and set their own.
* @see Component#getInputContext
* @since 1.2
*/
public InputContext getInputContext() {
synchronized (inputContextLock) {
if (inputContext == null) {
inputContext = InputContext.getInstance();
}
}
return inputContext;
}
/**
* Set the cursor image to a specified cursor.
* <p>
* The method may have no visual effect if the Java platform
* implementation and/or the native system do not support
* changing the mouse cursor shape.
* @param cursor One of the constants defined
* by the {@code Cursor} class. If this parameter is null
* then the cursor for this window will be set to the type
* Cursor.DEFAULT_CURSOR.
* @see Component#getCursor
* @see Cursor
* @since JDK1.1
*/
public void setCursor(Cursor cursor) {
if (cursor == null) {
cursor = Cursor.getPredefinedCursor(Cursor.DEFAULT_CURSOR);
}
super.setCursor(cursor);
}
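    /*
     * A small sketch of switching the cursor around a lengthy operation,
     * assuming "w" is some existing Window; the name is hypothetical.
     *
     *   w.setCursor(Cursor.getPredefinedCursor(Cursor.WAIT_CURSOR));
     *   try {
     *       // ... perform the lengthy work ...
     *   } finally {
     *       w.setCursor(Cursor.getDefaultCursor());
     *   }
     */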
/**
* Returns the owner of this window.
* @since 1.2
*/
public Window getOwner() {
return getOwner_NoClientCode();
}
final Window getOwner_NoClientCode() {
return (Window)parent;
}
/**
 * Returns an array containing all the windows this
* window currently owns.
* @since 1.2
*/
public Window[] getOwnedWindows() {
return getOwnedWindows_NoClientCode();
}
final Window[] getOwnedWindows_NoClientCode() {
Window realCopy[];
synchronized(ownedWindowList) {
// Recall that ownedWindowList is actually a Vector of
// WeakReferences and calling get() on one of these references
// may return null. Make two arrays-- one the size of the
// Vector (fullCopy with size fullSize), and one the size of
// all non-null get()s (realCopy with size realSize).
int fullSize = ownedWindowList.size();
int realSize = 0;
Window fullCopy[] = new Window[fullSize];
for (int i = 0; i < fullSize; i++) {
fullCopy[realSize] = ownedWindowList.elementAt(i).get();
if (fullCopy[realSize] != null) {
realSize++;
}
}
if (fullSize != realSize) {
realCopy = Arrays.copyOf(fullCopy, realSize);
} else {
realCopy = fullCopy;
}
}
return realCopy;
}
boolean isModalBlocked() {
return modalBlocker != null;
}
void setModalBlocked(Dialog blocker, boolean blocked, boolean peerCall) {
this.modalBlocker = blocked ? blocker : null;
if (peerCall) {
WindowPeer peer = (WindowPeer)this.peer;
if (peer != null) {
peer.setModalBlocked(blocker, blocked);
}
}
}
Dialog getModalBlocker() {
return modalBlocker;
}
/*
 * Returns a list of all displayable Windows, i.e. all the
 * Windows whose peer is not null.
*
* @see #addNotify
* @see #removeNotify
*/
static IdentityArrayList<Window> getAllWindows() {
synchronized (allWindows) {
IdentityArrayList<Window> v = new IdentityArrayList<Window>();
v.addAll(allWindows);
return v;
}
}
static IdentityArrayList<Window> getAllUnblockedWindows() {
synchronized (allWindows) {
IdentityArrayList<Window> unblocked = new IdentityArrayList<Window>();
for (int i = 0; i < allWindows.size(); i++) {
Window w = allWindows.get(i);
if (!w.isModalBlocked()) {
unblocked.add(w);
}
}
return unblocked;
}
}
private static Window[] getWindows(AppContext appContext) {
synchronized (Window.class) {
Window realCopy[];
@SuppressWarnings("unchecked")
Vector<WeakReference<Window>> windowList =
(Vector<WeakReference<Window>>)appContext.get(Window.class);
if (windowList != null) {
int fullSize = windowList.size();
int realSize = 0;
Window fullCopy[] = new Window[fullSize];
for (int i = 0; i < fullSize; i++) {
Window w = windowList.get(i).get();
if (w != null) {
fullCopy[realSize++] = w;
}
}
if (fullSize != realSize) {
realCopy = Arrays.copyOf(fullCopy, realSize);
} else {
realCopy = fullCopy;
}
} else {
realCopy = new Window[0];
}
return realCopy;
}
}
/**
* Returns an array of all {@code Window}s, both owned and ownerless,
* created by this application.
* If called from an applet, the array includes only the {@code Window}s
* accessible by that applet.
* <p>
* <b>Warning:</b> this method may return system created windows, such
* as a print dialog. Applications should not assume the existence of
* these dialogs, nor should an application assume anything about these
* dialogs such as component positions, {@code LayoutManager}s
* or serialization.
*
* @see Frame#getFrames
* @see Window#getOwnerlessWindows
*
* @since 1.6
*/
public static Window[] getWindows() {
return getWindows(AppContext.getAppContext());
}
/**
* Returns an array of all {@code Window}s created by this application
* that have no owner. They include {@code Frame}s and ownerless
* {@code Dialog}s and {@code Window}s.
* If called from an applet, the array includes only the {@code Window}s
* accessible by that applet.
* <p>
* <b>Warning:</b> this method may return system created windows, such
* as a print dialog. Applications should not assume the existence of
* these dialogs, nor should an application assume anything about these
* dialogs such as component positions, {@code LayoutManager}s
* or serialization.
*
* @see Frame#getFrames
* @see Window#getWindows()
*
* @since 1.6
*/
public static Window[] getOwnerlessWindows() {
Window[] allWindows = Window.getWindows();
int ownerlessCount = 0;
for (Window w : allWindows) {
if (w.getOwner() == null) {
ownerlessCount++;
}
}
Window[] ownerless = new Window[ownerlessCount];
int c = 0;
for (Window w : allWindows) {
if (w.getOwner() == null) {
ownerless[c++] = w;
}
}
return ownerless;
}
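    /*
     * A sketch of one common use of the enumeration methods above: disposing
     * every remaining top-level window at application shutdown. Illustrative
     * only; as noted above, the array may also contain system-created windows.
     *
     *   for (Window w : Window.getOwnerlessWindows()) {
     *       w.dispose();   // owned windows are disposed recursively
     *   }
     */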
Window getDocumentRoot() {
synchronized (getTreeLock()) {
Window w = this;
while (w.getOwner() != null) {
w = w.getOwner();
}
return w;
}
}
/**
* Specifies the modal exclusion type for this window. If a window is modal
* excluded, it is not blocked by some modal dialogs. See {@link
* java.awt.Dialog.ModalExclusionType Dialog.ModalExclusionType} for
* possible modal exclusion types.
* <p>
* If the given type is not supported, {@code NO_EXCLUDE} is used.
* <p>
* Note: changing the modal exclusion type for a visible window may have no
* effect until it is hidden and then shown again.
*
* @param exclusionType the modal exclusion type for this window; a {@code null}
* value is equivalent to {@link Dialog.ModalExclusionType#NO_EXCLUDE
* NO_EXCLUDE}
* @throws SecurityException if the calling thread does not have permission
* to set the modal exclusion property to the window with the given
* {@code exclusionType}
* @see java.awt.Dialog.ModalExclusionType
* @see java.awt.Window#getModalExclusionType
* @see java.awt.Toolkit#isModalExclusionTypeSupported
*
* @since 1.6
*/
public void setModalExclusionType(Dialog.ModalExclusionType exclusionType) {
if (exclusionType == null) {
exclusionType = Dialog.ModalExclusionType.NO_EXCLUDE;
}
if (!Toolkit.getDefaultToolkit().isModalExclusionTypeSupported(exclusionType)) {
exclusionType = Dialog.ModalExclusionType.NO_EXCLUDE;
}
if (modalExclusionType == exclusionType) {
return;
}
if (exclusionType == Dialog.ModalExclusionType.TOOLKIT_EXCLUDE) {
SecurityManager sm = System.getSecurityManager();
if (sm != null) {
sm.checkPermission(SecurityConstants.AWT.TOOLKIT_MODALITY_PERMISSION);
}
}
modalExclusionType = exclusionType;
        // if we want on-the-fly changes, we need to uncomment the lines below
// and override the method in Dialog to use modalShow() instead
// of updateChildrenBlocking()
/*
if (isModalBlocked()) {
modalBlocker.unblockWindow(this);
}
Dialog.checkShouldBeBlocked(this);
updateChildrenBlocking();
*/
}
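    /*
     * A sketch of excluding a window from application-modal dialogs, guarded
     * by the toolkit capability check referenced above; "statusWindow" is a
     * hypothetical name.
     *
     *   if (Toolkit.getDefaultToolkit().isModalExclusionTypeSupported(
     *           Dialog.ModalExclusionType.APPLICATION_EXCLUDE)) {
     *       statusWindow.setModalExclusionType(
     *           Dialog.ModalExclusionType.APPLICATION_EXCLUDE);
     *   }
     */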
/**
* Returns the modal exclusion type of this window.
*
* @return the modal exclusion type of this window
*
* @see java.awt.Dialog.ModalExclusionType
* @see java.awt.Window#setModalExclusionType
*
* @since 1.6
*/
public Dialog.ModalExclusionType getModalExclusionType() {
return modalExclusionType;
}
boolean isModalExcluded(Dialog.ModalExclusionType exclusionType) {
if ((modalExclusionType != null) &&
modalExclusionType.compareTo(exclusionType) >= 0)
{
return true;
}
Window owner = getOwner_NoClientCode();
return (owner != null) && owner.isModalExcluded(exclusionType);
}
void updateChildrenBlocking() {
Vector<Window> childHierarchy = new Vector<Window>();
Window[] ownedWindows = getOwnedWindows();
for (int i = 0; i < ownedWindows.length; i++) {
childHierarchy.add(ownedWindows[i]);
}
int k = 0;
while (k < childHierarchy.size()) {
Window w = childHierarchy.get(k);
if (w.isVisible()) {
if (w.isModalBlocked()) {
Dialog blocker = w.getModalBlocker();
blocker.unblockWindow(w);
}
Dialog.checkShouldBeBlocked(w);
Window[] wOwned = w.getOwnedWindows();
for (int j = 0; j < wOwned.length; j++) {
childHierarchy.add(wOwned[j]);
}
}
k++;
}
}
/**
* Adds the specified window listener to receive window events from
* this window.
* If l is null, no exception is thrown and no action is performed.
* <p>Refer to <a href="doc-files/AWTThreadIssues.html#ListenersThreads"
* >AWT Threading Issues</a> for details on AWT's threading model.
*
* @param l the window listener
* @see #removeWindowListener
* @see #getWindowListeners
*/
public synchronized void addWindowListener(WindowListener l) {
if (l == null) {
return;
}
newEventsOnly = true;
windowListener = AWTEventMulticaster.add(windowListener, l);
}
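    /*
     * A typical registration, shown as a sketch. WindowAdapter (from
     * java.awt.event) lets the caller override only the callbacks of
     * interest; the frame name "f" is hypothetical.
     *
     *   final Frame f = new Frame("Demo");
     *   f.addWindowListener(new WindowAdapter() {
     *       @Override
     *       public void windowClosing(WindowEvent e) {
     *           f.dispose();   // close the window when the user asks to
     *       }
     *   });
     */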
/**
* Adds the specified window state listener to receive window
* events from this window. If {@code l} is {@code null},
* no exception is thrown and no action is performed.
* <p>Refer to <a href="doc-files/AWTThreadIssues.html#ListenersThreads"
* >AWT Threading Issues</a> for details on AWT's threading model.
*
* @param l the window state listener
* @see #removeWindowStateListener
* @see #getWindowStateListeners
* @since 1.4
*/
public synchronized void addWindowStateListener(WindowStateListener l) {
if (l == null) {
return;
}
windowStateListener = AWTEventMulticaster.add(windowStateListener, l);
newEventsOnly = true;
}
/**
* Adds the specified window focus listener to receive window events
* from this window.
* If l is null, no exception is thrown and no action is performed.
* <p>Refer to <a href="doc-files/AWTThreadIssues.html#ListenersThreads"
* >AWT Threading Issues</a> for details on AWT's threading model.
*
* @param l the window focus listener
* @see #removeWindowFocusListener
* @see #getWindowFocusListeners
* @since 1.4
*/
public synchronized void addWindowFocusListener(WindowFocusListener l) {
if (l == null) {
return;
}
windowFocusListener = AWTEventMulticaster.add(windowFocusListener, l);
newEventsOnly = true;
}
/**
* Removes the specified window listener so that it no longer
* receives window events from this window.
* If l is null, no exception is thrown and no action is performed.
* <p>Refer to <a href="doc-files/AWTThreadIssues.html#ListenersThreads"
* >AWT Threading Issues</a> for details on AWT's threading model.
*
* @param l the window listener
* @see #addWindowListener
* @see #getWindowListeners
*/
public synchronized void removeWindowListener(WindowListener l) {
if (l == null) {
return;
}
windowListener = AWTEventMulticaster.remove(windowListener, l);
}
/**
* Removes the specified window state listener so that it no
* longer receives window events from this window. If
* {@code l} is {@code null}, no exception is thrown and
* no action is performed.
* <p>Refer to <a href="doc-files/AWTThreadIssues.html#ListenersThreads"
* >AWT Threading Issues</a> for details on AWT's threading model.
*
* @param l the window state listener
* @see #addWindowStateListener
* @see #getWindowStateListeners
* @since 1.4
*/
public synchronized void removeWindowStateListener(WindowStateListener l) {
if (l == null) {
return;
}
windowStateListener = AWTEventMulticaster.remove(windowStateListener, l);
}
/**
* Removes the specified window focus listener so that it no longer
* receives window events from this window.
* If l is null, no exception is thrown and no action is performed.
* <p>Refer to <a href="doc-files/AWTThreadIssues.html#ListenersThreads"
* >AWT Threading Issues</a> for details on AWT's threading model.
*
* @param l the window focus listener
* @see #addWindowFocusListener
* @see #getWindowFocusListeners
* @since 1.4
*/
public synchronized void removeWindowFocusListener(WindowFocusListener l) {
if (l == null) {
return;
}
windowFocusListener = AWTEventMulticaster.remove(windowFocusListener, l);
}
/**
* Returns an array of all the window listeners
* registered on this window.
*
* @return all of this window's {@code WindowListener}s
* or an empty array if no window
* listeners are currently registered
*
* @see #addWindowListener
* @see #removeWindowListener
* @since 1.4
*/
public synchronized WindowListener[] getWindowListeners() {
return getListeners(WindowListener.class);
}
/**
* Returns an array of all the window focus listeners
* registered on this window.
*
* @return all of this window's {@code WindowFocusListener}s
* or an empty array if no window focus
* listeners are currently registered
*
* @see #addWindowFocusListener
* @see #removeWindowFocusListener
* @since 1.4
*/
public synchronized WindowFocusListener[] getWindowFocusListeners() {
return getListeners(WindowFocusListener.class);
}
/**
* Returns an array of all the window state listeners
* registered on this window.
*
* @return all of this window's {@code WindowStateListener}s
* or an empty array if no window state
* listeners are currently registered
*
* @see #addWindowStateListener
* @see #removeWindowStateListener
* @since 1.4
*/
public synchronized WindowStateListener[] getWindowStateListeners() {
return getListeners(WindowStateListener.class);
}
/**
* Returns an array of all the objects currently registered
* as <code><em>Foo</em>Listener</code>s
* upon this {@code Window}.
* <code><em>Foo</em>Listener</code>s are registered using the
* <code>add<em>Foo</em>Listener</code> method.
*
* <p>
*
* You can specify the {@code listenerType} argument
* with a class literal, such as
* <code><em>Foo</em>Listener.class</code>.
* For example, you can query a
* {@code Window} {@code w}
* for its window listeners with the following code:
*
* <pre>WindowListener[] wls = (WindowListener[])(w.getListeners(WindowListener.class));</pre>
*
* If no such listeners exist, this method returns an empty array.
*
* @param listenerType the type of listeners requested; this parameter
* should specify an interface that descends from
* {@code java.util.EventListener}
* @return an array of all objects registered as
* <code><em>Foo</em>Listener</code>s on this window,
* or an empty array if no such
* listeners have been added
* @exception ClassCastException if {@code listenerType}
* doesn't specify a class or interface that implements
* {@code java.util.EventListener}
* @exception NullPointerException if {@code listenerType} is {@code null}
*
* @see #getWindowListeners
* @since 1.3
*/
public <T extends EventListener> T[] getListeners(Class<T> listenerType) {
EventListener l = null;
if (listenerType == WindowFocusListener.class) {
l = windowFocusListener;
} else if (listenerType == WindowStateListener.class) {
l = windowStateListener;
} else if (listenerType == WindowListener.class) {
l = windowListener;
} else {
return super.getListeners(listenerType);
}
return AWTEventMulticaster.getListeners(l, listenerType);
}
// REMIND: remove when filtering is handled at lower level
boolean eventEnabled(AWTEvent e) {
switch(e.id) {
case WindowEvent.WINDOW_OPENED:
case WindowEvent.WINDOW_CLOSING:
case WindowEvent.WINDOW_CLOSED:
case WindowEvent.WINDOW_ICONIFIED:
case WindowEvent.WINDOW_DEICONIFIED:
case WindowEvent.WINDOW_ACTIVATED:
case WindowEvent.WINDOW_DEACTIVATED:
if ((eventMask & AWTEvent.WINDOW_EVENT_MASK) != 0 ||
windowListener != null) {
return true;
}
return false;
case WindowEvent.WINDOW_GAINED_FOCUS:
case WindowEvent.WINDOW_LOST_FOCUS:
if ((eventMask & AWTEvent.WINDOW_FOCUS_EVENT_MASK) != 0 ||
windowFocusListener != null) {
return true;
}
return false;
case WindowEvent.WINDOW_STATE_CHANGED:
if ((eventMask & AWTEvent.WINDOW_STATE_EVENT_MASK) != 0 ||
windowStateListener != null) {
return true;
}
return false;
default:
break;
}
return super.eventEnabled(e);
}
/**
* Processes events on this window. If the event is an
* {@code WindowEvent}, it invokes the
* {@code processWindowEvent} method, else it invokes its
* superclass's {@code processEvent}.
* <p>Note that if the event parameter is {@code null}
* the behavior is unspecified and may result in an
* exception.
*
* @param e the event
*/
protected void processEvent(AWTEvent e) {
if (e instanceof WindowEvent) {
switch (e.getID()) {
case WindowEvent.WINDOW_OPENED:
case WindowEvent.WINDOW_CLOSING:
case WindowEvent.WINDOW_CLOSED:
case WindowEvent.WINDOW_ICONIFIED:
case WindowEvent.WINDOW_DEICONIFIED:
case WindowEvent.WINDOW_ACTIVATED:
case WindowEvent.WINDOW_DEACTIVATED:
processWindowEvent((WindowEvent)e);
break;
case WindowEvent.WINDOW_GAINED_FOCUS:
case WindowEvent.WINDOW_LOST_FOCUS:
processWindowFocusEvent((WindowEvent)e);
break;
case WindowEvent.WINDOW_STATE_CHANGED:
processWindowStateEvent((WindowEvent)e);
break;
}
return;
}
super.processEvent(e);
}
/**
* Processes window events occurring on this window by
* dispatching them to any registered WindowListener objects.
* NOTE: This method will not be called unless window events
* are enabled for this component; this happens when one of the
* following occurs:
* <ul>
* <li>A WindowListener object is registered via
* {@code addWindowListener}
* <li>Window events are enabled via {@code enableEvents}
* </ul>
* <p>Note that if the event parameter is {@code null}
* the behavior is unspecified and may result in an
* exception.
*
* @param e the window event
* @see Component#enableEvents
*/
protected void processWindowEvent(WindowEvent e) {
WindowListener listener = windowListener;
if (listener != null) {
switch(e.getID()) {
case WindowEvent.WINDOW_OPENED:
listener.windowOpened(e);
break;
case WindowEvent.WINDOW_CLOSING:
listener.windowClosing(e);
break;
case WindowEvent.WINDOW_CLOSED:
listener.windowClosed(e);
break;
case WindowEvent.WINDOW_ICONIFIED:
listener.windowIconified(e);
break;
case WindowEvent.WINDOW_DEICONIFIED:
listener.windowDeiconified(e);
break;
case WindowEvent.WINDOW_ACTIVATED:
listener.windowActivated(e);
break;
case WindowEvent.WINDOW_DEACTIVATED:
listener.windowDeactivated(e);
break;
default:
break;
}
}
}
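    /*
     * A sketch of the enableEvents route mentioned in the javadoc above: a
     * subclass can receive window events without registering a listener.
     * "LoggingFrame" is a hypothetical subclass name.
     *
     *   class LoggingFrame extends Frame {
     *       LoggingFrame(String title) {
     *           super(title);
     *           enableEvents(AWTEvent.WINDOW_EVENT_MASK);
     *       }
     *       @Override
     *       protected void processWindowEvent(WindowEvent e) {
     *           System.out.println("window event: " + e);
     *           super.processWindowEvent(e);   // keep listener dispatch intact
     *       }
     *   }
     */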
/**
* Processes window focus event occurring on this window by
* dispatching them to any registered WindowFocusListener objects.
* NOTE: this method will not be called unless window focus events
* are enabled for this window. This happens when one of the
* following occurs:
* <ul>
* <li>a WindowFocusListener is registered via
* {@code addWindowFocusListener}
* <li>Window focus events are enabled via {@code enableEvents}
* </ul>
* <p>Note that if the event parameter is {@code null}
* the behavior is unspecified and may result in an
* exception.
*
* @param e the window focus event
* @see Component#enableEvents
* @since 1.4
*/
protected void processWindowFocusEvent(WindowEvent e) {
WindowFocusListener listener = windowFocusListener;
if (listener != null) {
switch (e.getID()) {
case WindowEvent.WINDOW_GAINED_FOCUS:
listener.windowGainedFocus(e);
break;
case WindowEvent.WINDOW_LOST_FOCUS:
listener.windowLostFocus(e);
break;
default:
break;
}
}
}
/**
* Processes window state event occurring on this window by
* dispatching them to any registered {@code WindowStateListener}
* objects.
* NOTE: this method will not be called unless window state events
* are enabled for this window. This happens when one of the
* following occurs:
* <ul>
* <li>a {@code WindowStateListener} is registered via
* {@code addWindowStateListener}
* <li>window state events are enabled via {@code enableEvents}
* </ul>
* <p>Note that if the event parameter is {@code null}
* the behavior is unspecified and may result in an
* exception.
*
* @param e the window state event
* @see java.awt.Component#enableEvents
* @since 1.4
*/
protected void processWindowStateEvent(WindowEvent e) {
WindowStateListener listener = windowStateListener;
if (listener != null) {
switch (e.getID()) {
case WindowEvent.WINDOW_STATE_CHANGED:
listener.windowStateChanged(e);
break;
default:
break;
}
}
}
/**
* Implements a debugging hook -- checks to see if
* the user has typed <i>control-shift-F1</i>. If so,
* the list of child windows is dumped to {@code System.out}.
* @param e the keyboard event
*/
void preProcessKeyEvent(KeyEvent e) {
// Dump the list of child windows to System.out.
if (e.isActionKey() && e.getKeyCode() == KeyEvent.VK_F1 &&
e.isControlDown() && e.isShiftDown() &&
e.getID() == KeyEvent.KEY_PRESSED) {
list(System.out, 0);
}
}
void postProcessKeyEvent(KeyEvent e) {
// Do nothing
}
/**
* Sets whether this window should always be above other windows. If
* there are multiple always-on-top windows, their relative order is
* unspecified and platform dependent.
* <p>
* If some other window is already always-on-top then the
* relative order between these windows is unspecified (depends on
* platform). No window can be brought to be over the always-on-top
* window except maybe another always-on-top window.
* <p>
* All windows owned by an always-on-top window inherit this state and
* automatically become always-on-top. If a window ceases to be
* always-on-top, the windows that it owns will no longer be
* always-on-top. When an always-on-top window is sent {@link #toBack
* toBack}, its always-on-top state is set to {@code false}.
*
* <p> When this method is called on a window with a value of
* {@code true}, and the window is visible and the platform
* supports always-on-top for this window, the window is immediately
* brought forward, "sticking" it in the top-most position. If the
 * window isn't currently visible, this method sets the always-on-top
* state to {@code true} but does not bring the window forward.
* When the window is later shown, it will be always-on-top.
*
* <p> When this method is called on a window with a value of
* {@code false} the always-on-top state is set to normal. It may also
* cause an unspecified, platform-dependent change in the z-order of
* top-level windows, but other always-on-top windows will remain in
* top-most position. Calling this method with a value of {@code false}
* on a window that has a normal state has no effect.
*
* <p><b>Note</b>: some platforms might not support always-on-top
* windows. To detect if always-on-top windows are supported by the
* current platform, use {@link Toolkit#isAlwaysOnTopSupported()} and
* {@link Window#isAlwaysOnTopSupported()}. If always-on-top mode
* isn't supported for this window or this window's toolkit does not
* support always-on-top windows, calling this method has no effect.
* <p>
* If a SecurityManager is installed, the calling thread must be
* granted the AWTPermission "setWindowAlwaysOnTop" in
* order to set the value of this property. If this
* permission is not granted, this method will throw a
* SecurityException, and the current value of the property will
* be left unchanged.
*
* @param alwaysOnTop true if the window should always be above other
* windows
* @throws SecurityException if the calling thread does not have
* permission to set the value of always-on-top property
*
* @see #isAlwaysOnTop
* @see #toFront
* @see #toBack
* @see AWTPermission
* @see #isAlwaysOnTopSupported
* @see #getToolkit
* @see Toolkit#isAlwaysOnTopSupported
* @since 1.5
*/
public final void setAlwaysOnTop(boolean alwaysOnTop) throws SecurityException {
SecurityManager security = System.getSecurityManager();
if (security != null) {
security.checkPermission(SecurityConstants.AWT.SET_WINDOW_ALWAYS_ON_TOP_PERMISSION);
}
boolean oldAlwaysOnTop;
synchronized(this) {
oldAlwaysOnTop = this.alwaysOnTop;
this.alwaysOnTop = alwaysOnTop;
}
if (oldAlwaysOnTop != alwaysOnTop ) {
if (isAlwaysOnTopSupported()) {
WindowPeer peer = (WindowPeer)this.peer;
synchronized(getTreeLock()) {
if (peer != null) {
peer.updateAlwaysOnTopState();
}
}
}
firePropertyChange("alwaysOnTop", oldAlwaysOnTop, alwaysOnTop);
}
setOwnedWindowsAlwaysOnTop(alwaysOnTop);
}
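    /*
     * A guarded sketch of the always-on-top request described above; the
     * capability check avoids relying on unsupported behavior, and the
     * permission may still be denied. "toolWindow" is a hypothetical name.
     *
     *   if (toolWindow.isAlwaysOnTopSupported()) {
     *       try {
     *           toolWindow.setAlwaysOnTop(true);
     *       } catch (SecurityException se) {
     *           // the "setWindowAlwaysOnTop" permission was not granted
     *       }
     *   }
     */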
@SuppressWarnings({"rawtypes", "unchecked"})
private void setOwnedWindowsAlwaysOnTop(boolean alwaysOnTop) {
WeakReference<Window>[] ownedWindowArray;
synchronized (ownedWindowList) {
ownedWindowArray = new WeakReference[ownedWindowList.size()];
ownedWindowList.copyInto(ownedWindowArray);
}
for (WeakReference<Window> ref : ownedWindowArray) {
Window window = ref.get();
if (window != null) {
try {
window.setAlwaysOnTop(alwaysOnTop);
} catch (SecurityException ignore) {
}
}
}
}
/**
* Returns whether the always-on-top mode is supported for this
* window. Some platforms may not support always-on-top windows, some
* may support only some kinds of top-level windows; for example,
* a platform may not support always-on-top modal dialogs.
*
* @return {@code true}, if the always-on-top mode is supported for
* this window and this window's toolkit supports always-on-top windows,
* {@code false} otherwise
*
* @see #setAlwaysOnTop(boolean)
* @see #getToolkit
* @see Toolkit#isAlwaysOnTopSupported
* @since 1.6
*/
public boolean isAlwaysOnTopSupported() {
return Toolkit.getDefaultToolkit().isAlwaysOnTopSupported();
}
/**
* Returns whether this window is an always-on-top window.
* @return {@code true}, if the window is in always-on-top state,
* {@code false} otherwise
* @see #setAlwaysOnTop
* @since 1.5
*/
public final boolean isAlwaysOnTop() {
return alwaysOnTop;
}
/**
* Returns the child Component of this Window that has focus if this Window
* is focused; returns null otherwise.
*
* @return the child Component with focus, or null if this Window is not
* focused
* @see #getMostRecentFocusOwner
* @see #isFocused
*/
public Component getFocusOwner() {
return (isFocused())
? KeyboardFocusManager.getCurrentKeyboardFocusManager().
getFocusOwner()
: null;
}
/**
* Returns the child Component of this Window that will receive the focus
* when this Window is focused. If this Window is currently focused, this
* method returns the same Component as {@code getFocusOwner()}. If
* this Window is not focused, then the child Component that most recently
* requested focus will be returned. If no child Component has ever
* requested focus, and this is a focusable Window, then this Window's
* initial focusable Component is returned. If no child Component has ever
* requested focus, and this is a non-focusable Window, null is returned.
*
* @return the child Component that will receive focus when this Window is
* focused
* @see #getFocusOwner
* @see #isFocused
* @see #isFocusableWindow
* @since 1.4
*/
public Component getMostRecentFocusOwner() {
if (isFocused()) {
return getFocusOwner();
} else {
Component mostRecent =
KeyboardFocusManager.getMostRecentFocusOwner(this);
if (mostRecent != null) {
return mostRecent;
} else {
return (isFocusableWindow())
? getFocusTraversalPolicy().getInitialComponent(this)
: null;
}
}
}
/**
* Returns whether this Window is active. Only a Frame or a Dialog may be
* active. The native windowing system may denote the active Window or its
* children with special decorations, such as a highlighted title bar. The
* active Window is always either the focused Window, or the first Frame or
* Dialog that is an owner of the focused Window.
*
* @return whether this is the active Window.
* @see #isFocused
* @since 1.4
*/
public boolean isActive() {
return (KeyboardFocusManager.getCurrentKeyboardFocusManager().
getActiveWindow() == this);
}
/**
* Returns whether this Window is focused. If there exists a focus owner,
* the focused Window is the Window that is, or contains, that focus owner.
* If there is no focus owner, then no Window is focused.
* <p>
* If the focused Window is a Frame or a Dialog it is also the active
* Window. Otherwise, the active Window is the first Frame or Dialog that
* is an owner of the focused Window.
*
* @return whether this is the focused Window.
* @see #isActive
* @since 1.4
*/
public boolean isFocused() {
return (KeyboardFocusManager.getCurrentKeyboardFocusManager().
getGlobalFocusedWindow() == this);
}
/**
* Gets a focus traversal key for this Window. (See {@code
* setFocusTraversalKeys} for a full description of each key.)
* <p>
* If the traversal key has not been explicitly set for this Window,
* then this Window's parent's traversal key is returned. If the
* traversal key has not been explicitly set for any of this Window's
* ancestors, then the current KeyboardFocusManager's default traversal key
* is returned.
*
* @param id one of KeyboardFocusManager.FORWARD_TRAVERSAL_KEYS,
* KeyboardFocusManager.BACKWARD_TRAVERSAL_KEYS,
* KeyboardFocusManager.UP_CYCLE_TRAVERSAL_KEYS, or
* KeyboardFocusManager.DOWN_CYCLE_TRAVERSAL_KEYS
* @return the AWTKeyStroke for the specified key
* @see Container#setFocusTraversalKeys
* @see KeyboardFocusManager#FORWARD_TRAVERSAL_KEYS
* @see KeyboardFocusManager#BACKWARD_TRAVERSAL_KEYS
* @see KeyboardFocusManager#UP_CYCLE_TRAVERSAL_KEYS
* @see KeyboardFocusManager#DOWN_CYCLE_TRAVERSAL_KEYS
* @throws IllegalArgumentException if id is not one of
* KeyboardFocusManager.FORWARD_TRAVERSAL_KEYS,
* KeyboardFocusManager.BACKWARD_TRAVERSAL_KEYS,
* KeyboardFocusManager.UP_CYCLE_TRAVERSAL_KEYS, or
* KeyboardFocusManager.DOWN_CYCLE_TRAVERSAL_KEYS
* @since 1.4
*/
@SuppressWarnings("unchecked")
public Set<AWTKeyStroke> getFocusTraversalKeys(int id) {
if (id < 0 || id >= KeyboardFocusManager.TRAVERSAL_KEY_LENGTH) {
throw new IllegalArgumentException("invalid focus traversal key identifier");
}
// Okay to return Set directly because it is an unmodifiable view
@SuppressWarnings("rawtypes")
Set keystrokes = (focusTraversalKeys != null)
? focusTraversalKeys[id]
: null;
if (keystrokes != null) {
return keystrokes;
} else {
return KeyboardFocusManager.getCurrentKeyboardFocusManager().
getDefaultFocusTraversalKeys(id);
}
}
/**
* Does nothing because Windows must always be roots of a focus traversal
* cycle. The passed-in value is ignored.
*
* @param focusCycleRoot this value is ignored
* @see #isFocusCycleRoot
* @see Container#setFocusTraversalPolicy
* @see Container#getFocusTraversalPolicy
* @since 1.4
*/
public final void setFocusCycleRoot(boolean focusCycleRoot) {
}
/**
* Always returns {@code true} because all Windows must be roots of a
* focus traversal cycle.
*
* @return {@code true}
* @see #setFocusCycleRoot
* @see Container#setFocusTraversalPolicy
* @see Container#getFocusTraversalPolicy
* @since 1.4
*/
public final boolean isFocusCycleRoot() {
return true;
}
/**
* Always returns {@code null} because Windows have no ancestors; they
* represent the top of the Component hierarchy.
*
* @return {@code null}
* @see Container#isFocusCycleRoot()
* @since 1.4
*/
public final Container getFocusCycleRootAncestor() {
return null;
}
/**
* Returns whether this Window can become the focused Window, that is,
* whether this Window or any of its subcomponents can become the focus
* owner. For a Frame or Dialog to be focusable, its focusable Window state
* must be set to {@code true}. For a Window which is not a Frame or
* Dialog to be focusable, its focusable Window state must be set to
* {@code true}, its nearest owning Frame or Dialog must be
* showing on the screen, and it must contain at least one Component in
* its focus traversal cycle. If any of these conditions is not met, then
* neither this Window nor any of its subcomponents can become the focus
* owner.
*
* @return {@code true} if this Window can be the focused Window;
* {@code false} otherwise
* @see #getFocusableWindowState
* @see #setFocusableWindowState
* @see #isShowing
* @see Component#isFocusable
* @since 1.4
*/
public final boolean isFocusableWindow() {
// If a Window/Frame/Dialog was made non-focusable, then it is always
// non-focusable.
if (!getFocusableWindowState()) {
return false;
}
// All other tests apply only to Windows.
if (this instanceof Frame || this instanceof Dialog) {
return true;
}
// A Window must have at least one Component in its root focus
// traversal cycle to be focusable.
if (getFocusTraversalPolicy().getDefaultComponent(this) == null) {
return false;
}
// A Window's nearest owning Frame or Dialog must be showing on the
// screen.
for (Window owner = getOwner(); owner != null;
owner = owner.getOwner())
{
if (owner instanceof Frame || owner instanceof Dialog) {
return owner.isShowing();
}
}
return false;
}
/**
* Returns whether this Window can become the focused Window if it meets
* the other requirements outlined in {@code isFocusableWindow}. If
* this method returns {@code false}, then
* {@code isFocusableWindow} will return {@code false} as well.
* If this method returns {@code true}, then
* {@code isFocusableWindow} may return {@code true} or
* {@code false} depending upon the other requirements which must be
* met in order for a Window to be focusable.
* <p>
* By default, all Windows have a focusable Window state of
* {@code true}.
*
* @return whether this Window can be the focused Window
* @see #isFocusableWindow
* @see #setFocusableWindowState
* @see #isShowing
* @see Component#setFocusable
* @since 1.4
*/
public boolean getFocusableWindowState() {
return focusableWindowState;
}
/**
* Sets whether this Window can become the focused Window if it meets
* the other requirements outlined in {@code isFocusableWindow}. If
* this Window's focusable Window state is set to {@code false}, then
* {@code isFocusableWindow} will return {@code false}. If this
* Window's focusable Window state is set to {@code true}, then
* {@code isFocusableWindow} may return {@code true} or
* {@code false} depending upon the other requirements which must be
* met in order for a Window to be focusable.
* <p>
* Setting a Window's focusability state to {@code false} is the
* standard mechanism for an application to identify to the AWT a Window
* which will be used as a floating palette or toolbar, and thus should be
* a non-focusable Window.
*
* Setting the focusability state on a visible {@code Window}
* can have a delayed effect on some platforms — the actual
* change may happen only when the {@code Window} becomes
* hidden and then visible again. To ensure consistent behavior
* across platforms, set the {@code Window}'s focusable state
* when the {@code Window} is invisible and then show it.
*
* @param focusableWindowState whether this Window can be the focused
* Window
* @see #isFocusableWindow
* @see #getFocusableWindowState
* @see #isShowing
* @see Component#setFocusable
* @since 1.4
*/
public void setFocusableWindowState(boolean focusableWindowState) {
boolean oldFocusableWindowState;
synchronized (this) {
oldFocusableWindowState = this.focusableWindowState;
this.focusableWindowState = focusableWindowState;
}
WindowPeer peer = (WindowPeer)this.peer;
if (peer != null) {
peer.updateFocusableWindowState();
}
firePropertyChange("focusableWindowState", oldFocusableWindowState,
focusableWindowState);
if (oldFocusableWindowState && !focusableWindowState && isFocused()) {
for (Window owner = getOwner();
owner != null;
owner = owner.getOwner())
{
Component toFocus =
KeyboardFocusManager.getMostRecentFocusOwner(owner);
if (toFocus != null && toFocus.requestFocus(false, CausedFocusEvent.Cause.ACTIVATION)) {
return;
}
}
KeyboardFocusManager.getCurrentKeyboardFocusManager().
clearGlobalFocusOwnerPriv();
}
}
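    /*
     * A sketch of the floating-palette pattern mentioned above: the focusable
     * window state is set while the window is still invisible. The names
     * "owner", "palette" and "toolbarComponent" are hypothetical.
     *
     *   Window palette = new Window(owner);       // owner is an existing Frame
     *   palette.setFocusableWindowState(false);   // clicks won't steal focus
     *   palette.add(toolbarComponent);
     *   palette.pack();
     *   palette.setVisible(true);
     */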
/**
* Sets whether this window should receive focus on
* subsequently being shown (with a call to {@link #setVisible setVisible(true)}),
* or being moved to the front (with a call to {@link #toFront}).
* <p>
* Note that {@link #setVisible setVisible(true)} may be called indirectly
 * (e.g. when showing an owner of the window causes the window to be shown).
 * {@link #toFront} may also be called indirectly (e.g. when
 * {@link #setVisible setVisible(true)} is called on an already visible window).
* In all such cases this property takes effect as well.
* <p>
* The value of the property is not inherited by owned windows.
*
* @param autoRequestFocus whether this window should be focused on
* subsequently being shown or being moved to the front
* @see #isAutoRequestFocus
* @see #isFocusableWindow
* @see #setVisible
* @see #toFront
* @since 1.7
*/
public void setAutoRequestFocus(boolean autoRequestFocus) {
this.autoRequestFocus = autoRequestFocus;
}
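    /*
     * A sketch of showing a window without transferring focus to it, using
     * the property documented above; "notification" is a hypothetical name.
     *
     *   notification.setAutoRequestFocus(false);
     *   notification.setVisible(true);   // shown, but the currently focused
     *                                    // window keeps the focus
     */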
/**
* Returns whether this window should receive focus on subsequently being shown
* (with a call to {@link #setVisible setVisible(true)}), or being moved to the front
* (with a call to {@link #toFront}).
* <p>
* By default, the window has {@code autoRequestFocus} value of {@code true}.
*
* @return {@code autoRequestFocus} value
* @see #setAutoRequestFocus
* @since 1.7
*/
public boolean isAutoRequestFocus() {
return autoRequestFocus;
}
/**
* Adds a PropertyChangeListener to the listener list. The listener is
* registered for all bound properties of this class, including the
* following:
* <ul>
* <li>this Window's font ("font")</li>
* <li>this Window's background color ("background")</li>
* <li>this Window's foreground color ("foreground")</li>
* <li>this Window's focusability ("focusable")</li>
* <li>this Window's focus traversal keys enabled state
* ("focusTraversalKeysEnabled")</li>
* <li>this Window's Set of FORWARD_TRAVERSAL_KEYS
* ("forwardFocusTraversalKeys")</li>
* <li>this Window's Set of BACKWARD_TRAVERSAL_KEYS
* ("backwardFocusTraversalKeys")</li>
* <li>this Window's Set of UP_CYCLE_TRAVERSAL_KEYS
* ("upCycleFocusTraversalKeys")</li>
* <li>this Window's Set of DOWN_CYCLE_TRAVERSAL_KEYS
* ("downCycleFocusTraversalKeys")</li>
* <li>this Window's focus traversal policy ("focusTraversalPolicy")
* </li>
* <li>this Window's focusable Window state ("focusableWindowState")
* </li>
* <li>this Window's always-on-top state("alwaysOnTop")</li>
* </ul>
* Note that if this Window is inheriting a bound property, then no
* event will be fired in response to a change in the inherited property.
* <p>
* If listener is null, no exception is thrown and no action is performed.
*
* @param listener the PropertyChangeListener to be added
*
* @see Component#removePropertyChangeListener
* @see #addPropertyChangeListener(java.lang.String,java.beans.PropertyChangeListener)
*/
public void addPropertyChangeListener(PropertyChangeListener listener) {
super.addPropertyChangeListener(listener);
}
/**
* Adds a PropertyChangeListener to the listener list for a specific
* property. The specified property may be user-defined, or one of the
* following:
* <ul>
* <li>this Window's font ("font")</li>
* <li>this Window's background color ("background")</li>
* <li>this Window's foreground color ("foreground")</li>
* <li>this Window's focusability ("focusable")</li>
* <li>this Window's focus traversal keys enabled state
* ("focusTraversalKeysEnabled")</li>
* <li>this Window's Set of FORWARD_TRAVERSAL_KEYS
* ("forwardFocusTraversalKeys")</li>
* <li>this Window's Set of BACKWARD_TRAVERSAL_KEYS
* ("backwardFocusTraversalKeys")</li>
* <li>this Window's Set of UP_CYCLE_TRAVERSAL_KEYS
* ("upCycleFocusTraversalKeys")</li>
* <li>this Window's Set of DOWN_CYCLE_TRAVERSAL_KEYS
* ("downCycleFocusTraversalKeys")</li>
* <li>this Window's focus traversal policy ("focusTraversalPolicy")
* </li>
* <li>this Window's focusable Window state ("focusableWindowState")
* </li>
* <li>this Window's always-on-top state("alwaysOnTop")</li>
* </ul>
* Note that if this Window is inheriting a bound property, then no
* event will be fired in response to a change in the inherited property.
* <p>
* If listener is null, no exception is thrown and no action is performed.
*
* @param propertyName one of the property names listed above
* @param listener the PropertyChangeListener to be added
*
* @see #addPropertyChangeListener(java.beans.PropertyChangeListener)
* @see Component#removePropertyChangeListener
*/
public void addPropertyChangeListener(String propertyName,
PropertyChangeListener listener) {
super.addPropertyChangeListener(propertyName, listener);
}
/**
* Indicates if this container is a validate root.
* <p>
* {@code Window} objects are the validate roots, and, therefore, they
* override this method to return {@code true}.
*
* @return {@code true}
* @since 1.7
* @see java.awt.Container#isValidateRoot
*/
@Override
public boolean isValidateRoot() {
return true;
}
/**
* Dispatches an event to this window or one of its sub components.
* @param e the event
*/
void dispatchEventImpl(AWTEvent e) {
if (e.getID() == ComponentEvent.COMPONENT_RESIZED) {
invalidate();
validate();
}
super.dispatchEventImpl(e);
}
/**
* @deprecated As of JDK version 1.1
* replaced by {@code dispatchEvent(AWTEvent)}.
*/
@Deprecated
public boolean postEvent(Event e) {
if (handleEvent(e)) {
e.consume();
return true;
}
return false;
}
/**
* Checks if this Window is showing on screen.
* @see Component#setVisible
*/
public boolean isShowing() {
return visible;
}
boolean isDisposing() {
return disposing;
}
/**
* @deprecated As of J2SE 1.4, replaced by
* {@link Component#applyComponentOrientation Component.applyComponentOrientation}.
*/
@Deprecated
public void applyResourceBundle(ResourceBundle rb) {
applyComponentOrientation(ComponentOrientation.getOrientation(rb));
}
/**
* @deprecated As of J2SE 1.4, replaced by
* {@link Component#applyComponentOrientation Component.applyComponentOrientation}.
*/
@Deprecated
public void applyResourceBundle(String rbName) {
applyResourceBundle(ResourceBundle.getBundle(rbName,
Locale.getDefault(),
ClassLoader.getSystemClassLoader()));
}
/*
* Support for tracking all windows owned by this window
*/
void addOwnedWindow(WeakReference<Window> weakWindow) {
if (weakWindow != null) {
synchronized(ownedWindowList) {
// this if statement should really be an assert, but we don't
// have asserts...
if (!ownedWindowList.contains(weakWindow)) {
ownedWindowList.addElement(weakWindow);
}
}
}
}
void removeOwnedWindow(WeakReference<Window> weakWindow) {
if (weakWindow != null) {
// synchronized block not required since removeElement is
// already synchronized
ownedWindowList.removeElement(weakWindow);
}
}
void connectOwnedWindow(Window child) {
child.parent = this;
addOwnedWindow(child.weakThis);
child.disposerRecord.updateOwner();
}
private void addToWindowList() {
synchronized (Window.class) {
@SuppressWarnings("unchecked")
Vector<WeakReference<Window>> windowList = (Vector<WeakReference<Window>>)appContext.get(Window.class);
if (windowList == null) {
windowList = new Vector<WeakReference<Window>>();
appContext.put(Window.class, windowList);
}
windowList.add(weakThis);
}
}
private static void removeFromWindowList(AppContext context, WeakReference<Window> weakThis) {
synchronized (Window.class) {
@SuppressWarnings("unchecked")
Vector<WeakReference<Window>> windowList = (Vector<WeakReference<Window>>)context.get(Window.class);
if (windowList != null) {
windowList.remove(weakThis);
}
}
}
private void removeFromWindowList() {
removeFromWindowList(appContext, weakThis);
}
/**
* Window type.
*
* Synchronization: ObjectLock
*/
private Type type = Type.NORMAL;
/**
* Sets the type of the window.
*
* This method can only be called while the window is not displayable.
*
* @throws IllegalComponentStateException if the window
* is displayable.
* @throws IllegalArgumentException if the type is {@code null}
* @see Component#isDisplayable
* @see #getType
* @since 1.7
*/
public void setType(Type type) {
if (type == null) {
throw new IllegalArgumentException("type should not be null.");
}
synchronized (getTreeLock()) {
if (isDisplayable()) {
throw new IllegalComponentStateException(
"The window is displayable.");
}
synchronized (getObjectLock()) {
this.type = type;
}
}
}
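    /*
     * A sketch of assigning a window type before the window becomes
     * displayable, as required above; "ownerFrame" and "palette" are
     * hypothetical names.
     *
     *   Window palette = new Window(ownerFrame);
     *   palette.setType(Window.Type.UTILITY);   // must precede pack()/setVisible(true)
     *   palette.pack();
     *   palette.setVisible(true);
     */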
/**
* Returns the type of the window.
*
* @see #setType
* @since 1.7
*/
public Type getType() {
synchronized (getObjectLock()) {
return type;
}
}
/**
* The window serialized data version.
*
* @serial
*/
private int windowSerializedDataVersion = 2;
/**
* Writes default serializable fields to stream. Writes
* a list of serializable {@code WindowListener}s and
* {@code WindowFocusListener}s as optional data.
* Writes a list of child windows as optional data.
* Writes a list of icon images as optional data
*
* @param s the {@code ObjectOutputStream} to write
* @serialData {@code null} terminated sequence of
* 0 or more pairs; the pair consists of a {@code String}
* and {@code Object}; the {@code String}
* indicates the type of object and is one of the following:
* {@code windowListenerK} indicating a
* {@code WindowListener} object;
* {@code windowFocusWindowK} indicating a
* {@code WindowFocusListener} object;
* {@code ownedWindowK} indicating a child
* {@code Window} object
*
* @see AWTEventMulticaster#save(java.io.ObjectOutputStream, java.lang.String, java.util.EventListener)
* @see Component#windowListenerK
* @see Component#windowFocusListenerK
* @see Component#ownedWindowK
* @see #readObject(ObjectInputStream)
*/
private void writeObject(ObjectOutputStream s) throws IOException {
synchronized (this) {
// Update old focusMgr fields so that our object stream can be read
// by previous releases
focusMgr = new FocusManager();
focusMgr.focusRoot = this;
focusMgr.focusOwner = getMostRecentFocusOwner();
s.defaultWriteObject();
// Clear fields so that we don't keep extra references around
focusMgr = null;
AWTEventMulticaster.save(s, windowListenerK, windowListener);
AWTEventMulticaster.save(s, windowFocusListenerK, windowFocusListener);
AWTEventMulticaster.save(s, windowStateListenerK, windowStateListener);
}
s.writeObject(null);
synchronized (ownedWindowList) {
for (int i = 0; i < ownedWindowList.size(); i++) {
Window child = ownedWindowList.elementAt(i).get();
if (child != null) {
s.writeObject(ownedWindowK);
s.writeObject(child);
}
}
}
s.writeObject(null);
//write icon array
if (icons != null) {
for (Image i : icons) {
if (i instanceof Serializable) {
s.writeObject(i);
}
}
}
s.writeObject(null);
}
//
// Part of deserialization procedure to be called before
// user's code.
//
private void initDeserializedWindow() {
setWarningString();
inputContextLock = new Object();
// Deserialized Windows are not yet visible.
visible = false;
weakThis = new WeakReference<>(this);
anchor = new Object();
disposerRecord = new WindowDisposerRecord(appContext, this);
sun.java2d.Disposer.addRecord(anchor, disposerRecord);
addToWindowList();
initGC(null);
ownedWindowList = new Vector<>();
}
private void deserializeResources(ObjectInputStream s)
throws ClassNotFoundException, IOException, HeadlessException {
if (windowSerializedDataVersion < 2) {
// Translate old-style focus tracking to new model. For 1.4 and
// later releases, we'll rely on the Window's initial focusable
// Component.
if (focusMgr != null) {
if (focusMgr.focusOwner != null) {
KeyboardFocusManager.
setMostRecentFocusOwner(this, focusMgr.focusOwner);
}
}
// This field is non-transient and relies on default serialization.
// However, the default value is insufficient, so we need to set
// it explicitly for object data streams prior to 1.4.
focusableWindowState = true;
}
Object keyOrNull;
while(null != (keyOrNull = s.readObject())) {
String key = ((String)keyOrNull).intern();
if (windowListenerK == key) {
addWindowListener((WindowListener)(s.readObject()));
} else if (windowFocusListenerK == key) {
addWindowFocusListener((WindowFocusListener)(s.readObject()));
} else if (windowStateListenerK == key) {
addWindowStateListener((WindowStateListener)(s.readObject()));
} else // skip value for unrecognized key
s.readObject();
}
try {
while (null != (keyOrNull = s.readObject())) {
String key = ((String)keyOrNull).intern();
if (ownedWindowK == key)
connectOwnedWindow((Window) s.readObject());
else // skip value for unrecognized key
s.readObject();
}
//read icons
Object obj = s.readObject(); //Throws OptionalDataException
//for pre1.6 objects.
icons = new ArrayList<Image>(); //Frame.readObject() assumes
//pre1.6 version if icons is null.
while (obj != null) {
if (obj instanceof Image) {
icons.add((Image)obj);
}
obj = s.readObject();
}
}
catch (OptionalDataException e) {
// 1.1 serialized form
// ownedWindowList will be updated by Frame.readObject
}
}
/**
* Reads the {@code ObjectInputStream} and an optional
* list of listeners to receive various events fired by
* the component; also reads a list of
* (possibly {@code null}) child windows.
* Unrecognized keys or values will be ignored.
*
* @param s the {@code ObjectInputStream} to read
* @exception HeadlessException if
* {@code GraphicsEnvironment.isHeadless} returns
* {@code true}
* @see java.awt.GraphicsEnvironment#isHeadless
* @see #writeObject
*/
private void readObject(ObjectInputStream s)
throws ClassNotFoundException, IOException, HeadlessException
{
GraphicsEnvironment.checkHeadless();
initDeserializedWindow();
ObjectInputStream.GetField f = s.readFields();
syncLWRequests = f.get("syncLWRequests", systemSyncLWRequests);
state = f.get("state", 0);
focusableWindowState = f.get("focusableWindowState", true);
windowSerializedDataVersion = f.get("windowSerializedDataVersion", 1);
locationByPlatform = f.get("locationByPlatform", locationByPlatformProp);
// Note: 1.4 (or later) doesn't use focusMgr
focusMgr = (FocusManager)f.get("focusMgr", null);
Dialog.ModalExclusionType et = (Dialog.ModalExclusionType)
f.get("modalExclusionType", Dialog.ModalExclusionType.NO_EXCLUDE);
setModalExclusionType(et); // since 6.0
boolean aot = f.get("alwaysOnTop", false);
if(aot) {
setAlwaysOnTop(aot); // since 1.5; subject to permission check
}
shape = (Shape)f.get("shape", null);
opacity = (Float)f.get("opacity", 1.0f);
this.securityWarningWidth = 0;
this.securityWarningHeight = 0;
this.securityWarningPointX = 2.0;
this.securityWarningPointY = 0.0;
this.securityWarningAlignmentX = RIGHT_ALIGNMENT;
this.securityWarningAlignmentY = TOP_ALIGNMENT;
deserializeResources(s);
}
/*
* --- Accessibility Support ---
*
*/
/**
* Gets the AccessibleContext associated with this Window.
* For windows, the AccessibleContext takes the form of an
* AccessibleAWTWindow.
* A new AccessibleAWTWindow instance is created if necessary.
*
* @return an AccessibleAWTWindow that serves as the
* AccessibleContext of this Window
* @since 1.3
*/
public AccessibleContext getAccessibleContext() {
if (accessibleContext == null) {
accessibleContext = new AccessibleAWTWindow();
}
return accessibleContext;
}
/**
* This class implements accessibility support for the
* {@code Window} class. It provides an implementation of the
* Java Accessibility API appropriate to window user-interface elements.
* @since 1.3
*/
protected class AccessibleAWTWindow extends AccessibleAWTContainer
{
/*
* JDK 1.3 serialVersionUID
*/
private static final long serialVersionUID = 4215068635060671780L;
/**
* Get the role of this object.
*
* @return an instance of AccessibleRole describing the role of the
* object
* @see javax.accessibility.AccessibleRole
*/
public AccessibleRole getAccessibleRole() {
return AccessibleRole.WINDOW;
}
/**
* Get the state of this object.
*
* @return an instance of AccessibleStateSet containing the current
* state set of the object
* @see javax.accessibility.AccessibleState
*/
public AccessibleStateSet getAccessibleStateSet() {
AccessibleStateSet states = super.getAccessibleStateSet();
if (getFocusOwner() != null) {
states.add(AccessibleState.ACTIVE);
}
return states;
}
} // inner class AccessibleAWTWindow
@Override
void setGraphicsConfiguration(GraphicsConfiguration gc) {
if (gc == null) {
gc = GraphicsEnvironment.
getLocalGraphicsEnvironment().
getDefaultScreenDevice().
getDefaultConfiguration();
}
synchronized (getTreeLock()) {
super.setGraphicsConfiguration(gc);
if (log.isLoggable(PlatformLogger.Level.FINER)) {
log.finer("+ Window.setGraphicsConfiguration(): new GC is \n+ " + getGraphicsConfiguration_NoClientCode() + "\n+ this is " + this);
}
}
}
/**
* Sets the location of the window relative to the specified
* component according to the following scenarios.
* <p>
* The target screen mentioned below is a screen to which
* the window should be placed after the setLocationRelativeTo
* method is called.
* <ul>
* <li>If the component is {@code null}, or the {@code
* GraphicsConfiguration} associated with this component is
* {@code null}, the window is placed in the center of the
* screen. The center point can be obtained with the {@link
* GraphicsEnvironment#getCenterPoint
* GraphicsEnvironment.getCenterPoint} method.
* <li>If the component is not {@code null}, but it is not
* currently showing, the window is placed in the center of
* the target screen defined by the {@code
* GraphicsConfiguration} associated with this component.
* <li>If the component is not {@code null} and is shown on
* the screen, then the window is located in such a way that
* the center of the window coincides with the center of the
* component.
* </ul>
* <p>
     * If the screens' configuration does not allow the window to
* be moved from one screen to another, then the window is
* only placed at the location determined according to the
* above conditions and its {@code GraphicsConfiguration} is
* not changed.
* <p>
* <b>Note</b>: If the lower edge of the window is out of the screen,
* then the window is placed to the side of the {@code Component}
* that is closest to the center of the screen. So if the
* component is on the right part of the screen, the window
* is placed to its left, and vice versa.
* <p>
* If after the window location has been calculated, the upper,
* left, or right edge of the window is out of the screen,
* then the window is located in such a way that the upper,
* left, or right edge of the window coincides with the
* corresponding edge of the screen. If both left and right
* edges of the window are out of the screen, the window is
* placed at the left side of the screen. The similar placement
* will occur if both top and bottom edges are out of the screen.
* In that case, the window is placed at the top side of the screen.
* <p>
* The method changes the geometry-related data. Therefore,
* the native windowing system may ignore such requests, or it may modify
* the requested data, so that the {@code Window} object is placed and sized
* in a way that corresponds closely to the desktop settings.
*
* @param c the component in relation to which the window's location
* is determined
* @see java.awt.GraphicsEnvironment#getCenterPoint
* @since 1.4
*/
public void setLocationRelativeTo(Component c) {
// target location
int dx = 0, dy = 0;
// target GC
GraphicsConfiguration gc = getGraphicsConfiguration_NoClientCode();
Rectangle gcBounds = gc.getBounds();
Dimension windowSize = getSize();
// search a top-level of c
Window componentWindow = SunToolkit.getContainingWindow(c);
if ((c == null) || (componentWindow == null)) {
GraphicsEnvironment ge = GraphicsEnvironment.getLocalGraphicsEnvironment();
gc = ge.getDefaultScreenDevice().getDefaultConfiguration();
gcBounds = gc.getBounds();
Point centerPoint = ge.getCenterPoint();
dx = centerPoint.x - windowSize.width / 2;
dy = centerPoint.y - windowSize.height / 2;
} else if (!c.isShowing()) {
gc = componentWindow.getGraphicsConfiguration();
gcBounds = gc.getBounds();
dx = gcBounds.x + (gcBounds.width - windowSize.width) / 2;
dy = gcBounds.y + (gcBounds.height - windowSize.height) / 2;
} else {
gc = componentWindow.getGraphicsConfiguration();
gcBounds = gc.getBounds();
Dimension compSize = c.getSize();
Point compLocation = c.getLocationOnScreen();
dx = compLocation.x + ((compSize.width - windowSize.width) / 2);
dy = compLocation.y + ((compSize.height - windowSize.height) / 2);
// Adjust for bottom edge being offscreen
if (dy + windowSize.height > gcBounds.y + gcBounds.height) {
dy = gcBounds.y + gcBounds.height - windowSize.height;
if (compLocation.x - gcBounds.x + compSize.width / 2 < gcBounds.width / 2) {
dx = compLocation.x + compSize.width;
} else {
dx = compLocation.x - windowSize.width;
}
}
}
// Avoid being placed off the edge of the screen:
// bottom
if (dy + windowSize.height > gcBounds.y + gcBounds.height) {
dy = gcBounds.y + gcBounds.height - windowSize.height;
}
// top
if (dy < gcBounds.y) {
dy = gcBounds.y;
}
// right
if (dx + windowSize.width > gcBounds.x + gcBounds.width) {
dx = gcBounds.x + gcBounds.width - windowSize.width;
}
// left
if (dx < gcBounds.x) {
dx = gcBounds.x;
}
setLocation(dx, dy);
}
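    // --- Illustrative sketch (not part of the original java.awt.Window source) ---
    // Typical client-side use of the placement rules documented above: size the
    // window first, then center it over its owner, or over the screen when the
    // owner is null or not showing. Names below are assumptions for the example.
    private static void showCenteredExample(Window w, Component owner) {
        w.pack();                        // establish the window size first
        w.setLocationRelativeTo(owner);  // null owner => center of the screen
        w.setVisible(true);
    }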
/**
* Overridden from Component. Top-level Windows should not propagate a
* MouseWheelEvent beyond themselves into their owning Windows.
*/
void deliverMouseWheelToAncestor(MouseWheelEvent e) {}
/**
* Overridden from Component. Top-level Windows don't dispatch to ancestors
*/
boolean dispatchMouseWheelToAncestor(MouseWheelEvent e) {return false;}
/**
* Creates a new strategy for multi-buffering on this component.
* Multi-buffering is useful for rendering performance. This method
* attempts to create the best strategy available with the number of
* buffers supplied. It will always create a {@code BufferStrategy}
* with that number of buffers.
* A page-flipping strategy is attempted first, then a blitting strategy
* using accelerated buffers. Finally, an unaccelerated blitting
* strategy is used.
* <p>
* Each time this method is called,
* the existing buffer strategy for this component is discarded.
* @param numBuffers number of buffers to create
* @exception IllegalArgumentException if numBuffers is less than 1.
* @exception IllegalStateException if the component is not displayable
* @see #isDisplayable
* @see #getBufferStrategy
* @since 1.4
*/
public void createBufferStrategy(int numBuffers) {
super.createBufferStrategy(numBuffers);
}
/**
* Creates a new strategy for multi-buffering on this component with the
* required buffer capabilities. This is useful, for example, if only
* accelerated memory or page flipping is desired (as specified by the
* buffer capabilities).
* <p>
* Each time this method
* is called, the existing buffer strategy for this component is discarded.
* @param numBuffers number of buffers to create, including the front buffer
* @param caps the required capabilities for creating the buffer strategy;
* cannot be {@code null}
* @exception AWTException if the capabilities supplied could not be
* supported or met; this may happen, for example, if there is not enough
* accelerated memory currently available, or if page flipping is specified
* but not possible.
* @exception IllegalArgumentException if numBuffers is less than 1, or if
* caps is {@code null}
* @see #getBufferStrategy
* @since 1.4
*/
public void createBufferStrategy(int numBuffers,
BufferCapabilities caps) throws AWTException {
super.createBufferStrategy(numBuffers, caps);
}
/**
* Returns the {@code BufferStrategy} used by this component. This
* method will return null if a {@code BufferStrategy} has not yet
* been created or has been disposed.
*
* @return the buffer strategy used by this component
* @see #createBufferStrategy
* @since 1.4
*/
public BufferStrategy getBufferStrategy() {
return super.getBufferStrategy();
}
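    // --- Illustrative sketch (not part of the original java.awt.Window source) ---
    // The canonical render loop the two methods above are designed for: draw to
    // the back buffer, then show(), re-rendering if the contents were lost or
    // restored. The clear color and buffer count are arbitrary example choices.
    private static void renderOnceWithBufferStrategyExample(Window w) {
        w.createBufferStrategy(2);                     // double buffering
        BufferStrategy strategy = w.getBufferStrategy();
        do {
            do {
                Graphics g = strategy.getDrawGraphics();
                try {
                    g.setColor(Color.BLACK);
                    g.fillRect(0, 0, w.getWidth(), w.getHeight());
                } finally {
                    g.dispose();
                }
            } while (strategy.contentsRestored());     // back buffer recreated?
            strategy.show();                           // flip or blit
        } while (strategy.contentsLost());             // video memory lost?
    }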
Component getTemporaryLostComponent() {
return temporaryLostComponent;
}
Component setTemporaryLostComponent(Component component) {
Component previousComp = temporaryLostComponent;
// Check that "component" is an acceptable focus owner and don't store it otherwise
        // - or later we will have problems with the opposite component while handling WINDOW_GAINED_FOCUS
if (component == null || component.canBeFocusOwner()) {
temporaryLostComponent = component;
} else {
temporaryLostComponent = null;
}
return previousComp;
}
/**
* Checks whether this window can contain focus owner.
     * Verifies that it is focusable and, as a container, it can contain the focus owner.
* @since 1.5
*/
boolean canContainFocusOwner(Component focusOwnerCandidate) {
return super.canContainFocusOwner(focusOwnerCandidate) && isFocusableWindow();
}
private volatile boolean locationByPlatform = locationByPlatformProp;
/**
* Sets whether this Window should appear at the default location for the
* native windowing system or at the current location (returned by
* {@code getLocation}) the next time the Window is made visible.
* This behavior resembles a native window shown without programmatically
* setting its location. Most windowing systems cascade windows if their
* locations are not explicitly set. The actual location is determined once the
* window is shown on the screen.
* <p>
* This behavior can also be enabled by setting the System Property
* "java.awt.Window.locationByPlatform" to "true", though calls to this method
* take precedence.
* <p>
* Calls to {@code setVisible}, {@code setLocation} and
* {@code setBounds} after calling {@code setLocationByPlatform} clear
* this property of the Window.
* <p>
* For example, after the following code is executed:
* <pre>
* setLocationByPlatform(true);
* setVisible(true);
* boolean flag = isLocationByPlatform();
* </pre>
* The window will be shown at platform's default location and
* {@code flag} will be {@code false}.
* <p>
* In the following sample:
* <pre>
* setLocationByPlatform(true);
* setLocation(10, 10);
* boolean flag = isLocationByPlatform();
* setVisible(true);
* </pre>
* The window will be shown at (10, 10) and {@code flag} will be
* {@code false}.
*
* @param locationByPlatform {@code true} if this Window should appear
* at the default location, {@code false} if at the current location
* @throws IllegalComponentStateException if the window
* is showing on screen and locationByPlatform is {@code true}.
* @see #setLocation
* @see #isShowing
* @see #setVisible
* @see #isLocationByPlatform
* @see java.lang.System#getProperty(String)
* @since 1.5
*/
public void setLocationByPlatform(boolean locationByPlatform) {
synchronized (getTreeLock()) {
if (locationByPlatform && isShowing()) {
throw new IllegalComponentStateException("The window is showing on screen.");
}
this.locationByPlatform = locationByPlatform;
}
}
/**
* Returns {@code true} if this Window will appear at the default location
* for the native windowing system the next time this Window is made visible.
* This method always returns {@code false} if the Window is showing on the
* screen.
*
* @return whether this Window will appear at the default location
* @see #setLocationByPlatform
* @see #isShowing
* @since 1.5
*/
public boolean isLocationByPlatform() {
return locationByPlatform;
}
/**
* {@inheritDoc}
* <p>
* The {@code width} or {@code height} values
* are automatically enlarged if either is less than
* the minimum size as specified by previous call to
* {@code setMinimumSize}.
* <p>
* The method changes the geometry-related data. Therefore,
* the native windowing system may ignore such requests, or it may modify
* the requested data, so that the {@code Window} object is placed and sized
* in a way that corresponds closely to the desktop settings.
*
* @see #getBounds
* @see #setLocation(int, int)
* @see #setLocation(Point)
* @see #setSize(int, int)
* @see #setSize(Dimension)
* @see #setMinimumSize
* @see #setLocationByPlatform
* @see #isLocationByPlatform
* @since 1.6
*/
public void setBounds(int x, int y, int width, int height) {
synchronized (getTreeLock()) {
if (getBoundsOp() == ComponentPeer.SET_LOCATION ||
getBoundsOp() == ComponentPeer.SET_BOUNDS)
{
locationByPlatform = false;
}
super.setBounds(x, y, width, height);
}
}
/**
* {@inheritDoc}
* <p>
* The {@code r.width} or {@code r.height} values
* will be automatically enlarged if either is less than
* the minimum size as specified by previous call to
* {@code setMinimumSize}.
* <p>
* The method changes the geometry-related data. Therefore,
* the native windowing system may ignore such requests, or it may modify
* the requested data, so that the {@code Window} object is placed and sized
* in a way that corresponds closely to the desktop settings.
*
* @see #getBounds
* @see #setLocation(int, int)
* @see #setLocation(Point)
* @see #setSize(int, int)
* @see #setSize(Dimension)
* @see #setMinimumSize
* @see #setLocationByPlatform
* @see #isLocationByPlatform
* @since 1.6
*/
public void setBounds(Rectangle r) {
setBounds(r.x, r.y, r.width, r.height);
}
/**
* Determines whether this component will be displayed on the screen.
* @return {@code true} if the component and all of its ancestors
* until a toplevel window are visible, {@code false} otherwise
*/
boolean isRecursivelyVisible() {
// 5079694 fix: for a toplevel to be displayed, its parent doesn't have to be visible.
// We're overriding isRecursivelyVisible to implement this policy.
return visible;
}
// ******************** SHAPES & TRANSPARENCY CODE ********************
/**
* Returns the opacity of the window.
*
* @return the opacity of the window
*
* @see Window#setOpacity(float)
* @see GraphicsDevice.WindowTranslucency
*
* @since 1.7
*/
public float getOpacity() {
return opacity;
}
/**
* Sets the opacity of the window.
* <p>
* The opacity value is in the range [0..1]. Note that setting the opacity
* level of 0 may or may not disable the mouse event handling on this
* window. This is a platform-dependent behavior.
* <p>
* The following conditions must be met in order to set the opacity value
* less than {@code 1.0f}:
* <ul>
* <li>The {@link GraphicsDevice.WindowTranslucency#TRANSLUCENT TRANSLUCENT}
* translucency must be supported by the underlying system
* <li>The window must be undecorated (see {@link Frame#setUndecorated}
* and {@link Dialog#setUndecorated})
* <li>The window must not be in full-screen mode (see {@link
* GraphicsDevice#setFullScreenWindow(Window)})
* </ul>
* <p>
* If the requested opacity value is less than {@code 1.0f}, and any of the
* above conditions are not met, the window opacity will not change,
* and the {@code IllegalComponentStateException} will be thrown.
* <p>
     * The translucency levels of individual pixels may also be affected by the
* alpha component of their color (see {@link Window#setBackground(Color)}) and the
* current shape of this window (see {@link #setShape(Shape)}).
*
* @param opacity the opacity level to set to the window
*
* @throws IllegalArgumentException if the opacity is out of the range
* [0..1]
* @throws IllegalComponentStateException if the window is decorated and
* the opacity is less than {@code 1.0f}
* @throws IllegalComponentStateException if the window is in full screen
* mode, and the opacity is less than {@code 1.0f}
* @throws UnsupportedOperationException if the {@code
* GraphicsDevice.WindowTranslucency#TRANSLUCENT TRANSLUCENT}
* translucency is not supported and the opacity is less than
* {@code 1.0f}
*
* @see Window#getOpacity
* @see Window#setBackground(Color)
* @see Window#setShape(Shape)
* @see Frame#isUndecorated
* @see Dialog#isUndecorated
* @see GraphicsDevice.WindowTranslucency
* @see GraphicsDevice#isWindowTranslucencySupported(GraphicsDevice.WindowTranslucency)
*
* @since 1.7
*/
public void setOpacity(float opacity) {
synchronized (getTreeLock()) {
if (opacity < 0.0f || opacity > 1.0f) {
throw new IllegalArgumentException(
"The value of opacity should be in the range [0.0f .. 1.0f].");
}
if (opacity < 1.0f) {
GraphicsConfiguration gc = getGraphicsConfiguration();
GraphicsDevice gd = gc.getDevice();
if (gc.getDevice().getFullScreenWindow() == this) {
throw new IllegalComponentStateException(
"Setting opacity for full-screen window is not supported.");
}
if (!gd.isWindowTranslucencySupported(
GraphicsDevice.WindowTranslucency.TRANSLUCENT))
{
throw new UnsupportedOperationException(
"TRANSLUCENT translucency is not supported.");
}
}
this.opacity = opacity;
WindowPeer peer = (WindowPeer)getPeer();
if (peer != null) {
peer.setOpacity(opacity);
}
}
}
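    // --- Illustrative sketch (not part of the original java.awt.Window source) ---
    // Guarded use of setOpacity following the conditions listed above: uniform
    // translucency is applied only when TRANSLUCENT is supported. The 0.85f
    // level is an arbitrary example value, and the window is assumed to be
    // undecorated and not in full-screen mode.
    private static void applyUniformTranslucencyExample(Window w) {
        GraphicsDevice device = w.getGraphicsConfiguration().getDevice();
        if (device.isWindowTranslucencySupported(
                GraphicsDevice.WindowTranslucency.TRANSLUCENT)) {
            w.setOpacity(0.85f);
        }
    }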
/**
* Returns the shape of the window.
*
* The value returned by this method may not be the same as
* previously set with {@code setShape(shape)}, but it is guaranteed
* to represent the same shape.
*
* @return the shape of the window or {@code null} if no
* shape is specified for the window
*
* @see Window#setShape(Shape)
* @see GraphicsDevice.WindowTranslucency
*
* @since 1.7
*/
public Shape getShape() {
synchronized (getTreeLock()) {
return shape == null ? null : new Path2D.Float(shape);
}
}
/**
* Sets the shape of the window.
* <p>
* Setting a shape cuts off some parts of the window. Only the parts that
* belong to the given {@link Shape} remain visible and clickable. If
* the shape argument is {@code null}, this method restores the default
* shape, making the window rectangular on most platforms.
* <p>
* The following conditions must be met to set a non-null shape:
* <ul>
* <li>The {@link GraphicsDevice.WindowTranslucency#PERPIXEL_TRANSPARENT
* PERPIXEL_TRANSPARENT} translucency must be supported by the
* underlying system
* <li>The window must be undecorated (see {@link Frame#setUndecorated}
* and {@link Dialog#setUndecorated})
* <li>The window must not be in full-screen mode (see {@link
* GraphicsDevice#setFullScreenWindow(Window)})
* </ul>
* <p>
* If the requested shape is not {@code null}, and any of the above
* conditions are not met, the shape of this window will not change,
* and either the {@code UnsupportedOperationException} or {@code
* IllegalComponentStateException} will be thrown.
* <p>
     * The translucency levels of individual pixels may also be affected by the
* alpha component of their color (see {@link Window#setBackground(Color)}) and the
* opacity value (see {@link #setOpacity(float)}). See {@link
* GraphicsDevice.WindowTranslucency} for more details.
*
* @param shape the shape to set to the window
*
* @throws IllegalComponentStateException if the shape is not {@code
* null} and the window is decorated
* @throws IllegalComponentStateException if the shape is not {@code
* null} and the window is in full-screen mode
* @throws UnsupportedOperationException if the shape is not {@code
* null} and {@link GraphicsDevice.WindowTranslucency#PERPIXEL_TRANSPARENT
* PERPIXEL_TRANSPARENT} translucency is not supported
*
* @see Window#getShape()
* @see Window#setBackground(Color)
* @see Window#setOpacity(float)
* @see Frame#isUndecorated
* @see Dialog#isUndecorated
* @see GraphicsDevice.WindowTranslucency
* @see GraphicsDevice#isWindowTranslucencySupported(GraphicsDevice.WindowTranslucency)
*
* @since 1.7
*/
public void setShape(Shape shape) {
synchronized (getTreeLock()) {
if (shape != null) {
GraphicsConfiguration gc = getGraphicsConfiguration();
GraphicsDevice gd = gc.getDevice();
if (gc.getDevice().getFullScreenWindow() == this) {
throw new IllegalComponentStateException(
"Setting shape for full-screen window is not supported.");
}
if (!gd.isWindowTranslucencySupported(
GraphicsDevice.WindowTranslucency.PERPIXEL_TRANSPARENT))
{
throw new UnsupportedOperationException(
"PERPIXEL_TRANSPARENT translucency is not supported.");
}
}
this.shape = (shape == null) ? null : new Path2D.Float(shape);
WindowPeer peer = (WindowPeer)getPeer();
if (peer != null) {
peer.applyShape(shape == null ? null : Region.getInstance(shape, null));
}
}
}
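    // --- Illustrative sketch (not part of the original java.awt.Window source) ---
    // Applies a rounded-rectangle shape under the PERPIXEL_TRANSPARENT
    // precondition described above. The corner radius is an arbitrary example
    // value, and the window is assumed undecorated and not in full-screen mode.
    private static void applyRoundedShapeExample(Window w) {
        GraphicsDevice device = w.getGraphicsConfiguration().getDevice();
        if (device.isWindowTranslucencySupported(
                GraphicsDevice.WindowTranslucency.PERPIXEL_TRANSPARENT)) {
            w.setShape(new java.awt.geom.RoundRectangle2D.Float(
                    0, 0, w.getWidth(), w.getHeight(), 20, 20));
        }
    }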
/**
* Gets the background color of this window.
* <p>
* Note that the alpha component of the returned color indicates whether
* the window is in the non-opaque (per-pixel translucent) mode.
*
* @return this component's background color
*
* @see Window#setBackground(Color)
* @see Window#isOpaque
* @see GraphicsDevice.WindowTranslucency
*/
@Override
public Color getBackground() {
return super.getBackground();
}
/**
* Sets the background color of this window.
* <p>
* If the windowing system supports the {@link
* GraphicsDevice.WindowTranslucency#PERPIXEL_TRANSLUCENT PERPIXEL_TRANSLUCENT}
* translucency, the alpha component of the given background color
     * may affect the mode of operation for this window: it indicates whether
* this window must be opaque (alpha equals {@code 1.0f}) or per-pixel translucent
* (alpha is less than {@code 1.0f}). If the given background color is
* {@code null}, the window is considered completely opaque.
* <p>
* All the following conditions must be met to enable the per-pixel
* transparency mode for this window:
* <ul>
* <li>The {@link GraphicsDevice.WindowTranslucency#PERPIXEL_TRANSLUCENT
* PERPIXEL_TRANSLUCENT} translucency must be supported by the graphics
* device where this window is located
* <li>The window must be undecorated (see {@link Frame#setUndecorated}
* and {@link Dialog#setUndecorated})
* <li>The window must not be in full-screen mode (see {@link
* GraphicsDevice#setFullScreenWindow(Window)})
* </ul>
* <p>
* If the alpha component of the requested background color is less than
* {@code 1.0f}, and any of the above conditions are not met, the background
* color of this window will not change, the alpha component of the given
* background color will not affect the mode of operation for this window,
* and either the {@code UnsupportedOperationException} or {@code
* IllegalComponentStateException} will be thrown.
* <p>
* When the window is per-pixel translucent, the drawing sub-system
* respects the alpha value of each individual pixel. If a pixel gets
* painted with the alpha color component equal to zero, it becomes
* visually transparent. If the alpha of the pixel is equal to 1.0f, the
* pixel is fully opaque. Interim values of the alpha color component make
* the pixel semi-transparent. In this mode, the background of the window
* gets painted with the alpha value of the given background color. If the
* alpha value of the argument of this method is equal to {@code 0}, the
* background is not painted at all.
* <p>
* The actual level of translucency of a given pixel also depends on window
* opacity (see {@link #setOpacity(float)}), as well as the current shape of
* this window (see {@link #setShape(Shape)}).
* <p>
* Note that painting a pixel with the alpha value of {@code 0} may or may
* not disable the mouse event handling on this pixel. This is a
* platform-dependent behavior. To make sure the mouse events do not get
* dispatched to a particular pixel, the pixel must be excluded from the
* shape of the window.
* <p>
* Enabling the per-pixel translucency mode may change the graphics
* configuration of this window due to the native platform requirements.
*
* @param bgColor the color to become this window's background color.
*
* @throws IllegalComponentStateException if the alpha value of the given
* background color is less than {@code 1.0f} and the window is decorated
* @throws IllegalComponentStateException if the alpha value of the given
* background color is less than {@code 1.0f} and the window is in
* full-screen mode
* @throws UnsupportedOperationException if the alpha value of the given
* background color is less than {@code 1.0f} and {@link
* GraphicsDevice.WindowTranslucency#PERPIXEL_TRANSLUCENT
* PERPIXEL_TRANSLUCENT} translucency is not supported
*
* @see Window#getBackground
* @see Window#isOpaque
* @see Window#setOpacity(float)
* @see Window#setShape(Shape)
* @see Frame#isUndecorated
* @see Dialog#isUndecorated
* @see GraphicsDevice.WindowTranslucency
* @see GraphicsDevice#isWindowTranslucencySupported(GraphicsDevice.WindowTranslucency)
* @see GraphicsConfiguration#isTranslucencyCapable()
*/
@Override
public void setBackground(Color bgColor) {
Color oldBg = getBackground();
super.setBackground(bgColor);
if (oldBg != null && oldBg.equals(bgColor)) {
return;
}
int oldAlpha = oldBg != null ? oldBg.getAlpha() : 255;
int alpha = bgColor != null ? bgColor.getAlpha() : 255;
if ((oldAlpha == 255) && (alpha < 255)) { // non-opaque window
GraphicsConfiguration gc = getGraphicsConfiguration();
GraphicsDevice gd = gc.getDevice();
if (gc.getDevice().getFullScreenWindow() == this) {
throw new IllegalComponentStateException(
"Making full-screen window non opaque is not supported.");
}
if (!gc.isTranslucencyCapable()) {
GraphicsConfiguration capableGC = gd.getTranslucencyCapableGC();
if (capableGC == null) {
throw new UnsupportedOperationException(
"PERPIXEL_TRANSLUCENT translucency is not supported");
}
setGraphicsConfiguration(capableGC);
}
setLayersOpaque(this, false);
} else if ((oldAlpha < 255) && (alpha == 255)) {
setLayersOpaque(this, true);
}
WindowPeer peer = (WindowPeer)getPeer();
if (peer != null) {
peer.setOpaque(alpha == 255);
}
}
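    // --- Illustrative sketch (not part of the original java.awt.Window source) ---
    // Enables the per-pixel translucent mode described above by installing a
    // fully transparent background, guarded by the PERPIXEL_TRANSLUCENT check;
    // the window is assumed undecorated and not in full-screen mode.
    private static void enablePerPixelTranslucencyExample(Window w) {
        GraphicsDevice device = w.getGraphicsConfiguration().getDevice();
        if (device.isWindowTranslucencySupported(
                GraphicsDevice.WindowTranslucency.PERPIXEL_TRANSLUCENT)) {
            // Alpha < 255 switches the window into per-pixel translucent mode;
            // subsequent painting then respects each pixel's alpha value.
            w.setBackground(new Color(0, 0, 0, 0));
        }
    }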
/**
* Indicates if the window is currently opaque.
* <p>
* The method returns {@code false} if the background color of the window
* is not {@code null} and the alpha component of the color is less than
* {@code 1.0f}. The method returns {@code true} otherwise.
*
* @return {@code true} if the window is opaque, {@code false} otherwise
*
* @see Window#getBackground
* @see Window#setBackground(Color)
* @since 1.7
*/
@Override
public boolean isOpaque() {
Color bg = getBackground();
return bg != null ? bg.getAlpha() == 255 : true;
}
private void updateWindow() {
synchronized (getTreeLock()) {
WindowPeer peer = (WindowPeer)getPeer();
if (peer != null) {
peer.updateWindow();
}
}
}
/**
* {@inheritDoc}
*
* @since 1.7
*/
@Override
public void paint(Graphics g) {
if (!isOpaque()) {
Graphics gg = g.create();
try {
if (gg instanceof Graphics2D) {
gg.setColor(getBackground());
((Graphics2D)gg).setComposite(AlphaComposite.getInstance(AlphaComposite.SRC));
gg.fillRect(0, 0, getWidth(), getHeight());
}
} finally {
gg.dispose();
}
}
super.paint(g);
}
private static void setLayersOpaque(Component component, boolean isOpaque) {
// Shouldn't use instanceof to avoid loading Swing classes
// if it's a pure AWT application.
if (SunToolkit.isInstanceOf(component, "javax.swing.RootPaneContainer")) {
javax.swing.RootPaneContainer rpc = (javax.swing.RootPaneContainer)component;
javax.swing.JRootPane root = rpc.getRootPane();
javax.swing.JLayeredPane lp = root.getLayeredPane();
Container c = root.getContentPane();
javax.swing.JComponent content =
(c instanceof javax.swing.JComponent) ? (javax.swing.JComponent)c : null;
lp.setOpaque(isOpaque);
root.setOpaque(isOpaque);
if (content != null) {
content.setOpaque(isOpaque);
// Iterate down one level to see whether we have a JApplet
// (which is also a RootPaneContainer) which requires processing
int numChildren = content.getComponentCount();
if (numChildren > 0) {
Component child = content.getComponent(0);
// It's OK to use instanceof here because we've
// already loaded the RootPaneContainer class by now
if (child instanceof javax.swing.RootPaneContainer) {
setLayersOpaque(child, isOpaque);
}
}
}
}
}
// ************************** MIXING CODE *******************************
// A window has an owner, but it does NOT have a container
@Override
final Container getContainer() {
return null;
}
/**
* Applies the shape to the component
* @param shape Shape to be applied to the component
*/
@Override
final void applyCompoundShape(Region shape) {
// The shape calculated by mixing code is not intended to be applied
// to windows or frames
}
@Override
final void applyCurrentShape() {
// The shape calculated by mixing code is not intended to be applied
// to windows or frames
}
@Override
final void mixOnReshaping() {
// The shape calculated by mixing code is not intended to be applied
// to windows or frames
}
@Override
final Point getLocationOnWindow() {
return new Point(0, 0);
}
// ****************** END OF MIXING CODE ********************************
/**
* Limit the given double value with the given range.
*/
private static double limit(double value, double min, double max) {
value = Math.max(value, min);
value = Math.min(value, max);
return value;
}
/**
* Calculate the position of the security warning.
*
* This method gets the window location/size as reported by the native
* system since the locally cached values may represent outdated data.
*
* The method is used from the native code, or via AWTAccessor.
*
* NOTE: this method is invoked on the toolkit thread, and therefore is not
* supposed to become public/user-overridable.
*/
private Point2D calculateSecurityWarningPosition(double x, double y,
double w, double h)
{
// The position according to the spec of SecurityWarning.setPosition()
double wx = x + w * securityWarningAlignmentX + securityWarningPointX;
double wy = y + h * securityWarningAlignmentY + securityWarningPointY;
// First, make sure the warning is not too far from the window bounds
wx = Window.limit(wx,
x - securityWarningWidth - 2,
x + w + 2);
wy = Window.limit(wy,
y - securityWarningHeight - 2,
y + h + 2);
// Now make sure the warning window is visible on the screen
GraphicsConfiguration graphicsConfig =
getGraphicsConfiguration_NoClientCode();
Rectangle screenBounds = graphicsConfig.getBounds();
Insets screenInsets =
Toolkit.getDefaultToolkit().getScreenInsets(graphicsConfig);
wx = Window.limit(wx,
screenBounds.x + screenInsets.left,
screenBounds.x + screenBounds.width - screenInsets.right
- securityWarningWidth);
wy = Window.limit(wy,
screenBounds.y + screenInsets.top,
screenBounds.y + screenBounds.height - screenInsets.bottom
- securityWarningHeight);
return new Point2D.Double(wx, wy);
}
static {
AWTAccessor.setWindowAccessor(new AWTAccessor.WindowAccessor() {
public float getOpacity(Window window) {
return window.opacity;
}
public void setOpacity(Window window, float opacity) {
window.setOpacity(opacity);
}
public Shape getShape(Window window) {
return window.getShape();
}
public void setShape(Window window, Shape shape) {
window.setShape(shape);
}
public void setOpaque(Window window, boolean opaque) {
Color bg = window.getBackground();
if (bg == null) {
bg = new Color(0, 0, 0, 0);
}
window.setBackground(new Color(bg.getRed(), bg.getGreen(), bg.getBlue(),
opaque ? 255 : 0));
}
public void updateWindow(Window window) {
window.updateWindow();
}
public Dimension getSecurityWarningSize(Window window) {
return new Dimension(window.securityWarningWidth,
window.securityWarningHeight);
}
public void setSecurityWarningSize(Window window, int width, int height)
{
window.securityWarningWidth = width;
window.securityWarningHeight = height;
}
public void setSecurityWarningPosition(Window window,
Point2D point, float alignmentX, float alignmentY)
{
window.securityWarningPointX = point.getX();
window.securityWarningPointY = point.getY();
window.securityWarningAlignmentX = alignmentX;
window.securityWarningAlignmentY = alignmentY;
synchronized (window.getTreeLock()) {
WindowPeer peer = (WindowPeer)window.getPeer();
if (peer != null) {
peer.repositionSecurityWarning();
}
}
}
public Point2D calculateSecurityWarningPosition(Window window,
double x, double y, double w, double h)
{
return window.calculateSecurityWarningPosition(x, y, w, h);
}
public void setLWRequestStatus(Window changed, boolean status) {
changed.syncLWRequests = status;
}
public boolean isAutoRequestFocus(Window w) {
return w.autoRequestFocus;
}
public boolean isTrayIconWindow(Window w) {
return w.isTrayIconWindow;
}
public void setTrayIconWindow(Window w, boolean isTrayIconWindow) {
w.isTrayIconWindow = isTrayIconWindow;
}
public Window[] getOwnedWindows(Window w) {
return w.getOwnedWindows_NoClientCode();
}
}); // WindowAccessor
} // static
// a window doesn't need to be updated in the Z-order.
@Override
void updateZOrder() {}
} // class Window
/**
* This class is no longer used, but is maintained for Serialization
* backward-compatibility.
*/
class FocusManager implements java.io.Serializable {
Container focusRoot;
Component focusOwner;
/*
* JDK 1.1 serialVersionUID
*/
static final long serialVersionUID = 2491878825643557906L;
}
| 61,874 |
2,146 | <filename>examples/breakpad/boo.cpp
#include <google_breakpad/common/breakpad_types.h>
int main() {
}
| 40 |
545 | <filename>third_party/java/jarjar/jarjar-core/src/main/java/com/tonicsystems/jarjar/transform/asm/ClassTransformer.java
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package com.tonicsystems.jarjar.transform.asm;
import javax.annotation.Nonnull;
import org.objectweb.asm.ClassVisitor;
/**
*
* @author shevek
*/
public interface ClassTransformer {
@Nonnull
public ClassVisitor transform(@Nonnull ClassVisitor v);
}
| 176 |
640 | #include <ulib.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#define MATSIZE 10
static int mata[MATSIZE][MATSIZE];
static int matb[MATSIZE][MATSIZE];
static int matc[MATSIZE][MATSIZE];
void
work(unsigned int times) {
int i, j, k, size = MATSIZE;
for (i = 0; i < size; i ++) {
for (j = 0; j < size; j ++) {
mata[i][j] = matb[i][j] = 1;
}
}
yield();
cprintf("pid %d is running (%d times)!.\n", getpid(), times);
while (times -- > 0) {
for (i = 0; i < size; i ++) {
for (j = 0; j < size; j ++) {
matc[i][j] = 0;
for (k = 0; k < size; k ++) {
matc[i][j] += mata[i][k] * matb[k][j];
}
}
}
for (i = 0; i < size; i ++) {
for (j = 0; j < size; j ++) {
mata[i][j] = matb[i][j] = matc[i][j];
}
}
}
cprintf("pid %d done!.\n", getpid());
exit(0);
}
const int total = 21;
int
main(void) {
int pids[total];
memset(pids, 0, sizeof(pids));
int i;
for (i = 0; i < total; i ++) {
if ((pids[i] = fork()) == 0) {
srand(i * i);
int times = (((unsigned int)rand()) % total);
times = (times * times + 10) * 100;
work(times);
}
if (pids[i] < 0) {
goto failed;
}
}
cprintf("fork ok.\n");
for (i = 0; i < total; i ++) {
if (wait() != 0) {
cprintf("wait failed.\n");
goto failed;
}
}
cprintf("matrix pass.\n");
return 0;
failed:
for (i = 0; i < total; i ++) {
if (pids[i] > 0) {
kill(pids[i]);
}
}
panic("FAIL: T.T\n");
}
| 1,006 |
1,118 | {"deu":{"common":"Indonesien","official":"Republik Indonesien"},"fin":{"common":"Indonesia","official":"Indonesian tasavalta"},"fra":{"common":"Indonésie","official":"République d'Indonésie"},"hrv":{"common":"Indonezija","official":"Republika Indonezija"},"ita":{"common":"Indonesia","official":"Repubblica di Indonesia"},"jpn":{"common":"インドネシア","official":"インドネシア共和国"},"nld":{"common":"Indonesië","official":"Republiek Indonesië"},"por":{"common":"Indonésia","official":"República da Indonésia"},"rus":{"common":"Индонезия","official":"Республика Индонезия"},"spa":{"common":"Indonesia","official":"República de Indonesia"}}
| 232 |
329 | package kr.dogfoot.hwplib.writer.bodytext.paragraph.control;
import kr.dogfoot.hwplib.object.bodytext.control.ControlFootnote;
import kr.dogfoot.hwplib.object.bodytext.control.ctrlheader.CtrlHeaderFootnote;
import kr.dogfoot.hwplib.object.etc.HWPTag;
import kr.dogfoot.hwplib.util.compoundFile.writer.StreamWriter;
import kr.dogfoot.hwplib.writer.bodytext.paragraph.ForParagraphList;
import kr.dogfoot.hwplib.writer.bodytext.paragraph.control.endnote.ForListHeaderForFootnodeEndnote;
import java.io.IOException;
/**
 * An object for writing a footnote control
*
* @author neolord
*/
public class ForControlFootnote {
/**
     * Writes a footnote control.
     *
     * @param fn footnote control
     * @param sw stream writer
* @throws Exception
*/
public static void write(ControlFootnote fn, StreamWriter sw)
throws Exception {
ctrlHeader(fn.getHeader(), sw);
sw.upRecordLevel();
ForListHeaderForFootnodeEndnote.write(fn.getListHeader(), sw);
ForParagraphList.write(fn.getParagraphList(), sw);
sw.downRecordLevel();
}
/**
     * Writes the control header record of the footnote control.
     *
     * @param h  control header record of the footnote control
     * @param sw stream writer
* @throws IOException
*/
private static void ctrlHeader(CtrlHeaderFootnote h, StreamWriter sw)
throws IOException {
recordHeader(sw);
sw.writeUInt4(h.getCtrlId());
sw.writeUInt4(h.getNumber());
sw.writeWChar(h.getBeforeDecorationLetter().getBytes());
sw.writeWChar(h.getAfterDecorationLetter().getBytes());
sw.writeUInt4(h.getNumberShape().getValue());
sw.writeUInt4(h.getInstanceId());
}
/**
     * Writes the control header record.
     *
     * @param sw stream writer
* @throws IOException
*/
private static void recordHeader(StreamWriter sw) throws IOException {
sw.writeRecordHeader(HWPTag.CTRL_HEADER, 20);
}
}
| 984 |
342 | #include "point_cloud_plane_params.h"
/* Fast square root via the "fast inverse square root" trick (magic constant
 * 0x5f3759df): estimate 1/sqrt(x) from the float's bit pattern, refine it with
 * one Newton-Raphson step, then invert the estimate to approximate sqrt(x). */
float m_sqrt(float x)
{
    float half_x = 0.5 * x;
    int i = *((int *)&x);             /* reinterpret the float bits as an int */
    i = 0x5f3759df - (i >> 1);        /* initial estimate of 1/sqrt(x) */
    x = *((float *)&i);
    x = x * (1.5 - (half_x * x * x)); /* one Newton-Raphson refinement step */
    return 1 / x;                     /* 1 / (1/sqrt(x)) ~= sqrt(x) */
}
/* Population variance of x[0..len-1]: compute the mean, then the mean squared deviation. */
float getVar(float x[], int len)
{
int m = len;
float sum = 0;
for (int i = 0; i < m; i++)
{
sum += x[i];
}
float dAve = sum / m;
float dVar = 0;
for (int i = 0; i < m; i++)
{
dVar += (x[i] - dAve) * (x[i] - dAve);
}
return dVar / m;
}
 | 287 |
2,151 | <filename>chromecast/graphics/cast_window_manager_aura.h
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMECAST_GRAPHICS_CAST_WINDOW_MANAGER_AURA_H_
#define CHROMECAST_GRAPHICS_CAST_WINDOW_MANAGER_AURA_H_
#include <memory>
#include "base/macros.h"
#include "chromecast/graphics/cast_window_manager.h"
#include "ui/aura/client/window_parenting_client.h"
namespace aura {
namespace client {
class DefaultCaptureClient;
class ScreenPositionClient;
} // namespace client
} // namespace aura
namespace chromecast {
class CastFocusClientAura;
class CastSystemGestureEventHandler;
class CastWindowTreeHost;
class CastWindowManagerAura : public CastWindowManager,
public aura::client::WindowParentingClient {
public:
~CastWindowManagerAura() override;
// CastWindowManager implementation:
void TearDown() override;
void AddWindow(gfx::NativeView window) override;
gfx::NativeView GetRootWindow() override;
void SetWindowId(gfx::NativeView window, WindowId window_id) override;
void InjectEvent(ui::Event* event) override;
// aura::client::WindowParentingClient implementation:
aura::Window* GetDefaultParent(aura::Window* window,
const gfx::Rect& bounds) override;
void AddSideSwipeGestureHandler(
CastSideSwipeGestureHandlerInterface* handler) override;
void RemoveSideSwipeGestureHandler(
CastSideSwipeGestureHandlerInterface* handler) override;
void SetColorInversion(bool enable) override;
private:
friend class CastWindowManager;
// This class should only be instantiated by CastWindowManager::Create.
explicit CastWindowManagerAura(bool enable_input);
void Setup();
const bool enable_input_;
std::unique_ptr<CastWindowTreeHost> window_tree_host_;
std::unique_ptr<aura::client::DefaultCaptureClient> capture_client_;
std::unique_ptr<CastFocusClientAura> focus_client_;
std::unique_ptr<aura::client::ScreenPositionClient> screen_position_client_;
std::unique_ptr<CastSystemGestureEventHandler> system_gesture_event_handler_;
DISALLOW_COPY_AND_ASSIGN(CastWindowManagerAura);
};
} // namespace chromecast
#endif // CHROMECAST_GRAPHICS_CAST_WINDOW_MANAGER_AURA_H_
| 763 |