max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
312 |
<reponame>awujek/lldpd<gh_stars>100-1000
/* -*- mode: c; c-file-style: "openbsd" -*- */
/*
* Copyright (c) 2015 <NAME> <<EMAIL>>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <stdlib.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <check.h>
#include "common.h"
char filenameprefix[] = "edp_send";
#ifdef ENABLE_EDP
/* test_send_basic: verify that edp_send() emits exactly one EDP frame with
 * the expected chassis/port content when no VLAN is configured.  The
 * reference frame pkt1 below is a known-good capture; its protocol
 * dissection is reproduced in the comment preceding the bytes. */
START_TEST (test_send_basic)
{
int n;
/* Packet we should build:
Extreme Discovery Protocol
Version: 1
Reserved: 0
Data length: 74
Checksum: 0xde22 [correct]
[Good: True]
[Bad : False]
Sequence number: 0
Machine ID type: MAC (0)
Machine MAC: 5e:10:8e:e7:84:ad (5e:10:8e:e7:84:ad)
Display: "First chassis"
Marker 0x99, length 18, type 1 = Display
TLV Marker: 0x99
TLV type: Display (1)
TLV length: 18
Name: First chassis
Info: Slot/Port: 1/4, Version: 7.6.4.99
Marker 0x99, length 36, type 2 = Info
TLV Marker: 0x99
TLV type: Info (2)
TLV length: 36
Slot: 1
Port: 4
Virt chassis: 0
Reserved: 000000000000
Version: 7.6.4 Internal: 99
Version: 0x07060463
Version (major1): 7
Version (major2): 6
Version (sustaining): 4
Version (internal): 99
Connections: FFFFFFFF000000000000000000000000
Null
Marker 0x99, length 4, type 0 = Null
TLV Marker: 0x99
TLV type: Null (0)
TLV length: 4
*/
char pkt1[] = {
0x00, 0xe0, 0x2b, 0x00, 0x00, 0x00, 0x5e, 0x10,
0x8e, 0xe7, 0x84, 0xad, 0x00, 0x52, 0xaa, 0xaa,
0x03, 0x00, 0xe0, 0x2b, 0x00, 0xbb, 0x01, 0x00,
0x00, 0x4a, 0xde, 0x22, 0x00, 0x00, 0x00, 0x00,
0x5e, 0x10, 0x8e, 0xe7, 0x84, 0xad, 0x99, 0x01,
0x00, 0x12, 0x46, 0x69, 0x72, 0x73, 0x74, 0x20,
0x63, 0x68, 0x61, 0x73, 0x73, 0x69, 0x73, 0x00,
0x99, 0x02, 0x00, 0x24, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x07, 0x06, 0x04, 0x63, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x99, 0x00, 0x00, 0x04 };
struct packet *pkt;
/* Populate port and chassis.  `hardware`, `chassis` and the `pkts` list
 * are global fixtures provided by common.h.  The dissection above shows
 * Slot/Port 1/4 while p_id is "Not used", so the EDP port info is
 * presumably derived from h_ifname/h_ifindex rather than p_id —
 * TODO confirm against edp_send(). */
hardware.h_lport.p_id_subtype = LLDP_PORTID_SUBTYPE_IFNAME;
hardware.h_lport.p_id = "Not used";
hardware.h_lport.p_id_len = strlen(hardware.h_lport.p_id);
hardware.h_lport.p_descr = "Not used";
strlcpy(hardware.h_ifname, "eth3", sizeof(hardware.h_ifname));
hardware.h_ifindex = 4;
chassis.c_id_subtype = LLDP_CHASSISID_SUBTYPE_LLADDR;
chassis.c_id = macaddress;
chassis.c_id_len = ETHER_ADDR_LEN;
chassis.c_name = "First chassis";
/* Build packet */
n = edp_send(NULL, &hardware);
if (n != 0) {
fail("unable to build packet");
return;
}
if (TAILQ_EMPTY(&pkts)) {
fail("no packets sent");
return;
}
/* Exactly one frame must have been queued and it must match the
 * reference capture byte for byte. */
pkt = TAILQ_FIRST(&pkts);
ck_assert_int_eq(pkt->size, sizeof(pkt1));
fail_unless(memcmp(pkt->data, pkt1, sizeof(pkt1)) == 0);
fail_unless(TAILQ_NEXT(pkt, next) == NULL, "more than one packet sent");
}
END_TEST
#ifdef ENABLE_DOT1
/* test_send_vlans: verify that edp_send() emits two frames when VLANs are
 * configured on the port — the usual chassis/port frame (sequence 2)
 * followed by a frame carrying one VLAN TLV per configured VLAN
 * (sequence 3).  Both reference frames are dissected below. */
START_TEST (test_send_vlans)
{
int n;
/* Packets we should build:
Extreme Discovery Protocol
Version: 1
Reserved: 0
Data length: 74
Checksum: 0xde20 [correct]
[Good: True]
[Bad : False]
Sequence number: 2
Machine ID type: MAC (0)
Machine MAC: 5e:10:8e:e7:84:ad (5e:10:8e:e7:84:ad)
Display: "First chassis"
Marker 0x99, length 18, type 1 = Display
TLV Marker: 0x99
TLV type: Display (1)
TLV length: 18
Name: First chassis
Info: Slot/Port: 1/4, Version: 7.6.4.99
Marker 0x99, length 36, type 2 = Info
TLV Marker: 0x99
TLV type: Info (2)
TLV length: 36
Slot: 1
Port: 4
Virt chassis: 0
Reserved: 000000000000
Version: 7.6.4 Internal: 99
Version: 0x07060463
Version (major1): 7
Version (major2): 6
Version (sustaining): 4
Version (internal): 99
Connections: FFFFFFFF000000000000000000000000
Null
Marker 0x99, length 4, type 0 = Null
TLV Marker: 0x99
TLV type: Null (0)
TLV length: 4
Extreme Discovery Protocol
Version: 1
Reserved: 0
Data length: 102
Checksum: 0x28c4 [correct]
[Good: True]
[Bad : False]
Sequence number: 3
Machine ID type: MAC (0)
Machine MAC: 5e:10:8e:e7:84:ad (5e:10:8e:e7:84:ad)
Vlan: ID 157, Name "First VLAN"
Marker 0x99, length 27, type 5 = VL
TLV Marker: 0x99
TLV type: VL (5)
TLV length: 27
Flags: 0x00
0... .... = Flags-IP: Not set
.000 000. = Flags-reserved: 0x00
.... ...0 = Flags-Unknown: Not set
Reserved1: 00
Vlan ID: 157
Reserved2: 00000000
IP addr: 0.0.0.0 (0.0.0.0)
Name: First VLAN
Vlan: ID 1247, Name "Second VLAN"
Marker 0x99, length 28, type 5 = VL
TLV Marker: 0x99
TLV type: VL (5)
TLV length: 28
Flags: 0x00
0... .... = Flags-IP: Not set
.000 000. = Flags-reserved: 0x00
.... ...0 = Flags-Unknown: Not set
Reserved1: 00
Vlan ID: 1247
Reserved2: 00000000
IP addr: 0.0.0.0 (0.0.0.0)
Name: Second VLAN
Vlan: ID 741, Name "Third VLAN"
Marker 0x99, length 27, type 5 = VL
TLV Marker: 0x99
TLV type: VL (5)
TLV length: 27
Flags: 0x00
0... .... = Flags-IP: Not set
.000 000. = Flags-reserved: 0x00
.... ...0 = Flags-Unknown: Not set
Reserved1: 00
Vlan ID: 741
Reserved2: 00000000
IP addr: 0.0.0.0 (0.0.0.0)
Name: Third VLAN
Null
Marker 0x99, length 4, type 0 = Null
TLV Marker: 0x99
TLV type: Null (0)
TLV length: 4
*/
char pkt1[] = {
0x00, 0xe0, 0x2b, 0x00, 0x00, 0x00, 0x5e, 0x10,
0x8e, 0xe7, 0x84, 0xad, 0x00, 0x52, 0xaa, 0xaa,
0x03, 0x00, 0xe0, 0x2b, 0x00, 0xbb, 0x01, 0x00,
0x00, 0x4a, 0xde, 0x20, 0x00, 0x02, 0x00, 0x00,
0x5e, 0x10, 0x8e, 0xe7, 0x84, 0xad, 0x99, 0x01,
0x00, 0x12, 0x46, 0x69, 0x72, 0x73, 0x74, 0x20,
0x63, 0x68, 0x61, 0x73, 0x73, 0x69, 0x73, 0x00,
0x99, 0x02, 0x00, 0x24, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x07, 0x06, 0x04, 0x63, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x99, 0x00, 0x00, 0x04 };
char pkt2[] = {
0x00, 0xe0, 0x2b, 0x00, 0x00, 0x00, 0x5e, 0x10,
0x8e, 0xe7, 0x84, 0xad, 0x00, 0x6e, 0xaa, 0xaa,
0x03, 0x00, 0xe0, 0x2b, 0x00, 0xbb, 0x01, 0x00,
0x00, 0x66, 0x28, 0xc4, 0x00, 0x03, 0x00, 0x00,
0x5e, 0x10, 0x8e, 0xe7, 0x84, 0xad, 0x99, 0x05,
0x00, 0x1b, 0x00, 0x00, 0x00, 0x9d, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0x69,
0x72, 0x73, 0x74, 0x20, 0x56, 0x4c, 0x41, 0x4e,
0x00, 0x99, 0x05, 0x00, 0x1c, 0x00, 0x00, 0x04,
0xdf, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20,
0x56, 0x4c, 0x41, 0x4e, 0x00, 0x99, 0x05, 0x00,
0x1b, 0x00, 0x00, 0x02, 0xe5, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x68, 0x69,
0x72, 0x64, 0x20, 0x56, 0x4c, 0x41, 0x4e, 0x00,
0x99, 0x00, 0x00, 0x04 };
struct packet *pkt;
/* Stack-allocated VLAN entries: only v_name/v_vid are set here; the
 * v_entries linkage is filled in by TAILQ_INSERT_TAIL below. */
struct lldpd_vlan vlan1, vlan2, vlan3;
/* Populate port and chassis (globals from common.h), same fixture as
 * test_send_basic. */
hardware.h_lport.p_id_subtype = LLDP_PORTID_SUBTYPE_IFNAME;
hardware.h_lport.p_id = "Not used";
hardware.h_lport.p_id_len = strlen(hardware.h_lport.p_id);
hardware.h_lport.p_descr = "Not used";
strlcpy(hardware.h_ifname, "eth3", sizeof(hardware.h_ifname));
hardware.h_ifindex = 4;
chassis.c_id_subtype = LLDP_CHASSISID_SUBTYPE_LLADDR;
chassis.c_id = macaddress;
chassis.c_id_len = ETHER_ADDR_LEN;
chassis.c_name = "First chassis";
vlan1.v_name = "First VLAN"; vlan1.v_vid = 157;
vlan2.v_name = "Second VLAN"; vlan2.v_vid = 1247;
vlan3.v_name = "Third VLAN"; vlan3.v_vid = 741;
TAILQ_INSERT_TAIL(&hardware.h_lport.p_vlans, &vlan1, v_entries);
TAILQ_INSERT_TAIL(&hardware.h_lport.p_vlans, &vlan2, v_entries);
TAILQ_INSERT_TAIL(&hardware.h_lport.p_vlans, &vlan3, v_entries);
/* Build packet */
n = edp_send(NULL, &hardware);
if (n != 0) {
fail("unable to build packet");
return;
}
if (TAILQ_EMPTY(&pkts)) {
fail("no packets sent");
return;
}
/* First frame: base chassis/port information. */
pkt = TAILQ_FIRST(&pkts);
ck_assert_int_eq(pkt->size, sizeof(pkt1));
fail_unless(memcmp(pkt->data, pkt1, sizeof(pkt1)) == 0);
/* Second frame: the three VLAN TLVs, in insertion order. */
pkt = TAILQ_NEXT(pkt, next);
if (!pkt) {
fail("need one more packet");
return;
}
ck_assert_int_eq(pkt->size, sizeof(pkt2));
fail_unless(memcmp(pkt->data, pkt2, sizeof(pkt2)) == 0);
fail_unless(TAILQ_NEXT(pkt, next) == NULL, "more than two packets sent");
}
END_TEST
#endif
/* test_recv_edp: decode two EDP frames captured from a real Extreme
 * switch.  The first frame carries the chassis/port info and must yield a
 * new chassis/port pair with the expected identifiers.  The second frame
 * (when DOT1 is enabled) carries only VLAN TLVs and is expected to
 * enrich the already-known port instead of creating a new neighbor. */
START_TEST (test_recv_edp)
{
char pkt1[] = {
0x00, 0xe0, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x04,
0x96, 0x05, 0x44, 0x6f, 0x01, 0x44, 0xaa, 0xaa,
0x03, 0x00, 0xe0, 0x2b, 0x00, 0xbb, 0x01, 0x00,
0x01, 0x3c, 0x05, 0xdf, 0x03, 0x0f, 0x00, 0x00,
0x00, 0x04, 0x96, 0x05, 0x44, 0x6f, 0x99, 0x02,
0x00, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x06,
0x04, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x99, 0x01, 0x01, 0x04, 0x6e, 0x65,
0x30, 0x35, 0x30, 0x31, 0x73, 0x77, 0x2e, 0x58,
0x58, 0x58, 0x58, 0x58, 0x58, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0x7b, 0x00, 0x00, 0x00,
0x00, 0x00, 0x80, 0x79, 0x0d, 0xec, 0xff, 0xff,
0xff, 0xff, 0x80, 0xa7, 0x8b, 0x24, 0x00, 0x00,
0x00, 0x00, 0x00, 0x17, 0x08, 0x7e, 0xe5, 0xe2,
0x00, 0x00, 0xee, 0xee, 0xee, 0xee, 0x00, 0x00,
0x00, 0x02, 0x81, 0xb2, 0x19, 0xf0, 0x00, 0x00,
0x00, 0x02, 0x80, 0xa5, 0x67, 0x20, 0xee, 0xee,
0xee, 0xee, 0x80, 0xea, 0x8c, 0xac, 0x00, 0x00,
0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0xe0, 0x2b, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0xe0, 0x2b, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x80, 0xa4, 0x86, 0x2c, 0xee, 0xee,
0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0x00, 0x00,
0x00, 0x00, 0xee, 0xee, 0xee, 0xee, 0x00, 0xe0,
0x2b, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
0x00, 0x00, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0x99, 0x00,
0x00, 0x04 };
/* This is:
IEEE 802.3 Ethernet
Destination: Extreme-EDP (00:e0:2b:00:00:00)
Source: ExtremeN_05:44:6f (00:04:96:05:44:6f)
Length: 324
Logical-Link Control
DSAP: SNAP (0xaa)
IG Bit: Individual
SSAP: SNAP (0xaa)
CR Bit: Command
Control field: U, func=UI (0x03)
000. 00.. = Command: Unnumbered Information (0x00)
.... ..11 = Frame type: Unnumbered frame (0x03)
Organization Code: Extreme Networks (0x00e02b)
PID: EDP (0x00bb)
Extreme Discovery Protocol
Version: 1
Reserved: 0
Data length: 316
Checksum: 0xdf05 [correct]
[Good: True]
[Bad : False]
Sequence number: 783
Machine ID type: MAC (0)
Machine MAC: ExtremeN_05:44:6f (00:04:96:05:44:6f)
Info: Slot/Port: 1/1, Version: 7.6.4.0
Marker 0x99, length 36, type 2 = Info
TLV Marker: 0x99
TLV type: Info (2)
TLV length: 36
Slot: 1
Port: 1
Virt chassis: 0
Reserved: 000000000000
Version: 7.6.4 Internal: 0
Version: 0x07060400
Version (major1): 7
Version (major2): 6
Version (sustaining): 4
Version (internal): 0
Connections: FFFF0000000000000000000000000000
Display: "ne0501sw.XXXXXX"
Marker 0x99, length 260, type 1 = Display
TLV Marker: 0x99
TLV type: Display (1)
TLV length: 260
Name: ne0501sw.XXXXXX
Null
Marker 0x99, length 4, type 0 = Null
TLV Marker: 0x99
TLV type: Null (0)
TLV length: 4
*/
#ifdef ENABLE_DOT1
char pkt2[] = {
0x00, 0xe0, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x04,
0x96, 0x05, 0x44, 0x6f, 0x01, 0x48, 0xaa, 0xaa,
0x03, 0x00, 0xe0, 0x2b, 0x00, 0xbb, 0x01, 0x00,
0x01, 0x40, 0x73, 0x04, 0x03, 0x10, 0x00, 0x00,
0x00, 0x04, 0x96, 0x05, 0x44, 0x6f, 0x99, 0x05,
0x00, 0x64, 0x80, 0x00, 0x00, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x65,
0x66, 0x61, 0x75, 0x6c, 0x74, 0x00, 0x43, 0x61,
0x6e, 0x6e, 0x6f, 0x74, 0x20, 0x73, 0x61, 0x76,
0x65, 0x20, 0x62, 0x72, 0x69, 0x64, 0x67, 0x65,
0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
0x20, 0x74, 0x6f, 0x20, 0x6e, 0x76, 0x20, 0x28,
0x25, 0x64, 0x29, 0x0a, 0x00, 0x00, 0x4e, 0x6f,
0x20, 0x62, 0x72, 0x69, 0x64, 0x67, 0x65, 0x20,
0x66, 0x6f, 0x72, 0x20, 0x73, 0x75, 0x70, 0x65,
0x72, 0x42, 0x72, 0x69, 0x64, 0x67, 0x65, 0x49,
0x6e, 0x73, 0x74, 0x20, 0x25, 0x64, 0x00, 0x00,
0x00, 0x00, 0x99, 0x05, 0x00, 0x64, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x4d, 0x61, 0x63, 0x56, 0x6c, 0x61,
0x6e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65,
0x72, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x05,
0x00, 0x64, 0x80, 0x00, 0x00, 0x32, 0x00, 0x00,
0x00, 0x00, 0x0a, 0x32, 0x00, 0x3f, 0x41, 0x64,
0x6d, 0x69, 0x6e, 0x42, 0x32, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x99, 0x00, 0x00, 0x04 };
/* This is:
IEEE 802.3 Ethernet
Destination: Extreme-EDP (00:e0:2b:00:00:00)
Source: ExtremeN_05:44:6f (00:04:96:05:44:6f)
Length: 328
Logical-Link Control
DSAP: SNAP (0xaa)
IG Bit: Individual
SSAP: SNAP (0xaa)
CR Bit: Command
Control field: U, func=UI (0x03)
000. 00.. = Command: Unnumbered Information (0x00)
.... ..11 = Frame type: Unnumbered frame (0x03)
Organization Code: Extreme Networks (0x00e02b)
PID: EDP (0x00bb)
Extreme Discovery Protocol
Version: 1
Reserved: 0
Data length: 320
Checksum: 0x7304 [correct]
[Good: True]
[Bad : False]
Sequence number: 784
Machine ID type: MAC (0)
Machine MAC: ExtremeN_05:44:6f (00:04:96:05:44:6f)
Vlan: ID 1, Name "Default"
Marker 0x99, length 100, type 5 = VL
TLV Marker: 0x99
TLV type: VL (5)
TLV length: 100
Flags: 0x80
1... .... = Flags-IP: Set
.000 000. = Flags-reserved: 0x00
.... ...0 = Flags-Unknown: Not set
Reserved1: 00
Vlan ID: 1
Reserved2: 00000000
IP addr: 0.0.0.0 (0.0.0.0)
Name: Default
Vlan: ID 0, Name "MacVlanDiscover"
Marker 0x99, length 100, type 5 = VL
TLV Marker: 0x99
TLV type: VL (5)
TLV length: 100
Flags: 0x00
0... .... = Flags-IP: Not set
.000 000. = Flags-reserved: 0x00
.... ...0 = Flags-Unknown: Not set
Reserved1: 00
Vlan ID: 0
Reserved2: 00000000
IP addr: 0.0.0.0 (0.0.0.0)
Name: MacVlanDiscover
Vlan: ID 50, Name "AdminB2"
Marker 0x99, length 100, type 5 = VL
TLV Marker: 0x99
TLV type: VL (5)
TLV length: 100
Flags: 0x80
1... .... = Flags-IP: Set
.000 000. = Flags-reserved: 0x00
.... ...0 = Flags-Unknown: Not set
Reserved1: 00
Vlan ID: 50
Reserved2: 00000000
IP addr: 10.50.0.63 (10.50.0.63)
Name: AdminB2
Null
Marker 0x99, length 4, type 0 = Null
TLV Marker: 0x99
TLV type: Null (0)
TLV length: 4
*/
struct lldpd_vlan *vlan;
#endif
struct lldpd_chassis *nchassis = NULL;
struct lldpd_port *nport = NULL;
struct lldpd cfg;
char mac1[] = { 0x00, 0x04, 0x96, 0x05, 0x44, 0x6f };
/* Only the management-interface pattern is initialized; the rest of cfg
 * is left as-is — presumably edp_decode() reads nothing else from it.
 * TODO confirm against edp_decode(). */
cfg.g_config.c_mgmt_pattern = NULL;
fail_unless(edp_decode(&cfg, pkt1, sizeof(pkt1), &hardware,
&nchassis, &nport) != -1);
if (!nchassis || !nport) {
fail("unable to decode packet");
return;
}
/* Chassis is identified by the switch MAC; port by "slot/port". */
ck_assert_int_eq(nchassis->c_id_subtype,
LLDP_CHASSISID_SUBTYPE_LLADDR);
ck_assert_int_eq(nchassis->c_id_len, ETHER_ADDR_LEN);
fail_unless(memcmp(nchassis->c_id, mac1, ETHER_ADDR_LEN) == 0);
ck_assert_int_eq(nport->p_id_subtype,
LLDP_PORTID_SUBTYPE_IFNAME);
ck_assert_int_eq(nport->p_id_len, strlen("1/1"));
fail_unless(memcmp(nport->p_id,
"1/1", strlen("1/1")) == 0);
ck_assert_str_eq(nport->p_descr, "Slot 1 / Port 1");
ck_assert_str_eq(nchassis->c_name, "ne0501sw.XXXXXX");
ck_assert_str_eq(nchassis->c_descr, "EDP enabled device, version 7.6.4.0");
ck_assert_int_eq(nchassis->c_cap_enabled, 0);
#ifdef ENABLE_DOT1
/* Add this port to list of remote port for hardware port */
TAILQ_INSERT_TAIL(&hardware.h_rports, nport, p_entries);
nport->p_chassis = nchassis;
nport->p_protocol = LLDPD_MODE_EDP;
/* Receive second packet: a VLAN-only frame.  edp_decode() is expected
 * to return -1 (no new chassis/port) and instead attach the VLAN and
 * management-address info to the port registered above. */
nchassis = NULL; nport = NULL;
fail_unless(edp_decode(&cfg, pkt2, sizeof(pkt2), &hardware,
&nchassis, &nport) == -1);
nport = TAILQ_FIRST(&hardware.h_rports);
if (!nport) {
fail("unable to find our previous port?");
return;
}
/* The management address comes from the AdminB2 VLAN's IP. */
ck_assert_int_eq(TAILQ_FIRST(&nport->p_chassis->c_mgmt)->m_addr.inet.s_addr,
(u_int32_t)inet_addr("10.50.0.63"));
if (TAILQ_EMPTY(&nport->p_vlans)) {
fail("no VLAN");
return;
}
/* The three VLANs must appear in frame order. */
vlan = TAILQ_FIRST(&nport->p_vlans);
ck_assert_int_eq(vlan->v_vid, 1);
ck_assert_str_eq(vlan->v_name, "Default");
vlan = TAILQ_NEXT(vlan, v_entries);
if (!vlan) {
fail("no more VLAN");
return;
}
ck_assert_int_eq(vlan->v_vid, 0);
ck_assert_str_eq(vlan->v_name, "MacVlanDiscover");
vlan = TAILQ_NEXT(vlan, v_entries);
if (!vlan) {
fail("no more VLAN");
return;
}
ck_assert_int_eq(vlan->v_vid, 50);
ck_assert_str_eq(vlan->v_name, "AdminB2");
vlan = TAILQ_NEXT(vlan, v_entries);
fail_unless(vlan == NULL);
#endif
}
END_TEST
#endif
/*
 * Assemble the EDP test suite.
 *
 * When EDP support is compiled out, the suite is returned empty so the
 * runner still has something valid to execute.  Send-side tests need the
 * pcap fixture to capture the frames they emit.
 */
Suite *
edp_suite(void)
{
	Suite *suite = suite_create("EDP");
#ifdef ENABLE_EDP
	TCase *send_tc = tcase_create("Send EDP packets");
	TCase *recv_tc = tcase_create("Receive EDP packets");

	tcase_add_checked_fixture(send_tc, pcap_setup, pcap_teardown);
	tcase_add_test(send_tc, test_send_basic);
#ifdef ENABLE_DOT1
	tcase_add_test(send_tc, test_send_vlans);
#endif
	suite_add_tcase(suite, send_tc);

	tcase_add_test(recv_tc, test_recv_edp);
	suite_add_tcase(suite, recv_tc);
#endif
	return suite;
}
/*
 * Entry point: run the EDP suite and report overall success/failure.
 *
 * Forking is disabled because the tests need to write files that must be
 * visible to the checking process.
 */
int
main()
{
	SRunner *runner;
	int failures;

	runner = srunner_create(edp_suite());
	srunner_set_fork_status(runner, CK_NOFORK); /* Can't fork because
						       we need to write
						       files */
	srunner_run_all(runner, CK_ENV);
	failures = srunner_ntests_failed(runner);
	srunner_free(runner);
	return (failures == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
}
| 11,620 |
852 |
# CMSSW configuration for the PAT MC-matching exercise: swaps the standard
# muon and gen-jet matchers for user-provided ones from mcMatch_cfi.
# Start with a skeleton process which gets imported with the following line
from PhysicsTools.PatAlgos.patTemplate_cfg import *
# Load the standard PAT config
process.load( "PhysicsTools.PatAlgos.patSequences_cff" )
# Load the exercise config
process.load( "PhysicsTools.PatExamples.mcMatch_cfi" ) # The file to modify!
# Modify the default config according to needed exercise settings.
# You can comment these lines in order to run the default rather than
# your OWN MC matching from PhysicsTools/PatExamples/python/mcMatch_cfi
# (the module loaded above).
# CAUTION: Uncommented, this does NOT run out-of-the-box!
# Own muon match: remove the standard matcher and the PAT muon producer,
# then re-append both so the producer runs after the custom matcher.
process.makeAllLayer1Muons.remove( process.muonMatch )
process.makeAllLayer1Muons += process.myMuonMatch
process.makeAllLayer1Muons.remove( process.allLayer1Muons )
process.makeAllLayer1Muons += process.allLayer1Muons
# Point the PAT muons at the custom matcher and embed its result.
process.allLayer1Muons.genParticleMatch = "myMuonMatch"
process.allLayer1Muons.embedGenMatch = True
# Own jet match to MC jets: same remove/re-append dance as for muons.
process.makeAllLayer1Jets.remove( process.jetGenJetMatch )
process.makeAllLayer1Jets += process.myJetGenJetMatch
process.makeAllLayer1Jets.remove( process.allLayer1Jets )
process.makeAllLayer1Jets += process.allLayer1Jets
process.allLayer1Jets.genJetMatch = "myJetGenJetMatch"
# Define the path
process.p = cms.Path(
process.patDefaultSequence
)
process.maxEvents.input = 1000 # Reduce number of events for testing.
process.out.fileName = 'edmPatMcMatch.root'
process.out.outputCommands += [ 'keep recoGenParticles_genParticles_*_*' ] # Keeps the MC objects for references
process.options.wantSummary = False # to suppress the long output at the end of the job
| 507 |
3,861 |
<reponame>crazyinsanejames/CloverBootloader<gh_stars>1000+
/*
* refit/scan/securebootkeys.h
*
* Copyright (c) 2006-2010 <NAME>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of <NAME> nor the names of the
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CloverDB.h"
#include "CanonicalDB.h"
#include "MSPCADB.h"
#include "MSUEFICADB.h"
#include "CloverKEK.h"
#include "MSKEK.h"
// Secure boot platform key.
// Pre-built payload for installing Clover's platform key (PK): the blob
// appears to start with a timestamp/header followed by a PKCS#7/CMS
// SignedData structure embedding a self-signed X.509 certificate whose
// subject CN is "Clover Platform Certificate" (the DER structures and the
// CN string are visible in the bytes below).  NOTE(review): the exact
// framing (e.g. EFI_VARIABLE_AUTHENTICATION_2 layout) is assumed from the
// byte pattern — confirm against the tool that generated this blob.
STATIC CONST UINT8 gSecureBootPlatformSignedKey[] = {
0xde, 0x07, 0x0b, 0x05, 0x01, 0x37, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xc0, 0x05, 0x00, 0x00, 0x00, 0x02, 0xf1, 0x0e, 0x9d, 0xd2, 0xaf, 0x4a, 0xdf, 0x68, 0xee, 0x49,
0x8a, 0xa9, 0x34, 0x7d, 0x37, 0x56, 0x65, 0xa7, 0x30, 0x82, 0x05, 0xa4, 0x06, 0x09, 0x2a, 0x86,
0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x02, 0xa0, 0x82, 0x05, 0x95, 0x30, 0x82, 0x05, 0x91, 0x02,
0x01, 0x01, 0x31, 0x0f, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02,
0x01, 0x05, 0x00, 0x30, 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x01,
0xa0, 0x82, 0x03, 0x23, 0x30, 0x82, 0x03, 0x1f, 0x30, 0x82, 0x02, 0x07, 0xa0, 0x03, 0x02, 0x01,
0x02, 0x02, 0x09, 0x00, 0xf0, 0x33, 0x6a, 0xed, 0x40, 0x72, 0xc5, 0x15, 0x30, 0x0d, 0x06, 0x09,
0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x26, 0x31, 0x24, 0x30,
0x22, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x1b, 0x43, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x50,
0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x65, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x33, 0x31, 0x32, 0x30, 0x35, 0x30, 0x31, 0x34,
0x36, 0x31, 0x37, 0x5a, 0x17, 0x0d, 0x32, 0x33, 0x31, 0x32, 0x30, 0x33, 0x30, 0x31, 0x34, 0x36,
0x31, 0x37, 0x5a, 0x30, 0x26, 0x31, 0x24, 0x30, 0x22, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x1b,
0x43, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x20,
0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x30, 0x82, 0x01, 0x22, 0x30,
0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82,
0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xcb, 0xb1, 0x82, 0x68,
0xc1, 0x71, 0xab, 0xf5, 0x35, 0x03, 0xaa, 0x0e, 0xf7, 0x2e, 0x6d, 0x4b, 0x63, 0x98, 0x8f, 0xd6,
0xae, 0xb0, 0xeb, 0x95, 0x36, 0x70, 0xf9, 0x9d, 0xe4, 0xf5, 0x6b, 0x0d, 0xc0, 0x81, 0x36, 0x1d,
0xcf, 0x01, 0x45, 0x4c, 0xd6, 0xf0, 0xc5, 0x4e, 0x7b, 0x4e, 0x49, 0x1a, 0x84, 0x64, 0xc8, 0x7c,
0x87, 0xb8, 0xf3, 0xb2, 0x0a, 0xa1, 0x1e, 0xd6, 0xb7, 0x42, 0x2d, 0xc4, 0x79, 0x58, 0x19, 0x78,
0x9c, 0x7e, 0x76, 0x8d, 0xfa, 0xb9, 0x6a, 0xdc, 0x5b, 0xf8, 0x01, 0xa9, 0x53, 0x71, 0xc7, 0x0a,
0x9d, 0x73, 0x2c, 0x4c, 0xdf, 0xe5, 0x88, 0xb4, 0x10, 0x55, 0xe7, 0xea, 0xce, 0xca, 0xc8, 0xdd,
0x8a, 0xdb, 0x93, 0x43, 0x14, 0x59, 0xb4, 0x04, 0xab, 0xc7, 0xdc, 0xc7, 0x77, 0xc4, 0x85, 0xf6,
0x02, 0x59, 0x22, 0x91, 0xd0, 0x32, 0x78, 0x47, 0xf2, 0x2d, 0x0d, 0xd4, 0x81, 0x95, 0x8f, 0x8d,
0xd0, 0x8d, 0x35, 0x1f, 0x7c, 0x2f, 0x44, 0x13, 0x2b, 0x65, 0x3e, 0x78, 0xba, 0x02, 0x35, 0x2b,
0x56, 0x82, 0x3f, 0x44, 0x56, 0x6b, 0x78, 0x8a, 0x6e, 0xd7, 0xa2, 0xd2, 0xfd, 0xcc, 0x85, 0x43,
0x8d, 0x73, 0xd8, 0xb4, 0x2f, 0x60, 0xa9, 0x04, 0xc2, 0xa4, 0xc1, 0x8c, 0xfa, 0x70, 0x0f, 0xe6,
0xdb, 0xcf, 0xbe, 0x95, 0xd2, 0xce, 0xa6, 0x3a, 0x62, 0xed, 0x6d, 0xc8, 0xa4, 0xa1, 0x8a, 0xe2,
0xb9, 0x01, 0xfd, 0x39, 0x81, 0xa7, 0x2d, 0x6c, 0xa0, 0x8d, 0xc2, 0x49, 0xdb, 0x3a, 0xc7, 0x46,
0x9b, 0x46, 0x08, 0x17, 0xd4, 0xbb, 0x78, 0xb6, 0x36, 0x5c, 0xc8, 0x79, 0x9f, 0x62, 0xe7, 0xe2,
0x46, 0x07, 0x32, 0xbc, 0x18, 0xad, 0xf0, 0xb5, 0x24, 0x34, 0x49, 0xfd, 0xe0, 0x85, 0xcb, 0x22,
0xaa, 0xbc, 0x3a, 0x01, 0x36, 0x1a, 0x1a, 0x07, 0x05, 0x90, 0xb1, 0xf5, 0x02, 0x03, 0x01, 0x00,
0x01, 0xa3, 0x50, 0x30, 0x4e, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14,
0xb4, 0xc6, 0xdc, 0xeb, 0x96, 0x35, 0xe5, 0xe4, 0xde, 0xf9, 0x53, 0x75, 0x62, 0xf7, 0x7c, 0x79,
0x1a, 0x45, 0x91, 0x3e, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, 0x80,
0x14, 0xb4, 0xc6, 0xdc, 0xeb, 0x96, 0x35, 0xe5, 0xe4, 0xde, 0xf9, 0x53, 0x75, 0x62, 0xf7, 0x7c,
0x79, 0x1a, 0x45, 0x91, 0x3e, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, 0x03,
0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b,
0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x05, 0xda, 0x57, 0xea, 0x10, 0xfd, 0xe6, 0x19, 0x6c,
0x37, 0xde, 0xac, 0x5a, 0xe0, 0x6a, 0x87, 0x17, 0x94, 0x80, 0x5e, 0xa6, 0xe8, 0xb8, 0x8c, 0xe6,
0x43, 0xcc, 0x8b, 0x1d, 0x96, 0x87, 0xf2, 0xd9, 0xfd, 0xc7, 0xb3, 0x1c, 0x67, 0x7e, 0xf0, 0x60,
0x5e, 0x2b, 0xdd, 0x60, 0xc7, 0x18, 0xcc, 0x34, 0x8e, 0x0b, 0x4e, 0xda, 0x80, 0x30, 0x4b, 0xc9,
0x66, 0xeb, 0x3d, 0x95, 0xf6, 0xac, 0xd8, 0x76, 0x7e, 0x40, 0x30, 0x6f, 0xb0, 0xb3, 0xf6, 0x7d,
0x13, 0x99, 0x69, 0xdc, 0x6a, 0xd4, 0xd1, 0xc0, 0x71, 0x69, 0xf3, 0x14, 0x9c, 0x9e, 0xe1, 0xb9,
0x8e, 0x1e, 0xcb, 0x6b, 0x4e, 0xc9, 0xc0, 0x4e, 0x22, 0x4a, 0xe7, 0xd2, 0x37, 0x33, 0xd0, 0x15,
0xb9, 0x24, 0x04, 0xe8, 0xba, 0x6c, 0x25, 0x29, 0x67, 0xd1, 0xfb, 0x16, 0x11, 0xca, 0x68, 0xc4,
0x7c, 0x3f, 0x42, 0x6f, 0x7c, 0x59, 0x03, 0xf0, 0x59, 0x59, 0xd4, 0x72, 0x61, 0x8c, 0xb7, 0xc4,
0xe8, 0xf0, 0x4b, 0x98, 0x45, 0x95, 0x09, 0x77, 0x17, 0x94, 0x9f, 0xcc, 0xe4, 0xaa, 0x9d, 0xf0,
0x8f, 0xd8, 0x1c, 0x20, 0x2a, 0x92, 0xe0, 0x74, 0x0d, 0x05, 0xf0, 0xe7, 0x4f, 0x25, 0xd4, 0x72,
0xa1, 0xa4, 0xf1, 0x9a, 0x37, 0x84, 0x1f, 0x75, 0xb0, 0x8f, 0xc9, 0x71, 0x3e, 0x25, 0x28, 0x96,
0xf6, 0xd8, 0x40, 0xa3, 0x4e, 0xd7, 0xaa, 0x62, 0xe5, 0x87, 0xac, 0x06, 0x3f, 0x3c, 0x04, 0xe6,
0x64, 0x74, 0x37, 0x05, 0xe6, 0x65, 0xaa, 0xc0, 0x8f, 0x6c, 0xb7, 0x2f, 0x93, 0xf9, 0x95, 0x6a,
0x9d, 0x3c, 0xc7, 0xbb, 0x20, 0x86, 0x3e, 0x05, 0x04, 0x2a, 0xc3, 0xe1, 0xcb, 0x5f, 0x32, 0xd7,
0xa3, 0x9f, 0xb3, 0x1d, 0x84, 0xeb, 0x30, 0xeb, 0x8a, 0x10, 0xf0, 0xc7, 0x38, 0x2c, 0x18, 0x74,
0x2f, 0x76, 0x0a, 0x01, 0xda, 0xa5, 0x4f, 0x31, 0x82, 0x02, 0x45, 0x30, 0x82, 0x02, 0x41, 0x02,
0x01, 0x01, 0x30, 0x33, 0x30, 0x26, 0x31, 0x24, 0x30, 0x22, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c,
0x1b, 0x43, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d,
0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x02, 0x09, 0x00, 0xf0,
0x33, 0x6a, 0xed, 0x40, 0x72, 0xc5, 0x15, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0xa0, 0x81, 0xe4, 0x30, 0x18, 0x06, 0x09, 0x2a, 0x86, 0x48,
0x86, 0xf7, 0x0d, 0x01, 0x09, 0x03, 0x31, 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
0x01, 0x07, 0x01, 0x30, 0x1c, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x05,
0x31, 0x0f, 0x17, 0x0d, 0x31, 0x33, 0x31, 0x32, 0x30, 0x35, 0x30, 0x31, 0x35, 0x35, 0x33, 0x34,
0x5a, 0x30, 0x2f, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x04, 0x31, 0x22,
0x04, 0x20, 0x9f, 0xac, 0xf0, 0x58, 0x65, 0x7c, 0x0d, 0xd6, 0xf5, 0xc5, 0x87, 0xd8, 0x7c, 0xc1,
0x29, 0xde, 0x6e, 0xdc, 0x81, 0x7c, 0x5c, 0x64, 0xcf, 0xd5, 0xf0, 0xab, 0x62, 0xa5, 0x18, 0x94,
0xa7, 0xc3, 0x30, 0x79, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x0f, 0x31,
0x6c, 0x30, 0x6a, 0x30, 0x0b, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x2a,
0x30, 0x0b, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x16, 0x30, 0x0b, 0x06,
0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x02, 0x30, 0x0a, 0x06, 0x08, 0x2a, 0x86,
0x48, 0x86, 0xf7, 0x0d, 0x03, 0x07, 0x30, 0x0e, 0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
0x03, 0x02, 0x02, 0x02, 0x00, 0x80, 0x30, 0x0d, 0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
0x03, 0x02, 0x02, 0x01, 0x40, 0x30, 0x07, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x07, 0x30, 0x0d,
0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x02, 0x02, 0x01, 0x28, 0x30, 0x0d, 0x06,
0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x04, 0x82, 0x01, 0x00,
0x06, 0x80, 0x28, 0x6a, 0x64, 0x77, 0x33, 0x07, 0x55, 0xa6, 0x46, 0x6e, 0x85, 0x28, 0x57, 0xa1,
0xa7, 0x1e, 0x38, 0xbb, 0xa2, 0x6e, 0x71, 0x8a, 0x33, 0x09, 0xc3, 0x01, 0x21, 0xfb, 0xc2, 0x23,
0x48, 0x27, 0x86, 0x4a, 0x87, 0xd8, 0x2f, 0x6b, 0x77, 0xe3, 0xd2, 0xbf, 0x75, 0x5c, 0xd2, 0x47,
0xf3, 0xbf, 0x0a, 0xbe, 0xca, 0x41, 0x65, 0x90, 0xfb, 0x5b, 0x0c, 0xaf, 0x3f, 0x9a, 0x84, 0x9d,
0x88, 0x40, 0x96, 0x78, 0x06, 0x20, 0x12, 0xd2, 0xce, 0xf6, 0x96, 0xcb, 0xb6, 0xcb, 0xdb, 0xb6,
0xb7, 0x2b, 0xc3, 0x30, 0xea, 0xd6, 0x4e, 0xbb, 0x2e, 0x81, 0x0b, 0x57, 0x9b, 0x97, 0xd1, 0x85,
0xed, 0x10, 0xa2, 0x27, 0x89, 0xe1, 0x66, 0xab, 0x43, 0x6b, 0x5d, 0xeb, 0x9a, 0x65, 0x6d, 0x36,
0x1b, 0xe1, 0xe3, 0x46, 0xba, 0xaa, 0xf3, 0x8c, 0xc8, 0x91, 0x5d, 0x04, 0xc7, 0xb9, 0x27, 0xd7,
0xae, 0x0b, 0x05, 0x97, 0x8b, 0x90, 0x57, 0x63, 0xf4, 0xb0, 0x2d, 0x9a, 0x3d, 0x64, 0x44, 0xfb,
0x45, 0x86, 0x90, 0xed, 0x54, 0x6c, 0x93, 0x84, 0xf0, 0x09, 0xdf, 0xdc, 0x72, 0xb7, 0x21, 0xa2,
0x66, 0xfa, 0x06, 0x6e, 0xbb, 0x97, 0x07, 0x5b, 0x99, 0xd9, 0x60, 0x77, 0x0b, 0xaf, 0xe6, 0x8f,
0x66, 0x0b, 0x09, 0x44, 0xae, 0x82, 0x6e, 0xcc, 0x66, 0x81, 0x21, 0x2a, 0x81, 0xd6, 0x5c, 0xf2,
0x53, 0x23, 0x4c, 0x3b, 0x68, 0x39, 0xf7, 0x00, 0x69, 0xaf, 0xa3, 0x09, 0x6f, 0x40, 0x71, 0xd9,
0x4f, 0x5e, 0x52, 0x41, 0x31, 0x5f, 0xab, 0xe5, 0x62, 0x8b, 0x32, 0x49, 0xdf, 0x5d, 0x8c, 0x9d,
0xea, 0x68, 0x8c, 0x08, 0x3c, 0xc9, 0x99, 0x77, 0x4e, 0xa8, 0xa4, 0xc9, 0x8a, 0xdc, 0x88, 0x98,
0xac, 0xcb, 0x13, 0x03, 0x59, 0x63, 0xb5, 0xcf, 0xb2, 0xe0, 0x3e, 0x10, 0x4f, 0x6c, 0x0d, 0x7d,
0xa1, 0x59, 0xc0, 0xa5, 0xe4, 0x94, 0xa7, 0x4a, 0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72,
0x4f, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x00, 0x00, 0xe3, 0x28, 0x41, 0x5c,
0x03, 0xb5, 0x14, 0x45, 0xbe, 0x93, 0x01, 0x6e, 0xab, 0x3f, 0x99, 0x11, 0x30, 0x82, 0x03, 0x1f,
0x30, 0x82, 0x02, 0x07, 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0xf0, 0x33, 0x6a, 0xed,
0x40, 0x72, 0xc5, 0x15, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
0x0b, 0x05, 0x00, 0x30, 0x26, 0x31, 0x24, 0x30, 0x22, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x1b,
0x43, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x20,
0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x30, 0x1e, 0x17, 0x0d, 0x31,
0x33, 0x31, 0x32, 0x30, 0x35, 0x30, 0x31, 0x34, 0x36, 0x31, 0x37, 0x5a, 0x17, 0x0d, 0x32, 0x33,
0x31, 0x32, 0x30, 0x33, 0x30, 0x31, 0x34, 0x36, 0x31, 0x37, 0x5a, 0x30, 0x26, 0x31, 0x24, 0x30,
0x22, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x1b, 0x43, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x50,
0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x65, 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7,
0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02,
0x82, 0x01, 0x01, 0x00, 0xcb, 0xb1, 0x82, 0x68, 0xc1, 0x71, 0xab, 0xf5, 0x35, 0x03, 0xaa, 0x0e,
0xf7, 0x2e, 0x6d, 0x4b, 0x63, 0x98, 0x8f, 0xd6, 0xae, 0xb0, 0xeb, 0x95, 0x36, 0x70, 0xf9, 0x9d,
0xe4, 0xf5, 0x6b, 0x0d, 0xc0, 0x81, 0x36, 0x1d, 0xcf, 0x01, 0x45, 0x4c, 0xd6, 0xf0, 0xc5, 0x4e,
0x7b, 0x4e, 0x49, 0x1a, 0x84, 0x64, 0xc8, 0x7c, 0x87, 0xb8, 0xf3, 0xb2, 0x0a, 0xa1, 0x1e, 0xd6,
0xb7, 0x42, 0x2d, 0xc4, 0x79, 0x58, 0x19, 0x78, 0x9c, 0x7e, 0x76, 0x8d, 0xfa, 0xb9, 0x6a, 0xdc,
0x5b, 0xf8, 0x01, 0xa9, 0x53, 0x71, 0xc7, 0x0a, 0x9d, 0x73, 0x2c, 0x4c, 0xdf, 0xe5, 0x88, 0xb4,
0x10, 0x55, 0xe7, 0xea, 0xce, 0xca, 0xc8, 0xdd, 0x8a, 0xdb, 0x93, 0x43, 0x14, 0x59, 0xb4, 0x04,
0xab, 0xc7, 0xdc, 0xc7, 0x77, 0xc4, 0x85, 0xf6, 0x02, 0x59, 0x22, 0x91, 0xd0, 0x32, 0x78, 0x47,
0xf2, 0x2d, 0x0d, 0xd4, 0x81, 0x95, 0x8f, 0x8d, 0xd0, 0x8d, 0x35, 0x1f, 0x7c, 0x2f, 0x44, 0x13,
0x2b, 0x65, 0x3e, 0x78, 0xba, 0x02, 0x35, 0x2b, 0x56, 0x82, 0x3f, 0x44, 0x56, 0x6b, 0x78, 0x8a,
0x6e, 0xd7, 0xa2, 0xd2, 0xfd, 0xcc, 0x85, 0x43, 0x8d, 0x73, 0xd8, 0xb4, 0x2f, 0x60, 0xa9, 0x04,
0xc2, 0xa4, 0xc1, 0x8c, 0xfa, 0x70, 0x0f, 0xe6, 0xdb, 0xcf, 0xbe, 0x95, 0xd2, 0xce, 0xa6, 0x3a,
0x62, 0xed, 0x6d, 0xc8, 0xa4, 0xa1, 0x8a, 0xe2, 0xb9, 0x01, 0xfd, 0x39, 0x81, 0xa7, 0x2d, 0x6c,
0xa0, 0x8d, 0xc2, 0x49, 0xdb, 0x3a, 0xc7, 0x46, 0x9b, 0x46, 0x08, 0x17, 0xd4, 0xbb, 0x78, 0xb6,
0x36, 0x5c, 0xc8, 0x79, 0x9f, 0x62, 0xe7, 0xe2, 0x46, 0x07, 0x32, 0xbc, 0x18, 0xad, 0xf0, 0xb5,
0x24, 0x34, 0x49, 0xfd, 0xe0, 0x85, 0xcb, 0x22, 0xaa, 0xbc, 0x3a, 0x01, 0x36, 0x1a, 0x1a, 0x07,
0x05, 0x90, 0xb1, 0xf5, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x50, 0x30, 0x4e, 0x30, 0x1d, 0x06,
0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0xb4, 0xc6, 0xdc, 0xeb, 0x96, 0x35, 0xe5, 0xe4,
0xde, 0xf9, 0x53, 0x75, 0x62, 0xf7, 0x7c, 0x79, 0x1a, 0x45, 0x91, 0x3e, 0x30, 0x1f, 0x06, 0x03,
0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, 0x80, 0x14, 0xb4, 0xc6, 0xdc, 0xeb, 0x96, 0x35, 0xe5,
0xe4, 0xde, 0xf9, 0x53, 0x75, 0x62, 0xf7, 0x7c, 0x79, 0x1a, 0x45, 0x91, 0x3e, 0x30, 0x0c, 0x06,
0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, 0x09, 0x2a,
0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x05,
0xda, 0x57, 0xea, 0x10, 0xfd, 0xe6, 0x19, 0x6c, 0x37, 0xde, 0xac, 0x5a, 0xe0, 0x6a, 0x87, 0x17,
0x94, 0x80, 0x5e, 0xa6, 0xe8, 0xb8, 0x8c, 0xe6, 0x43, 0xcc, 0x8b, 0x1d, 0x96, 0x87, 0xf2, 0xd9,
0xfd, 0xc7, 0xb3, 0x1c, 0x67, 0x7e, 0xf0, 0x60, 0x5e, 0x2b, 0xdd, 0x60, 0xc7, 0x18, 0xcc, 0x34,
0x8e, 0x0b, 0x4e, 0xda, 0x80, 0x30, 0x4b, 0xc9, 0x66, 0xeb, 0x3d, 0x95, 0xf6, 0xac, 0xd8, 0x76,
0x7e, 0x40, 0x30, 0x6f, 0xb0, 0xb3, 0xf6, 0x7d, 0x13, 0x99, 0x69, 0xdc, 0x6a, 0xd4, 0xd1, 0xc0,
0x71, 0x69, 0xf3, 0x14, 0x9c, 0x9e, 0xe1, 0xb9, 0x8e, 0x1e, 0xcb, 0x6b, 0x4e, 0xc9, 0xc0, 0x4e,
0x22, 0x4a, 0xe7, 0xd2, 0x37, 0x33, 0xd0, 0x15, 0xb9, 0x24, 0x04, 0xe8, 0xba, 0x6c, 0x25, 0x29,
0x67, 0xd1, 0xfb, 0x16, 0x11, 0xca, 0x68, 0xc4, 0x7c, 0x3f, 0x42, 0x6f, 0x7c, 0x59, 0x03, 0xf0,
0x59, 0x59, 0xd4, 0x72, 0x61, 0x8c, 0xb7, 0xc4, 0xe8, 0xf0, 0x4b, 0x98, 0x45, 0x95, 0x09, 0x77,
0x17, 0x94, 0x9f, 0xcc, 0xe4, 0xaa, 0x9d, 0xf0, 0x8f, 0xd8, 0x1c, 0x20, 0x2a, 0x92, 0xe0, 0x74,
0x0d, 0x05, 0xf0, 0xe7, 0x4f, 0x25, 0xd4, 0x72, 0xa1, 0xa4, 0xf1, 0x9a, 0x37, 0x84, 0x1f, 0x75,
0xb0, 0x8f, 0xc9, 0x71, 0x3e, 0x25, 0x28, 0x96, 0xf6, 0xd8, 0x40, 0xa3, 0x4e, 0xd7, 0xaa, 0x62,
0xe5, 0x87, 0xac, 0x06, 0x3f, 0x3c, 0x04, 0xe6, 0x64, 0x74, 0x37, 0x05, 0xe6, 0x65, 0xaa, 0xc0,
0x8f, 0x6c, 0xb7, 0x2f, 0x93, 0xf9, 0x95, 0x6a, 0x9d, 0x3c, 0xc7, 0xbb, 0x20, 0x86, 0x3e, 0x05,
0x04, 0x2a, 0xc3, 0xe1, 0xcb, 0x5f, 0x32, 0xd7, 0xa3, 0x9f, 0xb3, 0x1d, 0x84, 0xeb, 0x30, 0xeb,
0x8a, 0x10, 0xf0, 0xc7, 0x38, 0x2c, 0x18, 0x74, 0x2f, 0x76, 0x0a, 0x01, 0xda, 0xa5, 0x4f,
};
// Secure boot "null" signed key: an authenticated-variable payload used to clear the key.
// Layout (presumably an EFI_VARIABLE_AUTHENTICATION_2 descriptor — TODO confirm against the
// UEFI spec): an EFI_TIME timestamp, then a WIN_CERTIFICATE header (dwLength 0x000005c0,
// wRevision 0x0200, wCertificateType 0x0ef1 = WIN_CERT_TYPE_EFI_GUID) with a certificate-type
// GUID, followed by a DER-encoded PKCS#7 SignedData blob (the 0x30 0x82 ... sequence).
STATIC CONST UINT8 gSecureBootPlatformNullSignedKey[] = {
0xde, 0x07, 0x0b, 0x05, 0x01, 0x37, 0x2c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xc0, 0x05, 0x00, 0x00, 0x00, 0x02, 0xf1, 0x0e, 0x9d, 0xd2, 0xaf, 0x4a, 0xdf, 0x68, 0xee, 0x49,
0x8a, 0xa9, 0x34, 0x7d, 0x37, 0x56, 0x65, 0xa7, 0x30, 0x82, 0x05, 0xa4, 0x06, 0x09, 0x2a, 0x86,
0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x02, 0xa0, 0x82, 0x05, 0x95, 0x30, 0x82, 0x05, 0x91, 0x02,
0x01, 0x01, 0x31, 0x0f, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02,
0x01, 0x05, 0x00, 0x30, 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x07, 0x01,
0xa0, 0x82, 0x03, 0x23, 0x30, 0x82, 0x03, 0x1f, 0x30, 0x82, 0x02, 0x07, 0xa0, 0x03, 0x02, 0x01,
0x02, 0x02, 0x09, 0x00, 0xf0, 0x33, 0x6a, 0xed, 0x40, 0x72, 0xc5, 0x15, 0x30, 0x0d, 0x06, 0x09,
0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b, 0x05, 0x00, 0x30, 0x26, 0x31, 0x24, 0x30,
0x22, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x1b, 0x43, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x50,
0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
0x61, 0x74, 0x65, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x33, 0x31, 0x32, 0x30, 0x35, 0x30, 0x31, 0x34,
0x36, 0x31, 0x37, 0x5a, 0x17, 0x0d, 0x32, 0x33, 0x31, 0x32, 0x30, 0x33, 0x30, 0x31, 0x34, 0x36,
0x31, 0x37, 0x5a, 0x30, 0x26, 0x31, 0x24, 0x30, 0x22, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x1b,
0x43, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x20,
0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x30, 0x82, 0x01, 0x22, 0x30,
0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82,
0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xcb, 0xb1, 0x82, 0x68,
0xc1, 0x71, 0xab, 0xf5, 0x35, 0x03, 0xaa, 0x0e, 0xf7, 0x2e, 0x6d, 0x4b, 0x63, 0x98, 0x8f, 0xd6,
0xae, 0xb0, 0xeb, 0x95, 0x36, 0x70, 0xf9, 0x9d, 0xe4, 0xf5, 0x6b, 0x0d, 0xc0, 0x81, 0x36, 0x1d,
0xcf, 0x01, 0x45, 0x4c, 0xd6, 0xf0, 0xc5, 0x4e, 0x7b, 0x4e, 0x49, 0x1a, 0x84, 0x64, 0xc8, 0x7c,
0x87, 0xb8, 0xf3, 0xb2, 0x0a, 0xa1, 0x1e, 0xd6, 0xb7, 0x42, 0x2d, 0xc4, 0x79, 0x58, 0x19, 0x78,
0x9c, 0x7e, 0x76, 0x8d, 0xfa, 0xb9, 0x6a, 0xdc, 0x5b, 0xf8, 0x01, 0xa9, 0x53, 0x71, 0xc7, 0x0a,
0x9d, 0x73, 0x2c, 0x4c, 0xdf, 0xe5, 0x88, 0xb4, 0x10, 0x55, 0xe7, 0xea, 0xce, 0xca, 0xc8, 0xdd,
0x8a, 0xdb, 0x93, 0x43, 0x14, 0x59, 0xb4, 0x04, 0xab, 0xc7, 0xdc, 0xc7, 0x77, 0xc4, 0x85, 0xf6,
0x02, 0x59, 0x22, 0x91, 0xd0, 0x32, 0x78, 0x47, 0xf2, 0x2d, 0x0d, 0xd4, 0x81, 0x95, 0x8f, 0x8d,
0xd0, 0x8d, 0x35, 0x1f, 0x7c, 0x2f, 0x44, 0x13, 0x2b, 0x65, 0x3e, 0x78, 0xba, 0x02, 0x35, 0x2b,
0x56, 0x82, 0x3f, 0x44, 0x56, 0x6b, 0x78, 0x8a, 0x6e, 0xd7, 0xa2, 0xd2, 0xfd, 0xcc, 0x85, 0x43,
0x8d, 0x73, 0xd8, 0xb4, 0x2f, 0x60, 0xa9, 0x04, 0xc2, 0xa4, 0xc1, 0x8c, 0xfa, 0x70, 0x0f, 0xe6,
0xdb, 0xcf, 0xbe, 0x95, 0xd2, 0xce, 0xa6, 0x3a, 0x62, 0xed, 0x6d, 0xc8, 0xa4, 0xa1, 0x8a, 0xe2,
0xb9, 0x01, 0xfd, 0x39, 0x81, 0xa7, 0x2d, 0x6c, 0xa0, 0x8d, 0xc2, 0x49, 0xdb, 0x3a, 0xc7, 0x46,
0x9b, 0x46, 0x08, 0x17, 0xd4, 0xbb, 0x78, 0xb6, 0x36, 0x5c, 0xc8, 0x79, 0x9f, 0x62, 0xe7, 0xe2,
0x46, 0x07, 0x32, 0xbc, 0x18, 0xad, 0xf0, 0xb5, 0x24, 0x34, 0x49, 0xfd, 0xe0, 0x85, 0xcb, 0x22,
0xaa, 0xbc, 0x3a, 0x01, 0x36, 0x1a, 0x1a, 0x07, 0x05, 0x90, 0xb1, 0xf5, 0x02, 0x03, 0x01, 0x00,
0x01, 0xa3, 0x50, 0x30, 0x4e, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14,
0xb4, 0xc6, 0xdc, 0xeb, 0x96, 0x35, 0xe5, 0xe4, 0xde, 0xf9, 0x53, 0x75, 0x62, 0xf7, 0x7c, 0x79,
0x1a, 0x45, 0x91, 0x3e, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, 0x80,
0x14, 0xb4, 0xc6, 0xdc, 0xeb, 0x96, 0x35, 0xe5, 0xe4, 0xde, 0xf9, 0x53, 0x75, 0x62, 0xf7, 0x7c,
0x79, 0x1a, 0x45, 0x91, 0x3e, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, 0x03,
0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x0b,
0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, 0x05, 0xda, 0x57, 0xea, 0x10, 0xfd, 0xe6, 0x19, 0x6c,
0x37, 0xde, 0xac, 0x5a, 0xe0, 0x6a, 0x87, 0x17, 0x94, 0x80, 0x5e, 0xa6, 0xe8, 0xb8, 0x8c, 0xe6,
0x43, 0xcc, 0x8b, 0x1d, 0x96, 0x87, 0xf2, 0xd9, 0xfd, 0xc7, 0xb3, 0x1c, 0x67, 0x7e, 0xf0, 0x60,
0x5e, 0x2b, 0xdd, 0x60, 0xc7, 0x18, 0xcc, 0x34, 0x8e, 0x0b, 0x4e, 0xda, 0x80, 0x30, 0x4b, 0xc9,
0x66, 0xeb, 0x3d, 0x95, 0xf6, 0xac, 0xd8, 0x76, 0x7e, 0x40, 0x30, 0x6f, 0xb0, 0xb3, 0xf6, 0x7d,
0x13, 0x99, 0x69, 0xdc, 0x6a, 0xd4, 0xd1, 0xc0, 0x71, 0x69, 0xf3, 0x14, 0x9c, 0x9e, 0xe1, 0xb9,
0x8e, 0x1e, 0xcb, 0x6b, 0x4e, 0xc9, 0xc0, 0x4e, 0x22, 0x4a, 0xe7, 0xd2, 0x37, 0x33, 0xd0, 0x15,
0xb9, 0x24, 0x04, 0xe8, 0xba, 0x6c, 0x25, 0x29, 0x67, 0xd1, 0xfb, 0x16, 0x11, 0xca, 0x68, 0xc4,
0x7c, 0x3f, 0x42, 0x6f, 0x7c, 0x59, 0x03, 0xf0, 0x59, 0x59, 0xd4, 0x72, 0x61, 0x8c, 0xb7, 0xc4,
0xe8, 0xf0, 0x4b, 0x98, 0x45, 0x95, 0x09, 0x77, 0x17, 0x94, 0x9f, 0xcc, 0xe4, 0xaa, 0x9d, 0xf0,
0x8f, 0xd8, 0x1c, 0x20, 0x2a, 0x92, 0xe0, 0x74, 0x0d, 0x05, 0xf0, 0xe7, 0x4f, 0x25, 0xd4, 0x72,
0xa1, 0xa4, 0xf1, 0x9a, 0x37, 0x84, 0x1f, 0x75, 0xb0, 0x8f, 0xc9, 0x71, 0x3e, 0x25, 0x28, 0x96,
0xf6, 0xd8, 0x40, 0xa3, 0x4e, 0xd7, 0xaa, 0x62, 0xe5, 0x87, 0xac, 0x06, 0x3f, 0x3c, 0x04, 0xe6,
0x64, 0x74, 0x37, 0x05, 0xe6, 0x65, 0xaa, 0xc0, 0x8f, 0x6c, 0xb7, 0x2f, 0x93, 0xf9, 0x95, 0x6a,
0x9d, 0x3c, 0xc7, 0xbb, 0x20, 0x86, 0x3e, 0x05, 0x04, 0x2a, 0xc3, 0xe1, 0xcb, 0x5f, 0x32, 0xd7,
0xa3, 0x9f, 0xb3, 0x1d, 0x84, 0xeb, 0x30, 0xeb, 0x8a, 0x10, 0xf0, 0xc7, 0x38, 0x2c, 0x18, 0x74,
0x2f, 0x76, 0x0a, 0x01, 0xda, 0xa5, 0x4f, 0x31, 0x82, 0x02, 0x45, 0x30, 0x82, 0x02, 0x41, 0x02,
0x01, 0x01, 0x30, 0x33, 0x30, 0x26, 0x31, 0x24, 0x30, 0x22, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c,
0x1b, 0x43, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d,
0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x02, 0x09, 0x00, 0xf0,
0x33, 0x6a, 0xed, 0x40, 0x72, 0xc5, 0x15, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0xa0, 0x81, 0xe4, 0x30, 0x18, 0x06, 0x09, 0x2a, 0x86, 0x48,
0x86, 0xf7, 0x0d, 0x01, 0x09, 0x03, 0x31, 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
0x01, 0x07, 0x01, 0x30, 0x1c, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x05,
0x31, 0x0f, 0x17, 0x0d, 0x31, 0x33, 0x31, 0x32, 0x30, 0x35, 0x30, 0x31, 0x35, 0x35, 0x34, 0x34,
0x5a, 0x30, 0x2f, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x04, 0x31, 0x22,
0x04, 0x20, 0xc7, 0x14, 0x77, 0x0e, 0xc4, 0xeb, 0x54, 0x8c, 0xf5, 0x23, 0x81, 0x25, 0x23, 0xd5,
0x31, 0x11, 0x3d, 0xdf, 0xe9, 0xaf, 0x8d, 0x76, 0xa1, 0xc0, 0xa2, 0xc6, 0x49, 0x82, 0x6a, 0xc9,
0x1e, 0xae, 0x30, 0x79, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x0f, 0x31,
0x6c, 0x30, 0x6a, 0x30, 0x0b, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x2a,
0x30, 0x0b, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x16, 0x30, 0x0b, 0x06,
0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x01, 0x02, 0x30, 0x0a, 0x06, 0x08, 0x2a, 0x86,
0x48, 0x86, 0xf7, 0x0d, 0x03, 0x07, 0x30, 0x0e, 0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
0x03, 0x02, 0x02, 0x02, 0x00, 0x80, 0x30, 0x0d, 0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
0x03, 0x02, 0x02, 0x01, 0x40, 0x30, 0x07, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x07, 0x30, 0x0d,
0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x03, 0x02, 0x02, 0x01, 0x28, 0x30, 0x0d, 0x06,
0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x04, 0x82, 0x01, 0x00,
0x2d, 0x05, 0x15, 0x6b, 0x79, 0xdf, 0x3e, 0x09, 0x8c, 0xf5, 0x4e, 0x47, 0xe1, 0x87, 0x92, 0xa1,
0x60, 0xb7, 0x59, 0x74, 0xaa, 0x49, 0x72, 0x08, 0x09, 0x61, 0x40, 0x35, 0x25, 0x8f, 0xba, 0x68,
0x72, 0x95, 0xc5, 0x70, 0x2c, 0x50, 0x5b, 0xb5, 0x8d, 0x6d, 0x1d, 0x71, 0x4a, 0x18, 0x85, 0x09,
0x90, 0x05, 0x3c, 0x75, 0xbf, 0x71, 0xc1, 0x0a, 0xb5, 0x9b, 0x1a, 0xd6, 0xe4, 0xd9, 0xca, 0x60,
0x7e, 0x77, 0x8d, 0x1e, 0x4e, 0xc8, 0x83, 0xe0, 0x65, 0x6b, 0x8c, 0x24, 0x74, 0xe5, 0x4c, 0xc1,
0xc8, 0x99, 0x99, 0xa9, 0xed, 0x44, 0xae, 0x11, 0x99, 0xed, 0xe8, 0x91, 0x84, 0x02, 0x4b, 0x2a,
0x00, 0x5f, 0xe4, 0xb0, 0x42, 0xb1, 0xb3, 0x0d, 0x46, 0xd9, 0xc8, 0x5e, 0x40, 0xf6, 0xea, 0x8c,
0xb0, 0xfe, 0x68, 0x7e, 0x24, 0x23, 0x76, 0xe1, 0xf0, 0xd1, 0xf7, 0x03, 0xb0, 0xd5, 0xf5, 0x9c,
0xc6, 0x19, 0x51, 0x46, 0x0e, 0x68, 0x35, 0xfa, 0xbf, 0x34, 0x04, 0xd8, 0xc7, 0x9c, 0xd1, 0xdf,
0x6b, 0x1b, 0x00, 0xd5, 0x4a, 0x7a, 0x8f, 0x69, 0x65, 0x8d, 0x56, 0xed, 0xed, 0x95, 0xd4, 0xe0,
0x7f, 0x4a, 0x8c, 0x82, 0x51, 0x22, 0x11, 0x3f, 0x9e, 0x77, 0xb6, 0x27, 0x8e, 0xff, 0x23, 0x77,
0xcd, 0x72, 0xc6, 0xa9, 0xf5, 0x66, 0x0a, 0x24, 0x5d, 0x80, 0xb5, 0x3a, 0x12, 0x29, 0x9e, 0x5f,
0xed, 0xc1, 0xb4, 0xd5, 0x37, 0x9a, 0xa4, 0x3e, 0xe0, 0x4f, 0x7f, 0x85, 0xca, 0x6f, 0x9f, 0xd2,
0x29, 0xfb, 0xdb, 0x0d, 0x2d, 0x66, 0x85, 0x91, 0xc7, 0x14, 0x6a, 0x8e, 0x1c, 0xac, 0x4a, 0x79,
0xa2, 0x75, 0x81, 0xd2, 0x46, 0x9b, 0x17, 0x78, 0x8e, 0x39, 0xf8, 0xf9, 0xe7, 0xfb, 0x79, 0x2e,
0x3d, 0x12, 0x68, 0xf6, 0x84, 0xff, 0x47, 0x01, 0x9b, 0x90, 0x00, 0x17, 0x01, 0x1a, 0xa8, 0xb9,
};
// Secure boot exchange key: a PEM-encoded X.509 certificate stored as raw ASCII bytes.
// The bytes below spell "-----BEGIN CERTIFICATE-----\n", the base64 body, and
// "-----END CERTIFICATE-----\n" (0x2d = '-', 0x0a = '\n').
STATIC CONST UINT8 gSecureBootExchangeKey[] = {
0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x43, 0x45, 0x52, 0x54, 0x49,
0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x44,
0x48, 0x7a, 0x43, 0x43, 0x41, 0x67, 0x65, 0x67, 0x41, 0x77, 0x49, 0x42, 0x41, 0x67, 0x49, 0x4a,
0x41, 0x49, 0x73, 0x55, 0x75, 0x63, 0x54, 0x53, 0x45, 0x4c, 0x64, 0x75, 0x4d, 0x41, 0x30, 0x47,
0x43, 0x53, 0x71, 0x47, 0x53, 0x49, 0x62, 0x33, 0x44, 0x51, 0x45, 0x42, 0x43, 0x77, 0x55, 0x41,
0x4d, 0x43, 0x59, 0x78, 0x4a, 0x44, 0x41, 0x69, 0x42, 0x67, 0x4e, 0x56, 0x0a, 0x42, 0x41, 0x4d,
0x4d, 0x47, 0x30, 0x4e, 0x73, 0x62, 0x33, 0x5a, 0x6c, 0x63, 0x69, 0x42, 0x46, 0x65, 0x47, 0x4e,
0x6f, 0x59, 0x57, 0x35, 0x6e, 0x5a, 0x53, 0x42, 0x44, 0x5a, 0x58, 0x4a, 0x30, 0x61, 0x57, 0x5a,
0x70, 0x59, 0x32, 0x46, 0x30, 0x5a, 0x54, 0x41, 0x65, 0x46, 0x77, 0x30, 0x78, 0x4d, 0x7a, 0x45,
0x79, 0x4d, 0x44, 0x55, 0x77, 0x4d, 0x54, 0x51, 0x32, 0x4e, 0x44, 0x52, 0x61, 0x0a, 0x46, 0x77,
0x30, 0x79, 0x4d, 0x7a, 0x45, 0x79, 0x4d, 0x44, 0x4d, 0x77, 0x4d, 0x54, 0x51, 0x32, 0x4e, 0x44,
0x52, 0x61, 0x4d, 0x43, 0x59, 0x78, 0x4a, 0x44, 0x41, 0x69, 0x42, 0x67, 0x4e, 0x56, 0x42, 0x41,
0x4d, 0x4d, 0x47, 0x30, 0x4e, 0x73, 0x62, 0x33, 0x5a, 0x6c, 0x63, 0x69, 0x42, 0x46, 0x65, 0x47,
0x4e, 0x6f, 0x59, 0x57, 0x35, 0x6e, 0x5a, 0x53, 0x42, 0x44, 0x5a, 0x58, 0x4a, 0x30, 0x0a, 0x61,
0x57, 0x5a, 0x70, 0x59, 0x32, 0x46, 0x30, 0x5a, 0x54, 0x43, 0x43, 0x41, 0x53, 0x49, 0x77, 0x44,
0x51, 0x59, 0x4a, 0x4b, 0x6f, 0x5a, 0x49, 0x68, 0x76, 0x63, 0x4e, 0x41, 0x51, 0x45, 0x42, 0x42,
0x51, 0x41, 0x44, 0x67, 0x67, 0x45, 0x50, 0x41, 0x44, 0x43, 0x43, 0x41, 0x51, 0x6f, 0x43, 0x67,
0x67, 0x45, 0x42, 0x41, 0x4e, 0x49, 0x6d, 0x4a, 0x61, 0x4b, 0x44, 0x67, 0x76, 0x66, 0x58, 0x0a,
0x37, 0x68, 0x62, 0x46, 0x2f, 0x42, 0x38, 0x59, 0x6f, 0x75, 0x6f, 0x36, 0x76, 0x6a, 0x31, 0x64,
0x54, 0x49, 0x57, 0x34, 0x42, 0x4c, 0x77, 0x49, 0x78, 0x4f, 0x55, 0x4a, 0x4e, 0x78, 0x78, 0x76,
0x31, 0x4d, 0x4e, 0x79, 0x2f, 0x77, 0x48, 0x53, 0x2f, 0x61, 0x44, 0x2f, 0x66, 0x55, 0x55, 0x70,
0x6a, 0x4b, 0x79, 0x62, 0x41, 0x38, 0x61, 0x61, 0x7a, 0x62, 0x36, 0x61, 0x6d, 0x75, 0x64, 0x76,
0x0a, 0x51, 0x38, 0x66, 0x47, 0x6c, 0x4f, 0x36, 0x51, 0x4c, 0x62, 0x37, 0x53, 0x77, 0x62, 0x57,
0x6a, 0x54, 0x5a, 0x2b, 0x4e, 0x59, 0x51, 0x56, 0x30, 0x58, 0x33, 0x70, 0x56, 0x33, 0x64, 0x31,
0x52, 0x49, 0x69, 0x72, 0x4b, 0x49, 0x36, 0x50, 0x44, 0x38, 0x2f, 0x48, 0x51, 0x63, 0x71, 0x4d,
0x78, 0x6b, 0x79, 0x66, 0x36, 0x47, 0x4c, 0x70, 0x61, 0x6c, 0x6f, 0x33, 0x30, 0x34, 0x55, 0x31,
0x2f, 0x0a, 0x33, 0x33, 0x65, 0x36, 0x6f, 0x32, 0x37, 0x4a, 0x79, 0x68, 0x4a, 0x64, 0x78, 0x44,
0x4e, 0x68, 0x4d, 0x56, 0x36, 0x77, 0x37, 0x4b, 0x69, 0x42, 0x7a, 0x68, 0x37, 0x6f, 0x6b, 0x32,
0x55, 0x47, 0x57, 0x45, 0x65, 0x42, 0x72, 0x2b, 0x6e, 0x74, 0x5a, 0x7a, 0x50, 0x35, 0x41, 0x67,
0x31, 0x6e, 0x4a, 0x4e, 0x4e, 0x6e, 0x79, 0x78, 0x35, 0x54, 0x55, 0x52, 0x71, 0x66, 0x33, 0x74,
0x55, 0x37, 0x0a, 0x32, 0x52, 0x4a, 0x67, 0x71, 0x63, 0x65, 0x38, 0x44, 0x30, 0x58, 0x53, 0x73,
0x58, 0x78, 0x52, 0x46, 0x59, 0x4b, 0x7a, 0x48, 0x30, 0x5a, 0x36, 0x7a, 0x75, 0x74, 0x35, 0x2f,
0x47, 0x31, 0x34, 0x30, 0x79, 0x44, 0x54, 0x6e, 0x48, 0x41, 0x77, 0x62, 0x35, 0x45, 0x58, 0x30,
0x35, 0x30, 0x64, 0x34, 0x74, 0x4c, 0x65, 0x54, 0x43, 0x6d, 0x42, 0x37, 0x30, 0x31, 0x33, 0x61,
0x35, 0x52, 0x55, 0x0a, 0x57, 0x6a, 0x41, 0x4a, 0x31, 0x43, 0x73, 0x6a, 0x6b, 0x51, 0x65, 0x42,
0x35, 0x4c, 0x72, 0x39, 0x75, 0x4d, 0x45, 0x63, 0x4f, 0x52, 0x48, 0x49, 0x36, 0x6d, 0x75, 0x79,
0x44, 0x57, 0x47, 0x4a, 0x30, 0x44, 0x4d, 0x6e, 0x73, 0x6e, 0x32, 0x48, 0x69, 0x77, 0x4c, 0x6f,
0x35, 0x5a, 0x66, 0x30, 0x62, 0x77, 0x69, 0x42, 0x42, 0x34, 0x36, 0x70, 0x74, 0x63, 0x76, 0x44,
0x4a, 0x45, 0x69, 0x58, 0x0a, 0x42, 0x64, 0x31, 0x64, 0x65, 0x59, 0x36, 0x75, 0x64, 0x59, 0x6b,
0x43, 0x41, 0x77, 0x45, 0x41, 0x41, 0x61, 0x4e, 0x51, 0x4d, 0x45, 0x34, 0x77, 0x48, 0x51, 0x59,
0x44, 0x56, 0x52, 0x30, 0x4f, 0x42, 0x42, 0x59, 0x45, 0x46, 0x44, 0x46, 0x70, 0x52, 0x70, 0x65,
0x69, 0x2b, 0x36, 0x59, 0x35, 0x7a, 0x4f, 0x76, 0x57, 0x51, 0x31, 0x57, 0x6e, 0x71, 0x44, 0x33,
0x6b, 0x78, 0x65, 0x37, 0x74, 0x0a, 0x4d, 0x42, 0x38, 0x47, 0x41, 0x31, 0x55, 0x64, 0x49, 0x77,
0x51, 0x59, 0x4d, 0x42, 0x61, 0x41, 0x46, 0x44, 0x46, 0x70, 0x52, 0x70, 0x65, 0x69, 0x2b, 0x36,
0x59, 0x35, 0x7a, 0x4f, 0x76, 0x57, 0x51, 0x31, 0x57, 0x6e, 0x71, 0x44, 0x33, 0x6b, 0x78, 0x65,
0x37, 0x74, 0x4d, 0x41, 0x77, 0x47, 0x41, 0x31, 0x55, 0x64, 0x45, 0x77, 0x51, 0x46, 0x4d, 0x41,
0x4d, 0x42, 0x41, 0x66, 0x38, 0x77, 0x0a, 0x44, 0x51, 0x59, 0x4a, 0x4b, 0x6f, 0x5a, 0x49, 0x68,
0x76, 0x63, 0x4e, 0x41, 0x51, 0x45, 0x4c, 0x42, 0x51, 0x41, 0x44, 0x67, 0x67, 0x45, 0x42, 0x41,
0x4b, 0x4e, 0x58, 0x42, 0x35, 0x48, 0x6e, 0x75, 0x41, 0x49, 0x45, 0x70, 0x35, 0x56, 0x31, 0x31,
0x30, 0x61, 0x58, 0x78, 0x62, 0x33, 0x37, 0x50, 0x50, 0x33, 0x38, 0x53, 0x6a, 0x65, 0x6f, 0x77,
0x49, 0x4b, 0x35, 0x51, 0x66, 0x6f, 0x76, 0x0a, 0x70, 0x58, 0x31, 0x59, 0x50, 0x6c, 0x6a, 0x32,
0x59, 0x71, 0x37, 0x58, 0x6c, 0x69, 0x79, 0x67, 0x76, 0x58, 0x50, 0x68, 0x64, 0x75, 0x46, 0x6f,
0x41, 0x4b, 0x77, 0x6a, 0x2f, 0x76, 0x59, 0x4c, 0x70, 0x66, 0x6a, 0x5a, 0x4a, 0x66, 0x42, 0x48,
0x35, 0x35, 0x6e, 0x58, 0x37, 0x57, 0x72, 0x5a, 0x6b, 0x57, 0x64, 0x65, 0x41, 0x4e, 0x44, 0x47,
0x34, 0x4a, 0x30, 0x75, 0x36, 0x4a, 0x65, 0x76, 0x0a, 0x35, 0x64, 0x44, 0x4f, 0x34, 0x46, 0x56,
0x73, 0x74, 0x2f, 0x7a, 0x66, 0x56, 0x4b, 0x4c, 0x52, 0x6f, 0x58, 0x59, 0x57, 0x35, 0x74, 0x6c,
0x7a, 0x79, 0x35, 0x4e, 0x79, 0x56, 0x47, 0x5a, 0x30, 0x31, 0x73, 0x42, 0x35, 0x47, 0x78, 0x4a,
0x58, 0x59, 0x74, 0x54, 0x4d, 0x61, 0x58, 0x47, 0x37, 0x30, 0x4e, 0x68, 0x69, 0x46, 0x45, 0x2b,
0x69, 0x42, 0x31, 0x6d, 0x6e, 0x33, 0x63, 0x6c, 0x35, 0x0a, 0x54, 0x4b, 0x69, 0x39, 0x45, 0x65,
0x30, 0x39, 0x48, 0x45, 0x54, 0x4e, 0x4e, 0x57, 0x55, 0x71, 0x75, 0x61, 0x76, 0x7a, 0x6f, 0x66,
0x4b, 0x67, 0x79, 0x2b, 0x7a, 0x74, 0x62, 0x31, 0x68, 0x39, 0x36, 0x72, 0x2b, 0x54, 0x2b, 0x66,
0x4a, 0x4a, 0x46, 0x64, 0x51, 0x36, 0x5a, 0x50, 0x69, 0x70, 0x79, 0x59, 0x32, 0x2b, 0x67, 0x53,
0x55, 0x41, 0x38, 0x72, 0x2b, 0x34, 0x57, 0x6e, 0x4d, 0x73, 0x0a, 0x50, 0x30, 0x37, 0x77, 0x43,
0x74, 0x6d, 0x54, 0x59, 0x34, 0x52, 0x2b, 0x33, 0x64, 0x51, 0x5a, 0x72, 0x2f, 0x31, 0x2f, 0x50,
0x44, 0x56, 0x58, 0x4d, 0x73, 0x5a, 0x38, 0x39, 0x48, 0x61, 0x44, 0x64, 0x30, 0x57, 0x52, 0x4c,
0x63, 0x37, 0x55, 0x41, 0x69, 0x45, 0x50, 0x2b, 0x50, 0x34, 0x73, 0x79, 0x6b, 0x59, 0x75, 0x6c,
0x35, 0x39, 0x62, 0x5a, 0x50, 0x6a, 0x7a, 0x41, 0x49, 0x70, 0x68, 0x0a, 0x46, 0x33, 0x52, 0x43,
0x51, 0x78, 0x4a, 0x57, 0x32, 0x4b, 0x4b, 0x6f, 0x47, 0x68, 0x6e, 0x39, 0x51, 0x6b, 0x6e, 0x56,
0x2f, 0x58, 0x73, 0x79, 0x62, 0x2b, 0x52, 0x42, 0x30, 0x35, 0x46, 0x4b, 0x41, 0x65, 0x43, 0x6c,
0x6e, 0x43, 0x56, 0x64, 0x4a, 0x78, 0x38, 0x57, 0x6c, 0x75, 0x45, 0x3d, 0x0a, 0x2d, 0x2d, 0x2d,
0x2d, 0x2d, 0x45, 0x4e, 0x44, 0x20, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54,
0x45, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a,
};
// Secure boot private exchange key: a PEM-encoded PKCS#8 private key stored as raw ASCII
// bytes ("-----BEGIN PRIVATE KEY-----" ... "-----END PRIVATE KEY-----", 0x2d = '-').
// NOTE(review): embedding a private key in source means anyone with this code or the built
// binary can sign exchanges with it — confirm this is an intentional development/test key.
STATIC CONST UINT8 gSecureBootExchangePrivateKey[] = {
0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x50, 0x52, 0x49, 0x56, 0x41,
0x54, 0x45, 0x20, 0x4b, 0x45, 0x59, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x45,
0x76, 0x51, 0x49, 0x42, 0x41, 0x44, 0x41, 0x4e, 0x42, 0x67, 0x6b, 0x71, 0x68, 0x6b, 0x69, 0x47,
0x39, 0x77, 0x30, 0x42, 0x41, 0x51, 0x45, 0x46, 0x41, 0x41, 0x53, 0x43, 0x42, 0x4b, 0x63, 0x77,
0x67, 0x67, 0x53, 0x6a, 0x41, 0x67, 0x45, 0x41, 0x41, 0x6f, 0x49, 0x42, 0x41, 0x51, 0x44, 0x53,
0x4a, 0x69, 0x57, 0x69, 0x67, 0x34, 0x4c, 0x33, 0x31, 0x2b, 0x34, 0x57, 0x0a, 0x78, 0x66, 0x77,
0x66, 0x47, 0x4b, 0x4c, 0x71, 0x4f, 0x72, 0x34, 0x39, 0x58, 0x55, 0x79, 0x46, 0x75, 0x41, 0x53,
0x38, 0x43, 0x4d, 0x54, 0x6c, 0x43, 0x54, 0x63, 0x63, 0x62, 0x39, 0x54, 0x44, 0x63, 0x76, 0x38,
0x42, 0x30, 0x76, 0x32, 0x67, 0x2f, 0x33, 0x31, 0x46, 0x4b, 0x59, 0x79, 0x73, 0x6d, 0x77, 0x50,
0x47, 0x6d, 0x73, 0x32, 0x2b, 0x6d, 0x70, 0x72, 0x6e, 0x62, 0x30, 0x50, 0x48, 0x0a, 0x78, 0x70,
0x54, 0x75, 0x6b, 0x43, 0x32, 0x2b, 0x30, 0x73, 0x47, 0x31, 0x6f, 0x30, 0x32, 0x66, 0x6a, 0x57,
0x45, 0x46, 0x64, 0x46, 0x39, 0x36, 0x56, 0x64, 0x33, 0x64, 0x55, 0x53, 0x49, 0x71, 0x79, 0x69,
0x4f, 0x6a, 0x77, 0x2f, 0x50, 0x78, 0x30, 0x48, 0x4b, 0x6a, 0x4d, 0x5a, 0x4d, 0x6e, 0x2b, 0x68,
0x69, 0x36, 0x57, 0x70, 0x61, 0x4e, 0x39, 0x4f, 0x46, 0x4e, 0x66, 0x39, 0x39, 0x33, 0x0a, 0x75,
0x71, 0x4e, 0x75, 0x79, 0x63, 0x6f, 0x53, 0x58, 0x63, 0x51, 0x7a, 0x59, 0x54, 0x46, 0x65, 0x73,
0x4f, 0x79, 0x6f, 0x67, 0x63, 0x34, 0x65, 0x36, 0x4a, 0x4e, 0x6c, 0x42, 0x6c, 0x68, 0x48, 0x67,
0x61, 0x2f, 0x70, 0x37, 0x57, 0x63, 0x7a, 0x2b, 0x51, 0x49, 0x4e, 0x5a, 0x79, 0x54, 0x54, 0x5a,
0x38, 0x73, 0x65, 0x55, 0x31, 0x45, 0x61, 0x6e, 0x39, 0x37, 0x56, 0x4f, 0x39, 0x6b, 0x53, 0x0a,
0x59, 0x4b, 0x6e, 0x48, 0x76, 0x41, 0x39, 0x46, 0x30, 0x72, 0x46, 0x38, 0x55, 0x52, 0x57, 0x43,
0x73, 0x78, 0x39, 0x47, 0x65, 0x73, 0x37, 0x72, 0x65, 0x66, 0x78, 0x74, 0x65, 0x4e, 0x4d, 0x67,
0x30, 0x35, 0x78, 0x77, 0x4d, 0x47, 0x2b, 0x52, 0x46, 0x39, 0x4f, 0x64, 0x48, 0x65, 0x4c, 0x53,
0x33, 0x6b, 0x77, 0x70, 0x67, 0x65, 0x39, 0x4e, 0x64, 0x32, 0x75, 0x55, 0x56, 0x46, 0x6f, 0x77,
0x0a, 0x43, 0x64, 0x51, 0x72, 0x49, 0x35, 0x45, 0x48, 0x67, 0x65, 0x53, 0x36, 0x2f, 0x62, 0x6a,
0x42, 0x48, 0x44, 0x6b, 0x52, 0x79, 0x4f, 0x70, 0x72, 0x73, 0x67, 0x31, 0x68, 0x69, 0x64, 0x41,
0x7a, 0x4a, 0x37, 0x4a, 0x39, 0x68, 0x34, 0x73, 0x43, 0x36, 0x4f, 0x57, 0x58, 0x39, 0x47, 0x38,
0x49, 0x67, 0x51, 0x65, 0x4f, 0x71, 0x62, 0x58, 0x4c, 0x77, 0x79, 0x52, 0x49, 0x6c, 0x77, 0x58,
0x64, 0x0a, 0x58, 0x58, 0x6d, 0x4f, 0x72, 0x6e, 0x57, 0x4a, 0x41, 0x67, 0x4d, 0x42, 0x41, 0x41,
0x45, 0x43, 0x67, 0x67, 0x45, 0x41, 0x58, 0x6d, 0x2f, 0x4d, 0x47, 0x39, 0x4c, 0x7a, 0x71, 0x51,
0x4c, 0x39, 0x4f, 0x48, 0x69, 0x79, 0x53, 0x78, 0x73, 0x63, 0x49, 0x78, 0x78, 0x51, 0x33, 0x32,
0x79, 0x73, 0x39, 0x61, 0x35, 0x7a, 0x57, 0x6a, 0x69, 0x71, 0x6d, 0x71, 0x35, 0x67, 0x65, 0x4d,
0x50, 0x38, 0x0a, 0x6e, 0x66, 0x52, 0x77, 0x69, 0x6c, 0x5a, 0x52, 0x71, 0x65, 0x63, 0x47, 0x57,
0x57, 0x31, 0x4a, 0x6a, 0x55, 0x75, 0x41, 0x44, 0x2b, 0x45, 0x44, 0x4d, 0x6e, 0x4e, 0x78, 0x78,
0x79, 0x30, 0x6d, 0x44, 0x59, 0x47, 0x38, 0x37, 0x64, 0x51, 0x57, 0x36, 0x70, 0x6a, 0x31, 0x65,
0x66, 0x57, 0x76, 0x37, 0x56, 0x4a, 0x77, 0x56, 0x69, 0x34, 0x73, 0x74, 0x65, 0x49, 0x65, 0x65,
0x4e, 0x56, 0x38, 0x0a, 0x41, 0x44, 0x53, 0x6b, 0x66, 0x58, 0x53, 0x53, 0x6a, 0x49, 0x6c, 0x36,
0x36, 0x73, 0x46, 0x64, 0x65, 0x75, 0x71, 0x56, 0x38, 0x44, 0x44, 0x44, 0x56, 0x58, 0x6e, 0x46,
0x51, 0x62, 0x6d, 0x45, 0x4a, 0x39, 0x37, 0x55, 0x6b, 0x50, 0x58, 0x58, 0x66, 0x42, 0x64, 0x74,
0x45, 0x78, 0x58, 0x4c, 0x52, 0x4e, 0x31, 0x48, 0x44, 0x4a, 0x61, 0x41, 0x6c, 0x54, 0x30, 0x34,
0x39, 0x33, 0x31, 0x6f, 0x0a, 0x49, 0x6c, 0x52, 0x68, 0x7a, 0x4d, 0x64, 0x79, 0x7a, 0x31, 0x6b,
0x6e, 0x30, 0x6c, 0x57, 0x76, 0x47, 0x66, 0x6e, 0x31, 0x45, 0x61, 0x30, 0x39, 0x62, 0x49, 0x54,
0x71, 0x53, 0x7a, 0x43, 0x6d, 0x4f, 0x70, 0x34, 0x66, 0x57, 0x37, 0x30, 0x32, 0x2b, 0x32, 0x77,
0x33, 0x64, 0x46, 0x37, 0x34, 0x75, 0x70, 0x38, 0x38, 0x4a, 0x64, 0x62, 0x74, 0x46, 0x52, 0x68,
0x78, 0x62, 0x34, 0x45, 0x6e, 0x0a, 0x53, 0x53, 0x68, 0x67, 0x6e, 0x44, 0x47, 0x7a, 0x39, 0x65,
0x49, 0x47, 0x76, 0x55, 0x54, 0x6b, 0x39, 0x6c, 0x68, 0x30, 0x57, 0x31, 0x41, 0x64, 0x65, 0x76,
0x68, 0x68, 0x37, 0x7a, 0x31, 0x5a, 0x64, 0x46, 0x4a, 0x70, 0x44, 0x6d, 0x58, 0x37, 0x6d, 0x34,
0x33, 0x65, 0x73, 0x4d, 0x5a, 0x4a, 0x52, 0x4c, 0x4f, 0x61, 0x44, 0x4c, 0x4c, 0x51, 0x58, 0x61,
0x74, 0x51, 0x55, 0x6f, 0x78, 0x4d, 0x0a, 0x67, 0x6a, 0x64, 0x6f, 0x2f, 0x79, 0x6c, 0x4d, 0x4b,
0x63, 0x68, 0x41, 0x49, 0x46, 0x68, 0x54, 0x67, 0x41, 0x45, 0x66, 0x4d, 0x72, 0x4c, 0x50, 0x55,
0x6b, 0x6b, 0x75, 0x4b, 0x47, 0x39, 0x79, 0x6d, 0x73, 0x57, 0x5a, 0x31, 0x6a, 0x51, 0x54, 0x33,
0x51, 0x4b, 0x42, 0x67, 0x51, 0x44, 0x39, 0x75, 0x52, 0x63, 0x41, 0x39, 0x4b, 0x56, 0x45, 0x5a,
0x73, 0x6f, 0x41, 0x33, 0x68, 0x37, 0x5a, 0x0a, 0x57, 0x75, 0x48, 0x54, 0x58, 0x5a, 0x49, 0x56,
0x33, 0x4b, 0x4f, 0x2f, 0x57, 0x69, 0x4f, 0x78, 0x6a, 0x33, 0x6e, 0x33, 0x54, 0x66, 0x4c, 0x6b,
0x68, 0x54, 0x57, 0x4e, 0x45, 0x44, 0x76, 0x75, 0x36, 0x63, 0x76, 0x5a, 0x69, 0x78, 0x30, 0x75,
0x76, 0x48, 0x45, 0x77, 0x6e, 0x33, 0x74, 0x49, 0x6b, 0x67, 0x4e, 0x70, 0x46, 0x55, 0x4b, 0x2b,
0x78, 0x51, 0x38, 0x71, 0x35, 0x4a, 0x71, 0x32, 0x0a, 0x41, 0x46, 0x52, 0x6e, 0x67, 0x61, 0x68,
0x72, 0x74, 0x41, 0x59, 0x47, 0x54, 0x75, 0x6e, 0x4a, 0x6d, 0x4a, 0x56, 0x44, 0x4c, 0x32, 0x57,
0x57, 0x4c, 0x70, 0x66, 0x45, 0x4f, 0x45, 0x6e, 0x74, 0x46, 0x42, 0x6e, 0x65, 0x45, 0x48, 0x5a,
0x2b, 0x53, 0x44, 0x6e, 0x68, 0x4f, 0x30, 0x46, 0x4d, 0x57, 0x50, 0x6d, 0x53, 0x6d, 0x42, 0x36,
0x5a, 0x48, 0x6d, 0x58, 0x4d, 0x76, 0x70, 0x56, 0x56, 0x0a, 0x4b, 0x31, 0x2f, 0x45, 0x35, 0x67,
0x52, 0x43, 0x32, 0x45, 0x4a, 0x5a, 0x6d, 0x4d, 0x47, 0x4b, 0x43, 0x6b, 0x75, 0x49, 0x39, 0x32,
0x75, 0x5a, 0x50, 0x77, 0x4b, 0x42, 0x67, 0x51, 0x44, 0x55, 0x43, 0x50, 0x4c, 0x32, 0x4f, 0x63,
0x73, 0x4d, 0x45, 0x46, 0x34, 0x55, 0x79, 0x6c, 0x34, 0x74, 0x73, 0x59, 0x32, 0x39, 0x42, 0x37,
0x34, 0x30, 0x6f, 0x52, 0x6e, 0x30, 0x2b, 0x56, 0x78, 0x58, 0x0a, 0x74, 0x56, 0x6c, 0x4d, 0x54,
0x44, 0x63, 0x61, 0x4c, 0x64, 0x75, 0x32, 0x46, 0x5a, 0x74, 0x63, 0x55, 0x65, 0x4d, 0x44, 0x33,
0x6e, 0x42, 0x65, 0x41, 0x68, 0x32, 0x37, 0x51, 0x33, 0x49, 0x62, 0x73, 0x32, 0x2f, 0x7a, 0x6e,
0x68, 0x56, 0x52, 0x5a, 0x49, 0x30, 0x46, 0x68, 0x75, 0x68, 0x30, 0x2f, 0x42, 0x4e, 0x32, 0x4e,
0x6b, 0x56, 0x47, 0x6a, 0x55, 0x69, 0x52, 0x64, 0x54, 0x73, 0x51, 0x0a, 0x51, 0x45, 0x6f, 0x42,
0x53, 0x6e, 0x67, 0x49, 0x4d, 0x2b, 0x2f, 0x77, 0x71, 0x58, 0x5a, 0x43, 0x2f, 0x78, 0x35, 0x6f,
0x64, 0x56, 0x6c, 0x76, 0x78, 0x73, 0x62, 0x43, 0x74, 0x41, 0x51, 0x6d, 0x46, 0x32, 0x31, 0x30,
0x6f, 0x69, 0x64, 0x38, 0x2f, 0x78, 0x6b, 0x37, 0x71, 0x6d, 0x74, 0x39, 0x4d, 0x70, 0x31, 0x44,
0x74, 0x42, 0x4f, 0x6b, 0x65, 0x4a, 0x49, 0x63, 0x6f, 0x69, 0x78, 0x43, 0x0a, 0x64, 0x68, 0x37,
0x54, 0x53, 0x6c, 0x77, 0x33, 0x4e, 0x77, 0x4b, 0x42, 0x67, 0x51, 0x43, 0x6e, 0x76, 0x66, 0x61,
0x33, 0x61, 0x77, 0x62, 0x59, 0x63, 0x68, 0x79, 0x61, 0x39, 0x6a, 0x37, 0x75, 0x39, 0x47, 0x62,
0x59, 0x6c, 0x73, 0x51, 0x2b, 0x4c, 0x2b, 0x45, 0x53, 0x59, 0x55, 0x57, 0x33, 0x33, 0x6d, 0x4f,
0x45, 0x58, 0x4d, 0x52, 0x34, 0x30, 0x4a, 0x6a, 0x57, 0x36, 0x69, 0x53, 0x31, 0x0a, 0x6a, 0x31,
0x78, 0x32, 0x4e, 0x73, 0x4c, 0x55, 0x59, 0x30, 0x46, 0x79, 0x49, 0x2f, 0x34, 0x64, 0x76, 0x47,
0x73, 0x45, 0x7a, 0x6b, 0x6d, 0x53, 0x56, 0x6a, 0x58, 0x77, 0x35, 0x62, 0x4a, 0x6a, 0x49, 0x56,
0x53, 0x73, 0x47, 0x63, 0x78, 0x2f, 0x61, 0x45, 0x57, 0x54, 0x5a, 0x68, 0x69, 0x71, 0x62, 0x33,
0x59, 0x2f, 0x4e, 0x4e, 0x61, 0x30, 0x73, 0x35, 0x6b, 0x69, 0x34, 0x52, 0x50, 0x2b, 0x0a, 0x36,
0x59, 0x73, 0x77, 0x68, 0x54, 0x48, 0x52, 0x79, 0x32, 0x35, 0x34, 0x75, 0x66, 0x51, 0x78, 0x78,
0x61, 0x45, 0x36, 0x69, 0x64, 0x65, 0x77, 0x34, 0x36, 0x66, 0x70, 0x39, 0x52, 0x5a, 0x46, 0x65,
0x4b, 0x65, 0x59, 0x52, 0x58, 0x6f, 0x4c, 0x74, 0x32, 0x34, 0x6d, 0x54, 0x68, 0x65, 0x6d, 0x61,
0x36, 0x67, 0x45, 0x70, 0x59, 0x38, 0x39, 0x67, 0x51, 0x4b, 0x42, 0x67, 0x46, 0x6e, 0x77, 0x0a,
0x66, 0x64, 0x61, 0x78, 0x78, 0x77, 0x34, 0x54, 0x39, 0x30, 0x52, 0x4d, 0x58, 0x73, 0x78, 0x31,
0x44, 0x78, 0x46, 0x4b, 0x73, 0x6f, 0x53, 0x79, 0x66, 0x55, 0x63, 0x72, 0x61, 0x74, 0x38, 0x4e,
0x58, 0x70, 0x5a, 0x39, 0x6b, 0x62, 0x58, 0x36, 0x45, 0x38, 0x4e, 0x56, 0x77, 0x50, 0x50, 0x56,
0x44, 0x44, 0x31, 0x38, 0x34, 0x68, 0x38, 0x4f, 0x4f, 0x4c, 0x78, 0x52, 0x4c, 0x4a, 0x72, 0x52,
0x0a, 0x62, 0x30, 0x42, 0x67, 0x49, 0x74, 0x7a, 0x47, 0x6b, 0x51, 0x62, 0x79, 0x4a, 0x62, 0x4a,
0x52, 0x70, 0x70, 0x4f, 0x7a, 0x43, 0x34, 0x6d, 0x55, 0x72, 0x6c, 0x35, 0x38, 0x34, 0x70, 0x50,
0x50, 0x49, 0x56, 0x55, 0x51, 0x32, 0x48, 0x39, 0x71, 0x51, 0x48, 0x48, 0x4f, 0x41, 0x41, 0x73,
0x46, 0x47, 0x6b, 0x6e, 0x6d, 0x31, 0x51, 0x77, 0x54, 0x76, 0x67, 0x57, 0x69, 0x78, 0x45, 0x76,
0x41, 0x0a, 0x44, 0x6a, 0x36, 0x31, 0x39, 0x5a, 0x67, 0x51, 0x77, 0x51, 0x78, 0x4e, 0x43, 0x70,
0x45, 0x74, 0x6f, 0x67, 0x57, 0x77, 0x34, 0x64, 0x41, 0x58, 0x45, 0x4b, 0x73, 0x79, 0x6f, 0x79,
0x47, 0x42, 0x35, 0x79, 0x74, 0x71, 0x31, 0x46, 0x6a, 0x62, 0x41, 0x6f, 0x47, 0x41, 0x62, 0x39,
0x35, 0x5a, 0x70, 0x42, 0x56, 0x61, 0x51, 0x5a, 0x75, 0x31, 0x54, 0x45, 0x72, 0x62, 0x73, 0x63,
0x46, 0x76, 0x0a, 0x4d, 0x62, 0x2f, 0x65, 0x57, 0x70, 0x32, 0x49, 0x41, 0x49, 0x2b, 0x75, 0x5a,
0x2f, 0x73, 0x75, 0x72, 0x49, 0x5a, 0x5a, 0x73, 0x70, 0x4a, 0x39, 0x35, 0x45, 0x6c, 0x32, 0x75,
0x33, 0x79, 0x44, 0x4f, 0x48, 0x37, 0x71, 0x45, 0x55, 0x73, 0x59, 0x65, 0x6f, 0x77, 0x41, 0x79,
0x66, 0x77, 0x31, 0x44, 0x65, 0x55, 0x53, 0x4f, 0x73, 0x43, 0x74, 0x62, 0x65, 0x47, 0x6c, 0x34,
0x68, 0x70, 0x55, 0x0a, 0x43, 0x56, 0x6f, 0x76, 0x61, 0x4f, 0x73, 0x78, 0x50, 0x78, 0x43, 0x4a,
0x4f, 0x2b, 0x4e, 0x74, 0x59, 0x64, 0x77, 0x68, 0x7a, 0x32, 0x32, 0x6a, 0x70, 0x79, 0x62, 0x6d,
0x76, 0x2b, 0x53, 0x2f, 0x2b, 0x74, 0x76, 0x39, 0x46, 0x65, 0x6a, 0x6c, 0x69, 0x31, 0x62, 0x35,
0x64, 0x5a, 0x6b, 0x41, 0x47, 0x4a, 0x45, 0x48, 0x63, 0x63, 0x70, 0x32, 0x59, 0x52, 0x35, 0x76,
0x4a, 0x69, 0x33, 0x7a, 0x0a, 0x42, 0x2f, 0x33, 0x6f, 0x2b, 0x6e, 0x63, 0x75, 0x53, 0x2b, 0x2b,
0x53, 0x70, 0x51, 0x4f, 0x4c, 0x69, 0x46, 0x70, 0x53, 0x4f, 0x42, 0x55, 0x3d, 0x0a, 0x2d, 0x2d,
0x2d, 0x2d, 0x2d, 0x45, 0x4e, 0x44, 0x20, 0x50, 0x52, 0x49, 0x56, 0x41, 0x54, 0x45, 0x20, 0x4b,
0x45, 0x59, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a,
};
| 29,675 |
4,756 |
/*
* Copyright (C) 2015 The Libphonenumber Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.i18n.phonenumbers;
import com.google.i18n.phonenumbers.Phonemetadata.PhoneMetadata;
import junit.framework.TestCase;
/**
 * Unit tests for {@link MultiFileMetadataSourceImpl}.
 *
 * <p>Exercises both geographical (region-keyed) and non-geographical (country-calling-code
 * keyed) metadata loading, for a correctly configured source and for one whose file prefix
 * does not exist.
 */
public class MultiFileMetadataSourceImplTest extends TestCase {
  /** Source backed by the default, bundled metadata files. */
  private static final MultiFileMetadataSourceImpl SOURCE =
      new MultiFileMetadataSourceImpl(MetadataManager.DEFAULT_METADATA_LOADER);
  /** Source deliberately configured with a file prefix that does not exist. */
  private static final MultiFileMetadataSourceImpl MISSING_FILE_SOURCE =
      new MultiFileMetadataSourceImpl("no/such/file", MetadataManager.DEFAULT_METADATA_LOADER);

  public void testGeoPhoneNumberMetadataLoadCorrectly() {
    // We should have some data for the UAE.
    PhoneMetadata uaeMetadata = SOURCE.getMetadataForRegion("AE");
    // assertEquals takes (expected, actual); the original had the arguments swapped, which
    // produces misleading failure messages.
    assertEquals(971, uaeMetadata.getCountryCode());
    assertTrue(uaeMetadata.hasGeneralDesc());
  }

  public void testGeoPhoneNumberMetadataLoadFromMissingFileThrowsException() throws Exception {
    try {
      MISSING_FILE_SOURCE.getMetadataForRegion("AE");
      fail("expected exception");
    } catch (RuntimeException e) {
      // The error message should identify the file that could not be loaded.
      assertTrue("Unexpected error: " + e, e.getMessage().contains("no/such/file"));
    }
  }

  public void testNonGeoPhoneNumberMetadataLoadCorrectly() {
    // We should have some data for international toll-free numbers.
    PhoneMetadata intlMetadata = SOURCE.getMetadataForNonGeographicalRegion(800);
    assertEquals("001", intlMetadata.getId());
    assertTrue(intlMetadata.hasGeneralDesc());
  }

  public void testNonGeoPhoneNumberMetadataLoadFromMissingFileThrowsException() throws Exception {
    try {
      MISSING_FILE_SOURCE.getMetadataForNonGeographicalRegion(800);
      fail("expected exception");
    } catch (RuntimeException e) {
      assertTrue("Unexpected error: " + e, e.getMessage().contains("no/such/file"));
    }
  }
}
| 751 |
8,027 |
<reponame>Unknoob/buck
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.core.rules.providers.collect.impl;
import com.facebook.buck.core.rules.providers.Provider;
import com.facebook.buck.core.rules.providers.ProviderInfo;
import com.facebook.buck.core.rules.providers.collect.ProviderInfoCollection;
import com.facebook.buck.core.rules.providers.lib.DefaultInfo;
import com.google.devtools.build.lib.events.Location;
import com.google.devtools.build.lib.skylarkinterface.StarlarkContext;
import com.google.devtools.build.lib.syntax.EvalException;
import com.google.devtools.build.lib.syntax.EvalUtils;
import com.google.devtools.build.lib.syntax.Runtime;
import java.util.Optional;
/**
 * Implementation of {@link ProviderInfoCollection} for legacy rules, which expose no
 * providers at all. Every lookup is empty, and accessing {@link DefaultInfo} is an error.
 */
public class LegacyProviderInfoCollectionImpl implements ProviderInfoCollection {

  /** Single shared instance; the class holds no state, so one suffices. */
  private static final LegacyProviderInfoCollectionImpl INSTANCE =
      new LegacyProviderInfoCollectionImpl();

  private LegacyProviderInfoCollectionImpl() {}

  /** Returns the shared, always-empty collection. */
  public static ProviderInfoCollection of() {
    return INSTANCE;
  }

  @Override
  public Object getIndex(Object key, Location loc, StarlarkContext context) throws EvalException {
    requireProviderKey(
        key, loc, "Type Target only supports indexing by object constructors, got %s instead");
    return Runtime.NONE;
  }

  @Override
  public boolean containsKey(Object key, Location loc, StarlarkContext context)
      throws EvalException {
    requireProviderKey(
        key, loc, "Type Target only supports querying by object constructors, got %s instead");
    return false;
  }

  @Override
  public <T extends ProviderInfo<T>> Optional<T> get(Provider<T> provider) {
    // Legacy rules never carry provider info.
    return Optional.empty();
  }

  @Override
  public <T extends ProviderInfo<T>> boolean contains(Provider<T> provider) {
    return false;
  }

  @Override
  public DefaultInfo getDefaultInfo() {
    throw new IllegalStateException(
        "Attempting to access DefaultInfo on a legacy rule that does not expose providers");
  }

  /**
   * Throws an {@link EvalException} built from {@code messageFormat} unless {@code key} is a
   * {@link Provider}.
   */
  private void requireProviderKey(Object key, Location loc, String messageFormat)
      throws EvalException {
    if (key instanceof Provider) {
      return;
    }
    throw new EvalException(loc, String.format(messageFormat, EvalUtils.getDataTypeName(key)));
  }
}
| 821 |
7,482 |
/*
* @ : Copyright (c) 2021 Phytium Information Technology, Inc.
*
* SPDX-License-Identifier: Apache-2.0.
*
* @Date: 2021-04-05 22:15:53
* @LastEditTime: 2021-05-25 16:45:36
* @Description: Description of file
* @Modify History:
* * * Ver Who Date Changes
* * ----- ------ -------- --------------------------------------
* * 1.00 hh 2021.04-06 init
*/
#include "ft_qspi.h"
#include "qspi_hw.h"
#include "ft_io.h"
#include "ft_assert.h"
#include "ft_types.h"
#include "string.h"
#include "ft_debug.h"
/* Module tag and level-specific wrappers around the project-wide debug printer. */
#define FTQSPI_DEBUG_TAG "FTQSPI"
#define FTQSPI_DEBUG_I(format, ...) FT_DEBUG_PRINT_I(FTQSPI_DEBUG_TAG, format, ##__VA_ARGS__)
#define FTQSPI_DEBUG_E(format, ...) FT_DEBUG_PRINT_E(FTQSPI_DEBUG_TAG, format, ##__VA_ARGS__)
#define FTQSPI_DEBUG_W(format, ...) FT_DEBUG_PRINT_W(FTQSPI_DEBUG_TAG, format, ##__VA_ARGS__)
/**
 * @name: FQSpi_CfgInitialize
 * @msg: Initializes a driver instance from a configuration, marks it ready,
 *       and resets the controller.
 * @in param {FQSpi_t} *pQspi driver instance to initialize
 * @in param {FQSpi_Config_t} *pConfig configuration copied into the instance
 * @return {ft_error_t} FQSPI_SUCCESS
 */
ft_error_t FQSpi_CfgInitialize(FQSpi_t *pQspi, FQSpi_Config_t *pConfig)
{
    Ft_assertNonvoid(pQspi != NULL);
    Ft_assertNonvoid(pConfig != NULL);
    /* Keep a private copy so the caller's struct may be discarded. */
    pQspi->config = *pConfig;
    /* Mark ready before reset: the other driver entry points assert on isReady. */
    pQspi->isReady = FT_COMPONENT_IS_READLY;
    FQSpi_Reset(pQspi);
    return FQSPI_SUCCESS;
}
/**
 * @name: FQSpi_MemcpyToReg
 * @msg: Packs up to four bytes into the QSPI data (LD) port register.
 * @in param {FQSpi_t} *pQspi driver instance, must be initialized
 * @in param {u8} *buf source bytes (packed little-endian)
 * @in param {u32} length number of bytes, at most 4
 * @return {ft_error_t} FQSPI_SUCCESS, or FQSPI_FAILURE on bad arguments
 */
static ft_error_t FQSpi_MemcpyToReg(FQSpi_t *pQspi, FT_IN u8 *buf, u32 length)
{
    u32 regVal = 0;
    u32 byteIndex;
    FQSpi_Config_t *pCfg = NULL;
    Ft_assertNonvoid(pQspi != NULL);
    Ft_assertNonvoid(pQspi->isReady == FT_COMPONENT_IS_READLY);
    pCfg = &pQspi->config;

    if (!buf || (length > 4))
    {
        return FQSPI_FAILURE;
    }

    /* Assemble the bytes little-endian: buf[0] lands in bits 7:0. */
    for (byteIndex = 0; byteIndex < length; byteIndex++)
    {
        regVal |= ((u32)buf[byteIndex]) << (byteIndex * 8);
    }

    Ft_out32(pCfg->baseAddress + FT_REG_QSPI_LD_PORT_OFFSET, regVal);
    return FQSPI_SUCCESS;
}
/**
 * @name: FQSpi_MemcpyFromReg
 * @msg: Drains bytes from the QSPI data (LD) port register into memory.
 * @in param {FQSpi_t} *pQspi driver instance, must be initialized
 * @out param {u8} *buf destination buffer
 * @in param {u32} length number of bytes to read
 * @return {ft_error_t} FQSPI_SUCCESS
 */
static ft_error_t FQSpi_MemcpyFromReg(FQSpi_t *pQspi, u8 *buf, u32 length)
{
    u32 byteIndex;
    u32 word = 0;
    FQSpi_Config_t *pCfg = NULL;
    Ft_assertNonvoid(pQspi != NULL);
    Ft_assertNonvoid(pQspi->isReady == FT_COMPONENT_IS_READLY);
    pCfg = &pQspi->config;

    for (byteIndex = 0; byteIndex < length; byteIndex++)
    {
        /* Each register read yields 32 bits; refill on every fourth byte. */
        if ((byteIndex % 4) == 0)
        {
            word = Ft_in32(pCfg->baseAddress + FT_REG_QSPI_LD_PORT_OFFSET);
        }
        buf[byteIndex] = (u8)(word >> ((byteIndex % 4) * 8)) & 0xff;
    }
    return FQSPI_SUCCESS;
}
/**
 * @name: FQSpi_FlashRead
 * @msg: Reads bytes from the memory-mapped flash into a buffer.
 * @in param pQspi: driver instance, must be initialized
 * @in param cmd: instruction byte of the read command
 * @in param addr: flash-mapped start address of the data
 * @out param rxBuf: destination buffer
 * @in param length: number of bytes to read
 * @return {ft_error_t} FQSPI_SUCCESS, or FQSPI_FAILURE on bad arguments
 */
ft_error_t FQSpi_FlashRead(FQSpi_t *pQspi,
                           FT_IN u8 cmd,
                           FT_IN u32 addr,
                           FT_OUT u8 *rxBuf,
                           u32 length)
{
    FQSpi_RdCfgReg_t readCfg;
    FQSpi_Config_t *pCfg = NULL;
    Ft_assertNonvoid(pQspi != NULL);
    Ft_assertNonvoid(pQspi->isReady == FT_COMPONENT_IS_READLY);
    pCfg = &pQspi->config;

    if ((NULL == rxBuf) || (0 == length))
    {
        return FQSPI_FAILURE;
    }

    /* Program the read configuration: command byte, buffered data path,
       address width, clock divisor and transfer mode. */
    readCfg.data = 0;
    readCfg.val.rdCmd = cmd;
    readCfg.val.dBuffer = 1;
    readCfg.val.rdAddrSel = pCfg->addrMode;
    readCfg.val.rdSckSel = pCfg->clkDiv;
    readCfg.val.rdTransfer = pCfg->transMode;
    Ft_out32(pCfg->baseAddress + FT_REG_QSPI_RD_CFG_OFFSET, readCfg.data);

    /* Flash contents are memory mapped, so a plain copy performs the read.
       NOTE(review): addr is a u32 cast to a pointer -- assumes a 32-bit
       flash window; confirm on 64-bit targets. */
    memcpy(rxBuf, (char *)(addr), length);
    return FQSPI_SUCCESS;
}
/**
 * @name: FQSpi_FlashWrite
 * @msg: Writes one page into flash, changing bits from 1 to 0.
 * @in param pQspi: driver instance, must be initialized
 * @in param cmd: instruction byte of the write command
 * @in param addr: flash-mapped start address of the data
 * @in param txBuf: source buffer
 * @in param length: number of bytes to write
 * @return {ft_error_t} FQSPI_SUCCESS, or FQSPI_FAILURE on bad arguments
 */
ft_error_t FQSpi_FlashWrite(FQSpi_t *pQspi,
                            FT_IN u8 cmd,
                            FT_IN u32 addr,
                            FT_IN u8 *txBuf,
                            u32 length)
{
    FQSpi_Config_t *pConfig = NULL;
    FQSpi_WrCfgReg_t wrCfgReg;
    u32 index = 0;
    u32 val = 0;
    u32 *pu32Buf = NULL;
    Ft_assertNonvoid(pQspi != NULL);
    Ft_assertNonvoid(pQspi->isReady == FT_COMPONENT_IS_READLY);
    pConfig = &pQspi->config;
    if ((NULL == txBuf) || (0 == length))
    {
        return FQSPI_FAILURE;
    }
    pu32Buf = (u32 *)txBuf;
    /* Program the write configuration: command byte, clock divisor, address
       width and transfer mode; wrWait makes the controller wait for the
       flash after the write. */
    wrCfgReg.data = 0;
    wrCfgReg.val.wrCmd = cmd;
    wrCfgReg.val.wrWait = 1;
    wrCfgReg.val.wrSckSel = pConfig->clkDiv;
    wrCfgReg.val.wrAddrsel = pConfig->addrMode;
    wrCfgReg.val.wrTransfer = pConfig->transMode;
    wrCfgReg.val.wrMode = 1;
    Ft_out32(pConfig->baseAddress + FT_REG_QSPI_WR_CFG_OFFSET, wrCfgReg.data);
    while (length)
    {
        if (length >= 4)
        {
            /* Whole words go straight to the memory-mapped window. */
            Ft_out32(addr + index, pu32Buf[index / 4]);
            length -= 4;
            index += 4;
        }
        else
        {
            /* Tail of 1-3 bytes: pack little-endian and pad the unused byte
               lanes with 0xFF so those flash bits stay unprogrammed. */
            if (1 == length)
            {
                val = txBuf[index] | 0xFFFFFF00;
            }
            else if (2 == length)
            {
                val = txBuf[index] | (txBuf[index + 1] << 8) | 0xFFFF0000;
            }
            else
            {
                /* Bug fix: the third byte belongs in bits 23:16; it was
                   previously shifted by 8, corrupting byte lane 1 and
                   leaving lane 2 as 0x00. */
                val = txBuf[index] | (txBuf[index + 1] << 8) | (txBuf[index + 2] << 16) | 0xFF000000;
            }
            Ft_out32(addr + index, val);
            length = 0;
        }
    }
    /* Flush the controller's write buffer to commit the page. */
    Ft_out32(pConfig->baseAddress + FT_REG_QSPI_FLUSH_OFFSET, 1);
    return FQSPI_SUCCESS;
}
/**
 * @name: FQSpi_FlashRegSet
 * @msg: Issues a command to the flash, optionally sending register data.
 * @in param pQspi: driver instance, must be initialized
 * @in param cmd: command byte
 * @in param writebuf: data to send (unused when length is 0)
 * @in param length: number of data bytes, at most 4
 * @return {ft_error_t} FQSPI_SUCCESS
 */
ft_error_t FQSpi_FlashRegSet(FQSpi_t *pQspi,
                             FT_IN u8 cmd,
                             FT_IN u8 *writebuf,
                             u32 length)
{
    FQSpi_CmdPortReg_t cmdReg;
    FQSpi_Config_t *pCfg = NULL;
    Ft_assertNonvoid(pQspi != NULL);
    Ft_assertNonvoid(pQspi->isReady == FT_COMPONENT_IS_READLY);
    pCfg = &pQspi->config;

    /* Common command-port setup: command byte, clock divisor, transfer mode
       and chip select. */
    cmdReg.data = 0;
    cmdReg.val.cmd = cmd;
    cmdReg.val.wait = 1;
    cmdReg.val.sckSel = pCfg->clkDiv;
    cmdReg.val.transfer = pCfg->transMode;
    cmdReg.val.cs = pCfg->channel;

    if (0 == length)
    {
        /* Command without data: touching the LD port triggers the transfer. */
        Ft_out32(pCfg->baseAddress + FT_REG_QSPI_CMD_PORT_OFFSET, cmdReg.data);
        Ft_out32(pCfg->baseAddress + FT_REG_QSPI_LD_PORT_OFFSET, 1);
        return FQSPI_SUCCESS;
    }

    /* Command with payload: enable the data phase and push the bytes. */
    cmdReg.val.dataTransfer = 1;
    cmdReg.val.rwMum = length;
    Ft_out32(pCfg->baseAddress + FT_REG_QSPI_CMD_PORT_OFFSET, cmdReg.data);
    FQSpi_MemcpyToReg(pQspi, writebuf, length);
    return FQSPI_SUCCESS;
}
/**
 * @name: FQSpi_FlashRegSetWithaddr
 * @msg: Issues a command plus address to the flash, optionally sending data.
 * @in param pQspi: driver instance, must be initialized
 * @in param cmd: command byte
 * @in param addr: address phase value
 * @in param writebuf: data to send (unused when length is 0)
 * @in param length: number of data bytes, at most 4
 * @return {ft_error_t} FQSPI_SUCCESS
 */
ft_error_t FQSpi_FlashRegSetWithaddr(FQSpi_t *pQspi,
                                     FT_IN u8 cmd,
                                     FT_IN u32 addr,
                                     FT_IN u8 *writebuf,
                                     u32 length)
{
    FQSpi_CmdPortReg_t cmdReg;
    FQSpi_Config_t *pCfg = NULL;
    Ft_assertNonvoid(pQspi != NULL);
    Ft_assertNonvoid(pQspi->isReady == FT_COMPONENT_IS_READLY);
    pCfg = &pQspi->config;

    cmdReg.data = 0;
    cmdReg.val.cmd = cmd;
    cmdReg.val.wait = 1;
    cmdReg.val.sckSel = pCfg->clkDiv;
    cmdReg.val.transfer = pCfg->transMode;
    cmdReg.val.cs = pCfg->channel;
    /* Enable the address phase with the configured address width. */
    cmdReg.val.cmdAddr = 1;
    cmdReg.val.addrSel = pCfg->addrMode;
    Ft_out32(pCfg->baseAddress + FT_REG_QSPI_ADDR_PORT_OFFSET, addr);

    if (0 == length)
    {
        /* No payload: touching the LD port triggers the transfer. */
        Ft_out32(pCfg->baseAddress + FT_REG_QSPI_CMD_PORT_OFFSET, cmdReg.data);
        Ft_out32(pCfg->baseAddress + FT_REG_QSPI_LD_PORT_OFFSET, 0);
        return FQSPI_SUCCESS;
    }

    /* Payload present: enable the data phase and push the bytes. */
    cmdReg.val.dataTransfer = 1;
    cmdReg.val.rwMum = length;
    Ft_out32(pCfg->baseAddress + FT_REG_QSPI_CMD_PORT_OFFSET, cmdReg.data);
    FQSpi_MemcpyToReg(pQspi, writebuf, length);
    return FQSPI_SUCCESS;
}
/**
 * @name: FQSpi_FlashRegGet
 * @msg: Reads a flash register: issues a command and collects the reply bytes.
 * @in param pQspi: driver instance, must be initialized
 * @in param cmd: command byte
 * @out param readbuf: destination buffer
 * @in param length: number of bytes to read
 * @return {ft_error_t} FQSPI_SUCCESS
 */
ft_error_t FQSpi_FlashRegGet(FQSpi_t *pQspi,
                             FT_IN u8 cmd,
                             u8 *readbuf,
                             u32 length)
{
    FQSpi_CmdPortReg_t cmdReg;
    FQSpi_Config_t *pCfg = NULL;
    Ft_assertNonvoid(pQspi != NULL);
    Ft_assertNonvoid(pQspi->isReady == FT_COMPONENT_IS_READLY);
    pCfg = &pQspi->config;

    cmdReg.data = 0;
    cmdReg.val.cmd = cmd;
    cmdReg.val.wait = 1;
    cmdReg.val.sckSel = pCfg->clkDiv;
    cmdReg.val.transfer = pCfg->transMode;
    cmdReg.val.cs = pCfg->channel;
    /* Data phase in the read direction, buffered through the LD port. */
    cmdReg.val.dataTransfer = 1;
    cmdReg.val.pBuffer = 1;
    Ft_out32(pCfg->baseAddress + FT_REG_QSPI_CMD_PORT_OFFSET, cmdReg.data);
    FQSpi_MemcpyFromReg(pQspi, readbuf, length);
    return FQSPI_SUCCESS;
}
/**
 * @name: FQSpi_FlashRegGetWithAddr
 * @msg: Reads a flash register that requires an address phase and dummy cycles.
 * @in param pQspi: driver instance, must be initialized
 * @in param cmd: command byte
 * @in param addr: address phase value
 * @in param dummyCycle: number of dummy cycles between address and data (>= 1)
 * @out param readbuf: destination buffer
 * @in param length: number of bytes to read
 * @return {ft_error_t} FQSPI_SUCCESS
 */
ft_error_t FQSpi_FlashRegGetWithAddr(FQSpi_t *pQspi,
                                     FT_IN u8 cmd,
                                     FT_IN u32 addr,
                                     FT_IN u32 dummyCycle,
                                     u8 *readbuf,
                                     u32 length)
{
    FQSpi_CmdPortReg_t cmdReg;
    FQSpi_Config_t *pCfg = NULL;
    Ft_assertNonvoid(pQspi != NULL);
    Ft_assertNonvoid(pQspi->isReady == FT_COMPONENT_IS_READLY);
    pCfg = &pQspi->config;

    cmdReg.data = 0;
    cmdReg.val.cmd = cmd;
    cmdReg.val.wait = 1;
    cmdReg.val.sckSel = pCfg->clkDiv;
    cmdReg.val.transfer = pCfg->transMode;
    cmdReg.val.cs = pCfg->channel;
    cmdReg.val.dataTransfer = 1;
    cmdReg.val.pBuffer = 1;
    /* Address phase with the configured width, then dummy cycles before the
       data phase; the register field encodes cycles - 1. */
    cmdReg.val.cmdAddr = 1;
    cmdReg.val.addrSel = pCfg->addrMode;
    cmdReg.val.latency = 1;
    cmdReg.val.dummy = dummyCycle - 1;
    Ft_out32(pCfg->baseAddress + FT_REG_QSPI_ADDR_PORT_OFFSET, addr);
    Ft_out32(pCfg->baseAddress + FT_REG_QSPI_CMD_PORT_OFFSET, cmdReg.data);
    FQSpi_MemcpyFromReg(pQspi, readbuf, length);
    return FQSPI_SUCCESS;
}
/**
 * @name: FQSpi_Write
 * @msg: Writes a data pack to flash through the memory-mapped window.
 * @in param pQspi: driver instance, must be initialized
 * @in param pDataPack: descriptor holding cmd, address-width flags, addr,
 *                      txBuf and length
 * @return {ft_error_t} FQSPI_SUCCESS, or FQSPI_FAILURE on bad arguments
 */
ft_error_t FQSpi_Write(FQSpi_t *pQspi, struct FQSpi_DataPack *pDataPack)
{
    FQSpi_Config_t *pConfig = NULL;
    FQSpi_WrCfgReg_t wrCfgReg;
    u32 length;
    u32 index = 0;
    u32 val = 0;
    const u32 *pu32Buf = NULL;
    Ft_assertNonvoid(pQspi != NULL);
    Ft_assertNonvoid(pQspi->isReady == FT_COMPONENT_IS_READLY);
    pConfig = &pQspi->config;
    /* Exactly one address-width flag must be present. */
    if ((FQSPI_DATA_ADDRESS_3BYTE_MASK | FQSPI_DATA_ADDRESS_4BYTE_MASK) == (pDataPack->flags & (FQSPI_DATA_ADDRESS_3BYTE_MASK | FQSPI_DATA_ADDRESS_4BYTE_MASK)))
    {
        FTQSPI_DEBUG_E(" Two addresses are not allowed at the same time ");
        return FQSPI_FAILURE;
    }
    if (0 == (pDataPack->flags & (FQSPI_DATA_ADDRESS_3BYTE_MASK | FQSPI_DATA_ADDRESS_4BYTE_MASK)))
    {
        FTQSPI_DEBUG_E(" There is no address configuration ");
        return FQSPI_FAILURE;
    }
    if (NULL == pDataPack->txBuf)
    {
        FTQSPI_DEBUG_E("pDataPack->txBuf is null");
        return FQSPI_FAILURE;
    }
    pu32Buf = (const u32 *)pDataPack->txBuf;
    wrCfgReg.data = 0;
    if (FQSPI_DATA_ADDRESS_3BYTE_MASK == (pDataPack->flags & FQSPI_DATA_ADDRESS_3BYTE_MASK))
    {
        wrCfgReg.val.wrAddrsel = FT_QSPI_ADDR_SEL_3;
    }
    else if (FQSPI_DATA_ADDRESS_4BYTE_MASK == (pDataPack->flags & FQSPI_DATA_ADDRESS_4BYTE_MASK))
    {
        wrCfgReg.val.wrAddrsel = FT_QSPI_ADDR_SEL_4;
    }
    wrCfgReg.val.wrCmd = pDataPack->cmd;
    wrCfgReg.val.wrWait = 1;
    wrCfgReg.val.wrSckSel = pConfig->clkDiv;
    wrCfgReg.val.wrTransfer = pConfig->transMode;
    wrCfgReg.val.wrMode = 1;
    length = pDataPack->length;
    Ft_out32(pConfig->baseAddress + FT_REG_QSPI_WR_CFG_OFFSET, wrCfgReg.data);
    while (length)
    {
        if (length >= 4)
        {
            /* Whole words go straight to the memory-mapped window. */
            Ft_out32(pDataPack->addr + index, pu32Buf[index / 4]);
            length -= 4;
            index += 4;
        }
        else
        {
            /* Tail of 1-3 bytes: pack little-endian and pad the unused byte
               lanes with 0xFF so those flash bits stay unprogrammed. */
            if (1 == length)
            {
                val = pDataPack->txBuf[index] | 0xFFFFFF00;
            }
            else if (2 == length)
            {
                val = pDataPack->txBuf[index] | (pDataPack->txBuf[index + 1] << 8) | 0xFFFF0000;
            }
            else
            {
                /* Bug fix: the third byte belongs in bits 23:16; it was
                   previously shifted by 8, corrupting byte lane 1 and
                   leaving lane 2 as 0x00. */
                val = pDataPack->txBuf[index] | (pDataPack->txBuf[index + 1] << 8) | (pDataPack->txBuf[index + 2] << 16) | 0xFF000000;
            }
            FTQSPI_DEBUG_I("val is 0x%x", val);
            Ft_out32(pDataPack->addr + index, val);
            length = 0;
        }
    }
    /* Flush the controller's write buffer to commit the transfer. */
    Ft_out32(pConfig->baseAddress + FT_REG_QSPI_FLUSH_OFFSET, 1);
    return FQSPI_SUCCESS;
}
/**
 * @name: FQSpi_Read
 * @msg: Reads a data pack from flash through the memory-mapped window.
 * @in param pQspi: driver instance, must be initialized
 * @in param pDataPack: descriptor holding cmd, address-width flags, optional
 *                      dummy cycles, addr, rxBuf and length
 * @return {ft_error_t} FQSPI_SUCCESS, or FQSPI_FAILURE on bad arguments
 */
ft_error_t FQSpi_Read(FQSpi_t *pQspi, struct FQSpi_DataPack *pDataPack)
{
    FQSpi_RdCfgReg_t readCfg;
    FQSpi_Config_t *pCfg = NULL;
    Ft_assertNonvoid(pQspi != NULL);
    Ft_assertNonvoid(pQspi->isReady == FT_COMPONENT_IS_READLY);
    pCfg = &pQspi->config;

    /* Exactly one address-width flag must be present. */
    if ((FQSPI_DATA_ADDRESS_3BYTE_MASK | FQSPI_DATA_ADDRESS_4BYTE_MASK) == (pDataPack->flags & (FQSPI_DATA_ADDRESS_3BYTE_MASK | FQSPI_DATA_ADDRESS_4BYTE_MASK)))
    {
        FTQSPI_DEBUG_E(" Two addresses are not allowed at the same time ");
        return FQSPI_FAILURE;
    }
    if (0 == (pDataPack->flags & (FQSPI_DATA_ADDRESS_3BYTE_MASK | FQSPI_DATA_ADDRESS_4BYTE_MASK)))
    {
        FTQSPI_DEBUG_E(" There is no address configuration ");
        return FQSPI_FAILURE;
    }
    if (NULL == pDataPack->rxBuf)
    {
        FTQSPI_DEBUG_E("pDataPack->rxBuf is null");
        return FQSPI_FAILURE;
    }

    readCfg.data = 0;
    if (FQSPI_DATA_NEED_DUMMY_MASK == (pDataPack->flags & FQSPI_DATA_NEED_DUMMY_MASK))
    {
        /* Dummy cycles between address and data; the field encodes cycles - 1. */
        readCfg.val.rdLatency = 1;
        readCfg.val.dummy = pDataPack->dummyCycle - 1;
    }
    /* The earlier checks guarantee exactly one width flag is set. */
    if (FQSPI_DATA_ADDRESS_3BYTE_MASK == (pDataPack->flags & FQSPI_DATA_ADDRESS_3BYTE_MASK))
    {
        readCfg.val.rdAddrSel = FT_QSPI_ADDR_SEL_3;
    }
    else
    {
        readCfg.val.rdAddrSel = FT_QSPI_ADDR_SEL_4;
    }
    readCfg.val.rdCmd = pDataPack->cmd;
    readCfg.val.dBuffer = 1;
    readCfg.val.rdSckSel = pCfg->clkDiv;
    readCfg.val.rdTransfer = pCfg->transMode;
    Ft_out32(pCfg->baseAddress + FT_REG_QSPI_RD_CFG_OFFSET, readCfg.data);

    /* Flash contents are memory mapped: a plain copy performs the transfer. */
    memcpy(pDataPack->rxBuf, (char *)(pDataPack->addr), pDataPack->length);
    return FQSPI_SUCCESS;
}
/**
 * @name: FQSpi_CmdOperation
 * @msg: Generic command execution driven by pCmdPack->flags: optional address
 *       phase, optional dummy cycles, and an optional data phase in either
 *       direction through the LD port.
 * @in param pQspi: driver instance, must be initialized
 * @in param pCmdPack: command descriptor (cmd, flags, addr, dummyCycle,
 *       txBuf/rxBuf, length)
 * @return {ft_error_t} FQSPI_SUCCESS, or FQSPI_FAILURE on conflicting flags
 */
ft_error_t
FQSpi_CmdOperation(FQSpi_t *pQspi, struct FQSpi_CmdPack *pCmdPack)
{
    FQSpi_Config_t *pConfig = NULL;
    FQSpi_CmdPortReg_t cmdPortReg;
    Ft_assertNonvoid(pQspi != NULL);
    Ft_assertNonvoid(pQspi->isReady == FT_COMPONENT_IS_READLY);
    pConfig = &pQspi->config;
    /* The 3-byte and 4-byte address widths are mutually exclusive. */
    if ((FQSPI_CMD_ADDRESS_3BYTE_MASK | FQSPI_CMD_ADDRESS_4BYTE_MASK) == (pCmdPack->flags & (FQSPI_CMD_ADDRESS_3BYTE_MASK | FQSPI_CMD_ADDRESS_4BYTE_MASK)))
    {
        FTQSPI_DEBUG_E(" Two addresses are not allowed at the same time ");
        return FQSPI_FAILURE;
    }
    /* Common command-port setup: command byte, clock divisor, transfer mode
       and chip select. */
    cmdPortReg.data = 0;
    cmdPortReg.val.cmd = pCmdPack->cmd;
    cmdPortReg.val.wait = 1;
    cmdPortReg.val.sckSel = pConfig->clkDiv;
    cmdPortReg.val.transfer = pConfig->transMode;
    cmdPortReg.val.cs = pConfig->channel;
    /* Optional address phase: latch the address before the command is issued. */
    if (FQSPI_CMD_NEED_ADDR_MASK == (pCmdPack->flags & FQSPI_CMD_NEED_ADDR_MASK))
    {
        // FTQSPI_DEBUG_I(" send addr is 0x%x ", pCmdPack->addr);
        cmdPortReg.val.cmdAddr = 1;
        Ft_out32(pConfig->baseAddress + FT_REG_QSPI_ADDR_PORT_OFFSET, pCmdPack->addr);
    }
    /* Optional dummy cycles; the register field encodes cycles - 1. */
    if (FQSPI_CMD_NEED_DUMMY_MASK == (pCmdPack->flags & FQSPI_CMD_NEED_DUMMY_MASK))
    {
        cmdPortReg.val.latency = 1;
        cmdPortReg.val.dummy = pCmdPack->dummyCycle - 1;
    }
    if (FQSPI_CMD_ADDRESS_3BYTE_MASK == (pCmdPack->flags & FQSPI_CMD_ADDRESS_3BYTE_MASK))
    {
        cmdPortReg.val.addrSel = FT_QSPI_ADDR_SEL_3;
    }
    else if (FQSPI_CMD_ADDRESS_4BYTE_MASK == (pCmdPack->flags & FQSPI_CMD_ADDRESS_4BYTE_MASK))
    {
        cmdPortReg.val.addrSel = FT_QSPI_ADDR_SEL_4;
    }
    /* Data phase: write direction takes precedence over read direction. */
    if (FQSPI_CMD_NEED_SET_MASK == (pCmdPack->flags & (FQSPI_CMD_NEED_SET_MASK)))
    {
        cmdPortReg.val.dataTransfer = 1;
        cmdPortReg.val.rwMum = pCmdPack->length;
        Ft_out32(pConfig->baseAddress + FT_REG_QSPI_CMD_PORT_OFFSET, cmdPortReg.data);
        FQSpi_MemcpyToReg(pQspi, pCmdPack->txBuf, pCmdPack->length);
    }
    else if (FQSPI_CMD_NEED_GET_MASK == (pCmdPack->flags & (FQSPI_CMD_NEED_GET_MASK)))
    {
        cmdPortReg.val.dataTransfer = 1;
        cmdPortReg.val.pBuffer = 1;
        Ft_out32(pConfig->baseAddress + FT_REG_QSPI_CMD_PORT_OFFSET, cmdPortReg.data);
        FQSpi_MemcpyFromReg(pQspi, pCmdPack->rxBuf, pCmdPack->length);
    }
    else
    {
        /* No data phase: touching the LD port triggers the transfer. */
        Ft_out32(pConfig->baseAddress + FT_REG_QSPI_CMD_PORT_OFFSET, cmdPortReg.data);
        if (FQSPI_CMD_NEED_ADDR_MASK == (pCmdPack->flags & FQSPI_CMD_NEED_ADDR_MASK))
        {
            Ft_out32(pConfig->baseAddress + FT_REG_QSPI_LD_PORT_OFFSET, 0);
        }
        else
        {
            Ft_out32(pConfig->baseAddress + FT_REG_QSPI_LD_PORT_OFFSET, 1);
        }
    }
    return FQSPI_SUCCESS;
}
| 9,404 |
1,652 |
<filename>redis/redis-core/src/main/java/com/ctrip/xpipe/redis/core/proxy/endpoint/SelectOneCycle.java
package com.ctrip.xpipe.redis.core.proxy.endpoint;
/**
 * A {@code SelectStrategy} that lets endpoint selection proceed for exactly
 * one full cycle over the candidate endpoints.
 *
 * @author chen.zhu
 * <p>
 * Jun 01, 2018
 */
public class SelectOneCycle implements SelectStrategy {

    // Never reassigned after construction; declared final to document the
    // intent and guarantee safe publication.
    private final ProxyEndpointSelector selector;

    public SelectOneCycle(ProxyEndpointSelector selector) {
        this.selector = selector;
    }

    @Override
    public boolean select() {
        // Keep selecting while fewer selections have been made than there are
        // candidates, i.e. until each candidate has been tried once.
        return selector.selectCounts() < selector.getCandidates().size();
    }
}
| 196 |
536 |
<reponame>Chinalijian/ACGN2D3D
//
// UMSocialWarterMarkConfig.h
// testWatermarkImage
//
// Created by 张军华 on 16/12/23.
// Copyright © 2016年 张军华. All rights reserved.
//
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
#import <CoreGraphics/CoreGraphics.h>
@class UMSocialStringWarterMarkConfig;
@class UMSocialImageWarterMarkConfig;
typedef NS_ENUM(NSInteger, UMSocialWarterMarkPositon) {
    UMSocialWarterMarkPositonNone = 0,
    /************************************************************************
     Positions for the watermark string; currently unused -- start
     *************************************************************************/
    UMSocialStringWarterMarkTopLeft = (1 << 0),
    UMSocialStringWarterMarkTopRight = (1 << 1),
    UMSocialStringWarterMarkBottomLeft = (1 << 2),
    UMSocialStringWarterMarkBottomRight = (1 << 3),
    /************************************************************************
     Positions for the watermark string; currently unused -- end
     *************************************************************************/
    // Positions for the watermark image
    UMSocialImageWarterMarkTopLeft = (1 << 4),
    UMSocialImageWarterMarkTopRight = (1 << 5),
    UMSocialImageWarterMarkBottomLeft = (1 << 6),
    UMSocialImageWarterMarkBottomRight = (1 << 7),
    /************************************************************************
     Relative position of the watermark string and image; currently unused
     (needed when both are anchored to the same corner) -- start
     *************************************************************************/
    UMSocialImageWarterMarkForwardStringWarterMark = (1 << 8), // image before the string
    UMSocialStringWarterMarkForwardImageWarterMark = (1 << 9), // string before the image
    UMSocialImageWarterMarkAboveStringWarterMark = (1 << 10), // image above the string
    UMSocialStringWarterMarkAboveImageWarterMark = (1 << 11), // string above the image
    /************************************************************************
     Relative position of the watermark string and image; currently unused -- end
     *************************************************************************/
};
typedef NS_OPTIONS(NSInteger, UMSocialStringAndImageWarterMarkPositon) {
    UMSocialStringAndImageWarterMarkPositonNone = 0,
    UMSocialOnlyImageWarterMarkTopLeft = UMSocialImageWarterMarkTopLeft, // image watermark at top-left
    UMSocialOnlyImageWarterMarkTopRight = UMSocialImageWarterMarkTopRight, // image watermark at top-right
    UMSocialOnlyImageWarterMarkBottomLeft = UMSocialImageWarterMarkBottomLeft, // image watermark at bottom-left
    UMSocialOnlyImageWarterMarkBottomRight = UMSocialImageWarterMarkBottomRight, // image watermark at bottom-right
    /************************************************************************
     The following string+image combinations are currently unused -- start
     *************************************************************************/
    UMSocialStringWarterMarkTopLeftAndImageWarterMarkTopLeft = (UMSocialStringWarterMarkTopLeft | UMSocialImageWarterMarkTopLeft), // string top-left, image top-left
    UMSocialStringWarterMarkTopLeftAndImageWarterMarkTopRight = (UMSocialStringWarterMarkTopLeft | UMSocialImageWarterMarkTopRight), // string top-left, image top-right
    UMSocialStringWarterMarkTopLeftAndImageWarterMarkBottomLeft = (UMSocialStringWarterMarkTopLeft | UMSocialImageWarterMarkBottomLeft), // string top-left, image bottom-left
    UMSocialStringWarterMarkTopLeftAndImageWarterMarkBottomRight = (UMSocialStringWarterMarkTopLeft | UMSocialImageWarterMarkBottomRight), // string top-left, image bottom-right
    UMSocialStringWarterMarkTopRightAndImageWarterMarkTopLeft = (UMSocialStringWarterMarkTopRight | UMSocialImageWarterMarkTopLeft), // string top-right, image top-left
    UMSocialStringWarterMarkTopRightAndImageWarterMarkTopRight = (UMSocialStringWarterMarkTopRight | UMSocialImageWarterMarkTopRight), // string top-right, image top-right
    UMSocialStringWarterMarkTopRightAndImageWarterMarkBottomLeft = (UMSocialStringWarterMarkTopRight | UMSocialImageWarterMarkBottomLeft), // string top-right, image bottom-left
    UMSocialStringWarterMarkTopRightAndImageWarterMarkBottomRight = (UMSocialStringWarterMarkTopRight | UMSocialImageWarterMarkBottomRight), // string top-right, image bottom-right
    UMSocialStringWarterMarkBottomLeftAndImageWarterMarkTopLeft = (UMSocialStringWarterMarkBottomLeft | UMSocialImageWarterMarkTopLeft), // string bottom-left, image top-left
    UMSocialStringWarterMarkBottomLeftAndImageWarterMarkTopRight = (UMSocialStringWarterMarkBottomLeft | UMSocialImageWarterMarkTopRight), // string bottom-left, image top-right
    UMSocialStringWarterMarkBottomLeftAndImageWarterMarkBottomLeft = (UMSocialStringWarterMarkBottomLeft | UMSocialImageWarterMarkBottomLeft), // string bottom-left, image bottom-left
    UMSocialStringWarterMarkBottomLeftAndImageWarterMarkBottomRight = (UMSocialStringWarterMarkBottomLeft | UMSocialImageWarterMarkBottomRight), // string bottom-left, image bottom-right
    UMSocialStringWarterMarkBottomRightAndImageWarterMarkTopLeft = (UMSocialStringWarterMarkBottomRight | UMSocialImageWarterMarkTopLeft), // string bottom-right, image top-left
    UMSocialStringWarterMarkBottomRightAndImageWarterMarkTopRight = (UMSocialStringWarterMarkBottomRight | UMSocialImageWarterMarkTopRight), // string bottom-right, image top-right
    UMSocialStringWarterMarkBottomRightAndImageWarterMarkBottomLeft = (UMSocialStringWarterMarkBottomRight | UMSocialImageWarterMarkBottomLeft), // string bottom-right, image bottom-left
    UMSocialStringWarterMarkBottomRightAndImageWarterMarkBottomRight = (UMSocialStringWarterMarkBottomRight | UMSocialImageWarterMarkBottomRight), // string bottom-right, image bottom-right
    /************************************************************************
     The following string+image combinations are currently unused -- end
     *************************************************************************/
};
// Helpers that extract the individual position components (string position,
// image position, relative ordering) from a combined position value.
extern UMSocialWarterMarkPositon getStringWarterMarkPostion(UMSocialStringAndImageWarterMarkPositon stringAndImageWarterMarkPositon);
extern UMSocialWarterMarkPositon getImageWarterMarkPostion(UMSocialStringAndImageWarterMarkPositon stringAndImageWarterMarkPositon);
extern UMSocialWarterMarkPositon getRelatedWarterMarkPostion(UMSocialStringAndImageWarterMarkPositon stringAndImageWarterMarkPositon);
/**
 * Watermark configuration class.
 * Callers use this class to configure the watermark; currently only image
 * watermarks are supported.
 *
 * Method 1:
 * Configure the watermark through the default configuration:
 UMSocialWarterMarkConfig* warterMarkConfig = [UMSocialWarterMarkConfig defaultWarterMarkConfig];
 *
 * Method 2:
 * Create and customize your own configuration:
 // Create a UMSocialImageWarterMarkConfig
 UMSocialImageWarterMarkConfig* imageWarterMarkConfig = [[UMSocialImageWarterMarkConfig alloc] init];
 // Configure imageWarterMarkConfig's parameters
 //...TODO
 // Create a UMSocialWarterMarkConfig
 UMSocialWarterMarkConfig* warterMarkConfig = [[UMSocialWarterMarkConfig alloc] init];
 // Configure warterMarkConfig's parameters
 //...TODO
 // Install the image configuration
 [warterMarkConfig setUserDefinedImageWarterMarkConfig:imageWarterMarkConfig];
 *
 *
 */
@interface UMSocialWarterMarkConfig : NSObject<NSCopying>
/**
 * Default configuration.
 *
 * @return the default configuration object
 */
+(UMSocialWarterMarkConfig*)defaultWarterMarkConfig;
@property(nonatomic,readonly,strong)UMSocialStringWarterMarkConfig* stringWarterMarkConfig; // string watermark configuration object
@property(nonatomic,readonly,strong)UMSocialImageWarterMarkConfig* imageWarterMarkConfig; // image watermark configuration object
/**
 * Position of the string and image watermarks.
 * The defaultWarterMarkConfig places both at the bottom-right corner,
 * image first and string after it.
 */
@property(nonatomic,readwrite,assign)UMSocialStringAndImageWarterMarkPositon stringAndImageWarterMarkPositon; // combined position of string and image
@property(nonatomic,readwrite,assign)CGFloat spaceBetweenStringWarterMarkAndImageWarterMark; // spacing between the string and image watermarks
/**
 * Installs a user-defined image watermark configuration.
 *
 * @param imageWarterMarkConfig the image configuration object
 */
-(void)setUserDefinedImageWarterMarkConfig:(UMSocialImageWarterMarkConfig*)imageWarterMarkConfig;
@end
/**
 * String watermark configuration class.
 * Currently unused.
 */
@interface UMSocialStringWarterMarkConfig : NSObject<NSCopying>
/**
 * Default configuration.
 *
 * @return the default configuration object
 */
+(UMSocialStringWarterMarkConfig*)defaultStringWarterMarkConfig;
// Returns YES when the configured parameters are valid.
-(BOOL)checkValid;
@property(nonatomic,readwrite,strong)NSAttributedString* warterMarkAttributedString; // watermark string
@property(nonatomic,readwrite,assign)NSUInteger warterMarkStringLimit; // character limit of the watermark string
@property(nonatomic,readwrite,strong)UIColor* warterMarkStringColor; // watermark string color (create a semi-transparent color for translucency)
@property(nonatomic,readwrite,strong)UIFont* warterMarkStringFont; // watermark string font
/**
 * Margin to the horizontal edge.
 * Depends on the anchor chosen in UMSocialWarterMarkPositon:
 e.g. for UMSocialStringWarterMarkBottomRight this is the gap to the parent's right edge;
 e.g. for UMSocialStringWarterMarkTopLeft this is the gap to the parent's left edge.
 */
@property(nonatomic,readwrite,assign)CGFloat paddingToHorizontalParentBorder; // margin to the horizontal edge
/**
 * Margin to the vertical edge.
 * Depends on the anchor chosen in UMSocialWarterMarkPositon:
 e.g. for UMSocialStringWarterMarkBottomRight this is the gap to the parent's bottom edge;
 e.g. for UMSocialStringWarterMarkTopLeft this is the gap to the parent's top edge.
 */
@property(nonatomic,readwrite,assign)CGFloat paddingToVerticalParentBorder; // margin to the vertical edge
@property(nonatomic,readonly,assign)CGAffineTransform warterMarkStringTransform; // transform applied to the watermark string
@end
/**
 * Image watermark configuration class.
 */
@interface UMSocialImageWarterMarkConfig : NSObject<NSCopying>
/**
 * Default configuration.
 *
 * @return the default configuration object
 */
+(UMSocialImageWarterMarkConfig*)defaultImageWarterMarkConfig;
// Returns YES when the configured parameters are valid.
-(BOOL)checkValid;
@property(nonatomic,readwrite,strong)UIImage* warterMarkImage; // watermark image
@property(nonatomic,readwrite,assign)CGFloat warterMarkImageScale; // scale of the watermark relative to the parent image (between 0 and 1)
@property(nonatomic,readwrite,assign)CGFloat warterMarkImageAlpha; // alpha blending value of the watermark image
/**
 * Margin to the horizontal edge.
 * Depends on the anchor chosen in UMSocialWarterMarkPositon:
 e.g. for UMSocialImageWarterMarkBottomRight this is the gap to the parent's right edge;
 e.g. for UMSocialImageWarterMarkTopLeft this is the gap to the parent's left edge.
 */
@property(nonatomic,readwrite,assign)CGFloat paddingToHorizontalParentBorder; // margin to the horizontal edge
/**
 * Margin to the vertical edge.
 * Depends on the anchor chosen in UMSocialWarterMarkPositon:
 e.g. for UMSocialImageWarterMarkBottomRight this is the gap to the parent's bottom edge;
 e.g. for UMSocialImageWarterMarkTopLeft this is the gap to the parent's top edge.
 */
@property(nonatomic,readwrite,assign)CGFloat paddingToVerticalParentBorder; // margin to the vertical edge
@property(nonatomic,readonly,assign)CGAffineTransform warterMarkImageTransform; // transform applied to the watermark image
@end
| 5,034 |
777 |
<filename>chrome/browser/ui/cocoa/autofill/autofill_tooltip_controller.h
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_BROWSER_UI_COCOA_AUTOFILL_AUTOFILL_TOOLTIP_CONTROLLER_H_
#define CHROME_BROWSER_UI_COCOA_AUTOFILL_AUTOFILL_TOOLTIP_CONTROLLER_H_
#import <Cocoa/Cocoa.h>
#include "base/mac/scoped_nsobject.h"
#import "chrome/browser/ui/cocoa/info_bubble_view.h"
@class AutofillBubbleController;
@class AutofillTooltip;
// Controller for the Tooltip view, which handles displaying/hiding the
// tooltip bubble on hover.
@interface AutofillTooltipController : NSViewController {
 @private
  base::scoped_nsobject<AutofillTooltip> view_;
  AutofillBubbleController* bubbleController_;
  NSString* message_;
  info_bubble::BubbleArrowLocation arrowLocation_;

  // Indicates whether a tooltip bubble should show. YES when hovering on icon
  // or tooltip bubble.
  BOOL shouldDisplayTooltip_;

  // Tracks whether mouse pointer currently hovers above bubble.
  BOOL isHoveringOnBubble_;
}

// |message| to display in the tooltip.
@property(copy, nonatomic) NSString* message;

// |arrowLocation| determines where the bubble's arrow is anchored.
- (id)initWithArrowLocation:(info_bubble::BubbleArrowLocation)arrowLocation;

// Sets the tooltip's icon image.
- (void)setImage:(NSImage*)image;

// Fix: removed the stray semicolon after @end (extra top-level ';').
@end
#endif // CHROME_BROWSER_UI_COCOA_AUTOFILL_AUTOFILL_TOOLTIP_CONTROLLER_H_
| 499 |
9,959 |
<filename>test/pretty/resource/after/ExoticJava.java
// NOTE(review): this file appears to be a pretty-printer test fixture
// (test/pretty/resource/after/ExoticJava.java); its exact formatting is the
// expected output of the formatter, so confirm before changing any
// whitespace or content -- including this comment.
import static java.lang.String.*;
import java.io.*;
class ExoticJava<V> {
    public <T> ExoticJava(T genericsInConstructor, V genericsInType) {
        System.out.println(new <String>ExoticJava<Integer>("Hello", 5));
    }
    public void test() {
        int x = 5;
        int[] y = {10};
        ;
        class MethodLocal implements Serializable, java.util.RandomAccess {
            @SuppressWarnings({"unchecked", "rawtypes"})
            public final strictfp int foo() {
                int x = super.hashCode();
                x <<= 5;
                do {
                    x <<= 5;
                } while (Boolean.FALSE);
                return x;
            }
        }
        for (int i = 10, j[] = {20}; i < 5; i++, j[0]++) {
            String z = "";
            try (
                PrintWriter pw = new PrintWriter(System.out);
                PrintWriter p2 = new PrintWriter(System.out)) {
                pw.println();
            } finally {
                synchronized (z) {
                    System.out.println(z);
                }
            }
            if ((y == null)) {
            }
            if (((y == null))) ;
            {
                ;
            }
            java.util.List<String> list = new java.util.ArrayList<>();
            assert Boolean.TRUE : "That\'s weird";
            double d = -1.8E12;
            long loooong = 305441741;
            int octal = 87;
        }
    }
}
| 509 |
36,552 |
//
// Copyright 2015 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <grpc/support/port_platform.h>
#include <grpc/support/log.h>
#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/surface/api_trace.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/completion_queue.h"
// Returns the channel's current connectivity state, optionally kicking off a
// connection attempt. Non-client channels are reported as SHUTDOWN.
grpc_connectivity_state grpc_channel_check_connectivity_state(
    grpc_channel* channel, int try_to_connect) {
  // Set up exec contexts for the duration of this API call.
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE(
      "grpc_channel_check_connectivity_state(channel=%p, try_to_connect=%d)", 2,
      (channel, try_to_connect));
  // Forward through to the underlying client channel.
  grpc_core::ClientChannel* client_channel =
      grpc_core::ClientChannel::GetFromChannel(channel);
  if (GPR_UNLIKELY(client_channel == nullptr)) {
    // Not a client channel (e.g. a server channel): no connectivity state
    // to query, so report SHUTDOWN.
    gpr_log(GPR_ERROR,
            "grpc_channel_check_connectivity_state called on something that is "
            "not a client channel");
    return GRPC_CHANNEL_SHUTDOWN;
  }
  return client_channel->CheckConnectivityState(try_to_connect);
}
// Returns how many external connectivity watchers are registered on
// |channel|; zero when the channel is not a client channel.
int grpc_channel_num_external_connectivity_watchers(grpc_channel* channel) {
  auto* client_channel = grpc_core::ClientChannel::GetFromChannel(channel);
  if (client_channel == nullptr) {
    gpr_log(GPR_ERROR,
            "grpc_channel_num_external_connectivity_watchers called on "
            "something that is not a client channel");
    return 0;
  }
  return client_channel->NumExternalConnectivityWatchers();
}
// Connectivity watching is supported exactly when |channel| is a client
// channel.
int grpc_channel_support_connectivity_watcher(grpc_channel* channel) {
  auto* client_channel = grpc_core::ClientChannel::GetFromChannel(channel);
  return client_channel != nullptr ? 1 : 0;
}
namespace grpc_core {
namespace {
class StateWatcher {
public:
StateWatcher(grpc_channel* channel, grpc_completion_queue* cq, void* tag,
grpc_connectivity_state last_observed_state,
gpr_timespec deadline)
: channel_(channel), cq_(cq), tag_(tag), state_(last_observed_state) {
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
GRPC_CHANNEL_INTERNAL_REF(channel, "watch_channel_connectivity");
GRPC_CLOSURE_INIT(&on_complete_, WatchComplete, this, nullptr);
GRPC_CLOSURE_INIT(&on_timeout_, TimeoutComplete, this, nullptr);
auto* watcher_timer_init_state = new WatcherTimerInitState(
this, grpc_timespec_to_millis_round_up(deadline));
ClientChannel* client_channel = ClientChannel::GetFromChannel(channel);
GPR_ASSERT(client_channel != nullptr);
client_channel->AddExternalConnectivityWatcher(
grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq)), &state_,
&on_complete_, watcher_timer_init_state->closure());
}
~StateWatcher() {
GRPC_CHANNEL_INTERNAL_UNREF(channel_, "watch_channel_connectivity");
}
private:
// A fire-and-forget object used to delay starting the timer until the
// ClientChannel actually starts the watch.
class WatcherTimerInitState {
public:
WatcherTimerInitState(StateWatcher* state_watcher, grpc_millis deadline)
: state_watcher_(state_watcher), deadline_(deadline) {
GRPC_CLOSURE_INIT(&closure_, WatcherTimerInit, this, nullptr);
}
grpc_closure* closure() { return &closure_; }
private:
static void WatcherTimerInit(void* arg, grpc_error_handle /*error*/) {
auto* self = static_cast<WatcherTimerInitState*>(arg);
grpc_timer_init(&self->state_watcher_->timer_, self->deadline_,
&self->state_watcher_->on_timeout_);
delete self;
}
StateWatcher* state_watcher_;
grpc_millis deadline_;
grpc_closure closure_;
};
enum CallbackPhase { kWaiting, kReadyToCallBack, kCallingBackAndFinished };
// Called when the completion is returned to the CQ.
static void FinishedCompletion(void* arg, grpc_cq_completion* /*ignored*/) {
auto* self = static_cast<StateWatcher*>(arg);
bool should_delete = false;
{
MutexLock lock(&self->mu_);
switch (self->phase_) {
case kWaiting:
case kReadyToCallBack:
GPR_UNREACHABLE_CODE(return );
case kCallingBackAndFinished:
should_delete = true;
}
}
if (should_delete) delete self;
}
void PartlyDone(bool due_to_completion, grpc_error_handle error) {
bool end_op = false;
void* end_op_tag = nullptr;
grpc_error_handle end_op_error = GRPC_ERROR_NONE;
grpc_completion_queue* end_op_cq = nullptr;
grpc_cq_completion* end_op_completion_storage = nullptr;
if (due_to_completion) {
grpc_timer_cancel(&timer_);
} else {
grpc_core::ClientChannel* client_channel =
grpc_core::ClientChannel::GetFromChannel(channel_);
GPR_ASSERT(client_channel != nullptr);
client_channel->CancelExternalConnectivityWatcher(&on_complete_);
}
{
MutexLock lock(&mu_);
if (due_to_completion) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures)) {
GRPC_LOG_IF_ERROR("watch_completion_error", GRPC_ERROR_REF(error));
}
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_NONE;
} else {
if (error == GRPC_ERROR_NONE) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Timed out waiting for connection state change");
} else if (error == GRPC_ERROR_CANCELLED) {
error = GRPC_ERROR_NONE;
}
}
switch (phase_) {
case kWaiting:
GRPC_ERROR_REF(error);
error_ = error;
phase_ = kReadyToCallBack;
break;
case kReadyToCallBack:
if (error != GRPC_ERROR_NONE) {
GPR_ASSERT(!due_to_completion);
GRPC_ERROR_UNREF(error_);
GRPC_ERROR_REF(error);
error_ = error;
}
phase_ = kCallingBackAndFinished;
end_op = true;
end_op_cq = cq_;
end_op_tag = tag_;
end_op_error = error_;
end_op_completion_storage = &completion_storage_;
break;
case kCallingBackAndFinished:
GPR_UNREACHABLE_CODE(return );
}
}
if (end_op) {
grpc_cq_end_op(end_op_cq, end_op_tag, end_op_error, FinishedCompletion,
this, end_op_completion_storage);
}
GRPC_ERROR_UNREF(error);
}
static void WatchComplete(void* arg, grpc_error_handle error) {
auto* self = static_cast<StateWatcher*>(arg);
self->PartlyDone(/*due_to_completion=*/true, GRPC_ERROR_REF(error));
}
static void TimeoutComplete(void* arg, grpc_error_handle error) {
auto* self = static_cast<StateWatcher*>(arg);
self->PartlyDone(/*due_to_completion=*/false, GRPC_ERROR_REF(error));
}
  // Channel being watched (borrowed; lifetime managed by the caller).
  grpc_channel* channel_;
  // Completion queue on which the result is reported.
  grpc_completion_queue* cq_;
  // User tag returned with the CQ completion.
  void* tag_;
  // Connectivity state the watcher was created with (see
  // grpc_channel_watch_connectivity_state below).
  grpc_connectivity_state state_;
  // Storage for the CQ completion, avoiding a separate allocation.
  grpc_cq_completion completion_storage_;
  // Closure run when the connectivity watch completes (WatchComplete).
  grpc_closure on_complete_;
  // Deadline timer for the watch.
  grpc_timer timer_;
  // Closure run when the timer fires (TimeoutComplete).
  grpc_closure on_timeout_;
  Mutex mu_;
  // Tracks which of the two PartlyDone() calls have happened.
  CallbackPhase phase_ ABSL_GUARDED_BY(mu_) = kWaiting;
  // Error to report with the completion; this watcher owns a ref.
  grpc_error_handle error_ ABSL_GUARDED_BY(mu_) = GRPC_ERROR_NONE;
};
} // namespace
} // namespace grpc_core
// Public API entry point: starts watching `channel` for a connectivity state
// change away from `last_observed_state`, reporting the result on `cq` with
// `tag` no later than `deadline`. The StateWatcher allocated here manages its
// own lifetime: it deletes itself after the completion is returned to the CQ
// (see StateWatcher::FinishedCompletion).
void grpc_channel_watch_connectivity_state(
    grpc_channel* channel, grpc_connectivity_state last_observed_state,
    gpr_timespec deadline, grpc_completion_queue* cq, void* tag) {
  // ExecCtx/ApplicationCallbackExecCtx must exist for the duration of the
  // call; their construction order matters and must not be changed.
  grpc_core::ApplicationCallbackExecCtx callback_exec_ctx;
  grpc_core::ExecCtx exec_ctx;
  GRPC_API_TRACE(
      "grpc_channel_watch_connectivity_state("
      "channel=%p, last_observed_state=%d, "
      "deadline=gpr_timespec { tv_sec: %" PRId64
      ", tv_nsec: %d, clock_type: %d }, "
      "cq=%p, tag=%p)",
      7,
      (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
       (int)deadline.clock_type, cq, tag));
  new grpc_core::StateWatcher(channel, cq, tag, last_observed_state, deadline);
}
| 3,478 |
4,054 |
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/log/log.h>
LOG_SETUP("component_test");
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/component/version.h>
#include <vespa/vespalib/component/versionspecification.h>
using namespace vespalib;
// Assert that version spec `lhs` orders strictly before `rhs`
// (and therefore is neither equal to nor greater than it).
void
EXPECT_LT(const VersionSpecification::string &lhs,
          const VersionSpecification::string &rhs)
{
    const VersionSpecification a(lhs);
    const VersionSpecification b(rhs);
    EXPECT_TRUE(a < b);
    EXPECT_FALSE(a == b);
    EXPECT_FALSE(b < a);
}
// Assert that the two version specs compare equal (neither orders
// before the other, and operator== agrees).
void
EXPECT_EQ(const VersionSpecification::string &lhs,
          const VersionSpecification::string &rhs)
{
    const VersionSpecification a(lhs);
    const VersionSpecification b(rhs);
    EXPECT_FALSE(a < b);
    EXPECT_TRUE(a == b);
    EXPECT_FALSE(b < a);
}
// Assert that the two version specs differ: exactly one orders before
// the other, and operator== reports inequality.
void
EXPECT_NE(const VersionSpecification::string &lhs,
          const VersionSpecification::string &rhs)
{
    const VersionSpecification a(lhs);
    const VersionSpecification b(rhs);
    EXPECT_TRUE(a < b || b < a);
    EXPECT_FALSE(a == b);
}
// Assert that version spec `lhs` orders strictly after `rhs`
// (and therefore is neither equal to nor less than it).
void
EXPECT_GT(const VersionSpecification::string &lhs,
          const VersionSpecification::string &rhs)
{
    const VersionSpecification a(lhs);
    const VersionSpecification b(rhs);
    EXPECT_FALSE(a < b);
    EXPECT_FALSE(a == b);
    EXPECT_TRUE(b < a);
}
// Verifies that <, == and > agree with each other (symmetry of the
// comparison) for 1- through 4-component version strings.
TEST("requireThatCompareToIsSymmetric")
{
    // one component
    EXPECT_LT("1", "2");
    EXPECT_EQ("2", "2");
    EXPECT_GT("2", "1");
    // two components
    EXPECT_LT("1.2", "3.4");
    EXPECT_EQ("3.4", "3.4");
    EXPECT_GT("3.4", "1.2");
    // three components
    EXPECT_LT("1.2.3", "4.5.6");
    EXPECT_EQ("4.5.6", "4.5.6");
    EXPECT_GT("4.5.6", "1.2.3");
    // four components
    EXPECT_LT("1.2.3.4", "5.6.7.8");
    EXPECT_EQ("5.6.7.8", "5.6.7.8");
    EXPECT_GT("5.6.7.8", "1.2.3.4");
}
// Verifies transitivity (a < b and b < c implies a < c) at every
// component depth.
TEST("requireThatCompareToIsTransitive")
{
    // major component
    EXPECT_LT("1", "2");
    EXPECT_LT("2", "3");
    EXPECT_LT("1", "3");
    // minor component
    EXPECT_LT("1.1", "1.2");
    EXPECT_LT("1.2", "1.3");
    EXPECT_LT("1.1", "1.3");
    // micro component
    EXPECT_LT("1.1.1", "1.1.2");
    EXPECT_LT("1.1.2", "1.1.3");
    EXPECT_LT("1.1.1", "1.1.3");
    // qualifier component
    EXPECT_LT("1.1.1.1", "1.1.1.2");
    EXPECT_LT("1.1.1.2", "1.1.1.3");
    EXPECT_LT("1.1.1.1", "1.1.1.3");
}
// Specs that spell out a different number of components never compare
// equal, even when the specified components agree (e.g. "1" != "1.2").
TEST("requireThatUnspecifiedComponentDoesNotMatchSpecified")
{
    EXPECT_EQ("1", "1");
    EXPECT_NE("1", "1.2");
    EXPECT_NE("1", "1.2.3");
    EXPECT_NE("1", "1.2.3.4");
    EXPECT_NE("1.2", "1");
    EXPECT_EQ("1.2", "1.2");
    EXPECT_NE("1.2", "1.2.3");
    EXPECT_NE("1.2", "1.2.3.4");
    EXPECT_NE("1.2.3", "1");
    EXPECT_NE("1.2.3", "1.2");
    EXPECT_EQ("1.2.3", "1.2.3");
    EXPECT_NE("1.2.3", "1.2.3.4");
    EXPECT_NE("1.2.3.4", "1");
    EXPECT_NE("1.2.3.4", "1.2");
    EXPECT_NE("1.2.3.4", "1.2.3");
    EXPECT_EQ("1.2.3.4", "1.2.3.4");
}
// Exercises VersionSpecification string parsing: component extraction,
// defaulting of unspecified components, and rejection of invalid input.
TEST("testText")
{
    VersionSpecification v("0.1.2.3");
    EXPECT_EQUAL(0, v.getMajor());
    EXPECT_EQUAL(1, v.getMinor());
    EXPECT_EQUAL(2, v.getMicro());
    EXPECT_EQUAL("3", v.getQualifier());
    v = VersionSpecification("1.2.3.4");
    EXPECT_EQUAL(1, v.getMajor());
    EXPECT_EQUAL(2, v.getMinor());
    EXPECT_EQUAL(3, v.getMicro());
    EXPECT_EQUAL("4", v.getQualifier());
    // unspecified components default to 0 and an empty qualifier
    v = VersionSpecification("1");
    EXPECT_EQUAL(1, v.getMajor());
    EXPECT_EQUAL(0, v.getMinor());
    EXPECT_EQUAL(0, v.getMicro());
    EXPECT_EQUAL("", v.getQualifier());
    // negative numeric components and invalid qualifiers are rejected
    EXPECT_EXCEPTION(v = VersionSpecification("-1"), IllegalArgumentException, "integer must start with a digit");
    EXPECT_EXCEPTION(v = VersionSpecification("1.-1"), IllegalArgumentException, "integer must start with a digit");
    EXPECT_EXCEPTION(v = VersionSpecification("1.2.-1"), IllegalArgumentException, "integer must start with a digit");
    EXPECT_EXCEPTION(v = VersionSpecification("1.2.3.-1"), IllegalArgumentException, "Invalid character in qualifier");
}
// Same parsing checks as testText, but for the concrete Version class
// rather than VersionSpecification.
TEST("testText2")
{
    Version v("0.1.2.3");
    EXPECT_EQUAL(0, v.getMajor());
    EXPECT_EQUAL(1, v.getMinor());
    EXPECT_EQUAL(2, v.getMicro());
    EXPECT_EQUAL("3", v.getQualifier());
    v = Version("1.2.3.4");
    EXPECT_EQUAL(1, v.getMajor());
    EXPECT_EQUAL(2, v.getMinor());
    EXPECT_EQUAL(3, v.getMicro());
    EXPECT_EQUAL("4", v.getQualifier());
    // unspecified components default to 0 and an empty qualifier
    v = Version("1");
    EXPECT_EQUAL(1, v.getMajor());
    EXPECT_EQUAL(0, v.getMinor());
    EXPECT_EQUAL(0, v.getMicro());
    EXPECT_EQUAL("", v.getQualifier());
    // negative numeric components and invalid qualifiers are rejected
    EXPECT_EXCEPTION(v = Version("-1"), IllegalArgumentException, "integer must start with a digit");
    EXPECT_EXCEPTION(v = Version("1.-1"), IllegalArgumentException, "integer must start with a digit");
    EXPECT_EXCEPTION(v = Version("1.2.-1"), IllegalArgumentException, "integer must start with a digit");
    EXPECT_EXCEPTION(v = Version("1.2.3.-1"), IllegalArgumentException, "Invalid character in qualifier");
}
// A default-constructed Version equals 0.0.0 (empty string form), and a
// default VersionSpecification is the "*.*.*" wildcard that matches any
// unqualified version.
TEST("testEmpty")
{
    Version ev;
    VersionSpecification evs;
    EXPECT_EQUAL("", ev.toString());
    EXPECT_EQUAL("*.*.*", evs.toString());
    EXPECT_TRUE(ev == Version(0,0,0,""));
    EXPECT_TRUE(evs.matches(ev));
    EXPECT_TRUE(evs.matches(Version(1,2,3)));
    // a qualified version is not matched by the wildcard specification
    EXPECT_TRUE(!evs.matches(Version(1,2,3,"foo")));
}
// Smoke test of the accessors on both classes and of
// VersionSpecification::matches() against an identical Version.
TEST("testSimple")
{
    // test Version:
    Version v(1, 2, 3, "qualifier");
    EXPECT_EQUAL(1, v.getMajor());
    EXPECT_EQUAL(2, v.getMinor());
    EXPECT_EQUAL(3, v.getMicro());
    EXPECT_EQUAL("qualifier", v.getQualifier());
    EXPECT_EQUAL("1.2.3.qualifier", v.toString());
    // test VersionSpecification:
    VersionSpecification vs(1, 2, 3, "qualifier");
    EXPECT_EQUAL(1, vs.getMajor());
    EXPECT_EQUAL(2, vs.getMinor());
    EXPECT_EQUAL(3, vs.getMicro());
    EXPECT_EQUAL("qualifier", vs.getQualifier());
    EXPECT_EQUAL(1, vs.getSpecifiedMajor());
    EXPECT_EQUAL(2, vs.getSpecifiedMinor());
    EXPECT_EQUAL(3, vs.getSpecifiedMicro());
    EXPECT_EQUAL("1.2.3.qualifier", vs.toString());
    // test cross-class function
    EXPECT_TRUE(vs.matches(v));
}
// Test entry point: runs every TEST(...) case defined above.
TEST_MAIN()
{
    TEST_RUN_ALL();
}
| 2,831 |
1,179 |
<filename>kernel/src/ext_t.hpp
/// @copyright
/// Copyright (C) 2020 Assured Information Security, Inc.
///
/// @copyright
/// Permission is hereby granted, free of charge, to any person obtaining a copy
/// of this software and associated documentation files (the "Software"), to deal
/// in the Software without restriction, including without limitation the rights
/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
/// copies of the Software, and to permit persons to whom the Software is
/// furnished to do so, subject to the following conditions:
///
/// @copyright
/// The above copyright notice and this permission notice shall be included in
/// all copies or substantial portions of the Software.
///
/// @copyright
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
/// SOFTWARE.
#ifndef EXT_T_HPP
#define EXT_T_HPP
#include <alloc_huge_t.hpp>
#include <alloc_page_t.hpp>
#include <basic_alloc_page_t.hpp>
#include <basic_page_4k_t.hpp>
#include <basic_root_page_table_t.hpp>
#include <bf_constants.hpp>
#include <bfelf/elf64_ehdr_t.hpp>
#include <bfelf/elf64_phdr_t.hpp>
#include <call_ext.hpp>
#include <ext_tcb_t.hpp>
#include <huge_pool_t.hpp>
#include <intrinsic_t.hpp>
#include <map_page_flags.hpp>
#include <mk_args_t.hpp>
#include <page_4k_t.hpp>
#include <page_aligned_bytes_t.hpp>
#include <page_pool_t.hpp>
#include <root_page_table_t.hpp>
#include <tls_t.hpp>
#include <bsl/array.hpp>
#include <bsl/convert.hpp>
#include <bsl/cstring.hpp>
#include <bsl/debug.hpp>
#include <bsl/discard.hpp>
#include <bsl/ensures.hpp>
#include <bsl/errc_type.hpp>
#include <bsl/expects.hpp>
#include <bsl/finally.hpp>
#include <bsl/safe_idx.hpp>
#include <bsl/safe_integral.hpp>
#include <bsl/span.hpp>
#include <bsl/touch.hpp>
#include <bsl/unlikely.hpp>
/// TODO:
/// - Add support for multiple extensions. For this to work, we will need
/// support for PCID and the global flag should be turned off. This will
/// ensure that swaps to another extension (which require a CR3 change),
/// will not destroy performance. To ensure the hypervisor can support
/// systems without PCID, projects that use more than one extension like
/// MicroV should compile the additional extensions into both the main
/// extension, and the additional ones. On systems that don't have PCID,
/// it would call itself. Systems with PCID would call through IPC. You
/// could then have compile/runtime flags for forcing one path over the
/// other in situations where performace or security take precedence.
/// - Since the microkernel doesn't have a timer, the only way another
/// extension will execute is from some sort of IPC interface where an
/// extension calls into another extension to perform an action and then
/// returns a result. The best way to handle this would be to use an
/// instruction sequence similar to a VMCall and VMExit. The extension
/// would execute bf_ipc_op_call, which could take at most 6 arguments
/// that match the SysV calling conventions. The additional extension
/// would execute and then return using bf_ipc_op_return. There would
/// need to be some logic in the syscall code to make sure that this
/// return function was used properly (meaning you cannot return unless
/// you have been called, and you cannot run if you have been called).
/// From there, all that is finally needed is some way to share memory.
/// There are two options here. Shared memory, or a memcpy ABI. IMO, we
/// should use a memcpy ABI as shared memory really complicates things.
/// If shared memory is used, we should make sure, that like freeing a
/// page, unmapping shared memory is optional, meaning the microkernel
/// is nor required to honor the request.
/// - The TLS block that we use for the general purpose registers will need
/// to be shared as it is currently a problem. This way, a swap to another
/// extension only has to update the extension ID in that block, and then
/// swap CR3. The best way to handle this would be to have the extension
/// pool can allocate the shared portion of the TLS blocks for all of the
/// online PPs and then give this page to each extension as it initializes.
/// Then all we have to do is make sure that there is no state in there that
/// would be a problem for the extensions to share, which right now is just
/// the extension ID. If additional state is added to the ABI that is a
/// problem, we will either have to copy the entire block on each swap,
/// or make add a second page to the ABI, one that is shared, and one that
/// is not.
namespace mk
{
/// <!-- description -->
/// @brief Defines an extension WRT to the microkernel. Whenever an
/// executes, it must go through this class to do so. This class
/// also maintains all of the resources given to an extension, as
/// well as the extension's memory map, ELF file, stack, TLS blocks,
/// and all of it's memory map functions.
///
class ext_t final
{
        /// @brief stores the ID associated with this ext_t
        bsl::safe_u16 m_id{};
        /// @brief stores the extension's handle
        bsl::safe_u64 m_handle{};
        /// @brief stores true if start() has been executed
        bool m_has_executed_start{};
        /// @brief stores true if fail_entry() is being executed
        bool m_is_executing_fail{};
        /// @brief stores the main rpt
        root_page_table_t m_main_rpt{};
        /// @brief stores the direct map rpts (one per VM)
        bsl::array<root_page_table_t, HYPERVISOR_MAX_VMS.get()> m_direct_map_rpts{};
        /// @brief stores the main IP registered by the extension
        bsl::safe_u64 m_entry_ip{};
        /// @brief stores the bootstrap IP registered by the extension
        bsl::safe_u64 m_bootstrap_ip{};
        /// @brief stores the vmexit IP registered by the extension
        bsl::safe_u64 m_vmexit_ip{};
        /// @brief stores the fail IP registered by the extension
        bsl::safe_u64 m_fail_ip{};
        /// @brief stores the huge page allocations given to this extension
        bsl::array<bsl::span<page_4k_t>, HYPERVISOR_MAX_HUGE_ALLOCS.get()> m_huge_allocs{};
        /// @brief stores the index into m_huge_allocs
        bsl::safe_idx m_huge_allocs_idx{};
/// <!-- description -->
/// @brief Returns the program header table
///
/// <!-- inputs/outputs -->
/// @param file the ELF file to get the program header table from
/// @return Returns the program header table
///
[[nodiscard]] static constexpr auto
get_phdrtab(bfelf::elf64_ehdr_t const *const file) noexcept
-> bsl::span<bfelf::elf64_phdr_t const>
{
return {file->e_phdr, bsl::to_umx(file->e_phnum)};
}
/// <!-- description -->
/// @brief Returns "size" as a "page_aligned_bytes_t"
///
/// <!-- inputs/outputs -->
/// @param size the number of bytes to convert
/// @return Returns "size" as a "page_aligned_bytes_t". On error,
/// check the bytes field.
///
[[nodiscard]] static constexpr auto
size_to_page_aligned_bytes(bsl::safe_umx const &size) noexcept -> page_aligned_bytes_t
{
bsl::safe_umx mut_pages{};
if ((size % HYPERVISOR_PAGE_SIZE).checked().is_zero()) {
mut_pages = size >> HYPERVISOR_PAGE_SHIFT;
}
else {
mut_pages = (size >> HYPERVISOR_PAGE_SHIFT) + bsl::safe_umx::magic_1();
}
/// NOTE:
/// - We do not validate the bytes field. This is because the size
/// field could be anything as it comes from the syscall
/// interface, which means it absolutely could overflow. Callers
/// of this function will have to check for this.
/// - We do mark pages as checked as it is impossible for it to
/// overflow.
return {mut_pages * HYPERVISOR_PAGE_SIZE, mut_pages.checked()};
}
        /// <!-- description -->
        ///   @brief Checks whether or not a given ELF file is in a format that
        ///     this ELF loader can handle.
        ///
        /// <!-- inputs/outputs -->
        ///   @param file a pointer to the elf file
        ///
        static constexpr void
        validate_elf64_ehdr(bfelf::elf64_ehdr_t const *const file) noexcept
        {
            bsl::expects(nullptr != file);
            // only fully linked executables are supported (no .so / .o)
            bsl::expects(file->e_type == bfelf::ET_EXEC);
            // ELF magic: 0x7F 'E' 'L' 'F'
            bsl::expects(*file->e_ident.at_if(bfelf::EI_MAG0) == bfelf::ELFMAG0);
            bsl::expects(*file->e_ident.at_if(bfelf::EI_MAG1) == bfelf::ELFMAG1);
            bsl::expects(*file->e_ident.at_if(bfelf::EI_MAG2) == bfelf::ELFMAG2);
            bsl::expects(*file->e_ident.at_if(bfelf::EI_MAG3) == bfelf::ELFMAG3);
            // 64-bit, System V ABI only
            bsl::expects(*file->e_ident.at_if(bfelf::EI_CLASS) == bfelf::ELFCLASS64);
            bsl::expects(*file->e_ident.at_if(bfelf::EI_OSABI) == bfelf::ELFOSABI_SYSV);
        }
        /// <!-- description -->
        ///   @brief Validates the provided pt_load segment.
        ///
        /// <!-- inputs/outputs -->
        ///   @param phdr the pt_load segment to validate
        ///
        static constexpr void
        validate_pt_load(bfelf::elf64_phdr_t const *const phdr) noexcept
        {
            bsl::expects(nullptr != phdr);
            // the segment must fit entirely inside the extension's code region
            constexpr auto min_vaddr{HYPERVISOR_EXT_CODE_ADDR};
            constexpr auto max_vaddr{(min_vaddr + HYPERVISOR_EXT_CODE_SIZE).checked()};
            bsl::expects((phdr->p_vaddr) >= min_vaddr);
            bsl::expects((phdr->p_vaddr + bsl::to_umx(phdr->p_memsz)).checked() <= max_vaddr);
            // enforce W^X: a writable segment must not be executable ...
            if (bsl::safe_u32::magic_0() != (phdr->p_flags & bfelf::PF_W)) {
                bsl::expects(bsl::safe_u32::magic_0() == (phdr->p_flags & bfelf::PF_X));
            }
            else {
                bsl::touch();
            }
            // ... and an executable segment must not be writable
            if (bsl::safe_u32::magic_0() != (phdr->p_flags & bfelf::PF_X)) {
                bsl::expects(bsl::safe_u32::magic_0() == (phdr->p_flags & bfelf::PF_W));
            }
            else {
                bsl::touch();
            }
            // segments must be page aligned
            bsl::expects(phdr->p_align == HYPERVISOR_PAGE_SIZE);
        }
        /// <!-- description -->
        ///   @brief Validates the provided pt_gnu_stack segment.
        ///
        /// <!-- inputs/outputs -->
        ///   @param phdr the pt_gnu_stack segment to validate
        ///
        static constexpr void
        validate_pt_gnu_stack(bfelf::elf64_phdr_t const *const phdr) noexcept
        {
            bsl::expects(nullptr != phdr);
            // the stack must never be executable
            bsl::expects(bsl::safe_u32::magic_0() == (phdr->p_flags & bfelf::PF_X));
        }
        /// <!-- description -->
        ///   @brief Validates the provided pt_tls segment.
        ///
        /// <!-- inputs/outputs -->
        ///   @param phdr the pt_tls segment to validate
        ///
        static constexpr void
        validate_pt_tls(bfelf::elf64_phdr_t const *const phdr) noexcept
        {
            bsl::expects(nullptr != phdr);
            // the TLS template must fit in the single page reserved for it
            bsl::expects(phdr->p_memsz <= HYPERVISOR_PAGE_SIZE);
            // TLS data must never be executable
            bsl::expects(bsl::safe_u32::magic_0() == (phdr->p_flags & bfelf::PF_X));
        }
        /// <!-- description -->
        ///   @brief Validates the provided ELF file (header plus every
        ///     program header this loader cares about).
        ///
        /// <!-- inputs/outputs -->
        ///   @param file the elf file to validate
        ///
        static constexpr void
        validate(loader::ext_elf_file_t const *const file) noexcept
        {
            /// NOTE:
            /// - The point of this function is to provide some sanity checks
            ///   in debug mode which is why everything uses bsl::expects.
            ///   None of these are needed in a release build because it
            ///   will have gone through testing to ensure they all pass.
            /// - Removing this logic in a release build helps to keep the
            ///   binary size smaller.
            ///
            bsl::expects(nullptr != file);
            validate_elf64_ehdr(file);
            // dispatch each program header to its type-specific validator;
            // unknown types are intentionally ignored
            auto const phdrtab{get_phdrtab(file)};
            for (bsl::safe_idx mut_i{}; mut_i < phdrtab.size(); ++mut_i) {
                auto const *const phdr{phdrtab.at_if(mut_i)};
                switch (phdr->p_type) {
                    case bfelf::PT_LOAD.get(): {
                        validate_pt_load(phdr);
                        break;
                    }
                    case bfelf::PT_GNU_STACK.get(): {
                        validate_pt_gnu_stack(phdr);
                        break;
                    }
                    case bfelf::PT_TLS.get(): {
                        validate_pt_tls(phdr);
                        break;
                    }
                    default: {
                        break;
                    }
                }
            }
        }
/// <!-- description -->
/// @brief Allocate the a page of RW or RW memory for the segment
/// being loaded.
///
/// <!-- inputs/outputs -->
/// @param mut_tls the current TLS block
/// @param mut_page_pool the page_pool_t to use
/// @param mut_rpt the root page table to add too
/// @param phdr the pt_load segment to add
/// @param offset the offset in the segment being allocated
/// @return Returns a pointer to the newly allocated page, or a
/// nullptr on failure.
///
[[nodiscard]] static constexpr auto
allocate_page_for_add_segment(
tls_t &mut_tls,
page_pool_t &mut_page_pool,
root_page_table_t &mut_rpt,
bfelf::elf64_phdr_t const *const phdr,
bsl::safe_idx const &offset) noexcept -> page_4k_t *
{
page_4k_t *pmut_mut_page{};
/// NOTE:
/// - The validation code above ensures that phdr->p_vaddr +
/// the offset will never overflow, which is why this is
/// marked as checked.
///
auto const virt{(phdr->p_vaddr + bsl::to_umx(offset)).checked()};
if ((phdr->p_flags & bfelf::PF_X).is_pos()) {
pmut_mut_page =
mut_rpt.allocate_page<page_4k_t>(mut_tls, mut_page_pool, virt, MAP_PAGE_RE);
}
else {
pmut_mut_page =
mut_rpt.allocate_page<page_4k_t>(mut_tls, mut_page_pool, virt, MAP_PAGE_RW);
}
return pmut_mut_page;
}
        /// <!-- description -->
        ///   @brief Adds a single pt_load segment of an ELF file to the
        ///     provided root page table, allocating pages and copying the
        ///     file-backed portion of the segment into them.
        ///
        /// <!-- inputs/outputs -->
        ///   @param mut_tls the current TLS block
        ///   @param mut_page_pool the page_pool_t to use
        ///   @param mut_rpt the root page table to add too
        ///   @param phdr the pt_load segment to add
        ///   @return Returns bsl::errc_success on success, bsl::errc_failure
        ///     and friends otherwise
        ///
        [[nodiscard]] static constexpr auto
        add_segment(
            tls_t &mut_tls,
            page_pool_t &mut_page_pool,
            root_page_table_t &mut_rpt,
            bfelf::elf64_phdr_t const *const phdr) noexcept -> bsl::errc_type
        {
            constexpr auto inc{bsl::to_idx(HYPERVISOR_PAGE_SIZE)};
            // view of the segment's file-backed bytes (p_filesz can be
            // smaller than p_memsz when the segment includes BSS)
            bsl::span const segment{phdr->p_offset, bsl::to_umx(phdr->p_filesz)};
            for (bsl::safe_idx mut_i{}; mut_i < phdr->p_memsz; mut_i += inc) {
                auto *const pmut_page{
                    allocate_page_for_add_segment(mut_tls, mut_page_pool, mut_rpt, phdr, mut_i)};
                if (bsl::unlikely(nullptr == pmut_page)) {
                    bsl::print<bsl::V>() << bsl::here();
                    return bsl::errc_failure;
                }
                /// NOTE:
                /// - Due to the BSS section, the memsz might not actually
                ///   be the same size as the filesz. For this reason, we
                ///   need to keep allocating pages, but might not want to
                ///   copy these pages.
                /// - The subspan figures this out for us. Once the file has
                ///   been completely copied, the subspan will start
                ///   returning an empty subspan, telling us to stop copying.
                ///
                auto const src{segment.subspan(mut_i, HYPERVISOR_PAGE_SIZE)};
                if (src.empty()) {
                    continue;
                }
                bsl::builtin_memcpy(pmut_page->data.data(), src.data(), src.size());
            }
            return bsl::errc_success;
        }
        /// <!-- description -->
        ///   @brief Adds all of the program segments given an ELF file to
        ///     the provided root page table, skipping the TLS template
        ///     (which is handled separately by add_tls_blocks).
        ///
        /// <!-- inputs/outputs -->
        ///   @param mut_tls the current TLS block
        ///   @param mut_page_pool the page_pool_t to use
        ///   @param mut_rpt the root page table to add too
        ///   @param file the ELF file for this ext_t
        ///   @return Returns bsl::errc_success on success, bsl::errc_failure
        ///     and friends otherwise
        ///
        [[nodiscard]] static constexpr auto
        add_segments(
            tls_t &mut_tls,
            page_pool_t &mut_page_pool,
            root_page_table_t &mut_rpt,
            loader::ext_elf_file_t const *const file) noexcept -> bsl::errc_type
        {
            // first pass: record the virtual address of the PT_TLS segment
            // (if any) so the second pass can recognize and skip it
            auto mut_tls_vaddr{bsl::safe_u64::max_value()};
            auto const phdrtab{get_phdrtab(file)};
            for (bsl::safe_idx mut_i{}; mut_i < phdrtab.size(); ++mut_i) {
                auto const *const phdr{phdrtab.at_if(mut_i)};
                if (bfelf::PT_TLS == phdr->p_type) {
                    mut_tls_vaddr = bsl::to_u64(phdr->p_vaddr);
                    break;
                }
                bsl::touch();
            }
            // second pass: map every PT_LOAD segment
            for (bsl::safe_idx mut_i{}; mut_i < phdrtab.size(); ++mut_i) {
                auto const *const phdr{phdrtab.at_if(mut_i)};
                if (bfelf::PT_LOAD != phdr->p_type) {
                    continue;
                }
                /// NOTE:
                /// - Sometimes, you can end up with a PT_LOAD segment that
                ///   is actually the TLS. It will have an alignment that
                ///   is not supported as well. We need to skip these.
                ///
                if (phdr->p_vaddr == mut_tls_vaddr) {
                    continue;
                }
                auto const ret{add_segment(mut_tls, mut_page_pool, mut_rpt, phdr)};
                if (bsl::unlikely(!ret)) {
                    bsl::print<bsl::V>() << bsl::here();
                    return bsl::errc_failure;
                }
                bsl::touch();
            }
            return bsl::errc_success;
        }
        /// <!-- description -->
        ///   @brief Adds an extension's stack for a specific PP to the
        ///     provided root page table at the provided address.
        ///
        /// <!-- inputs/outputs -->
        ///   @param mut_tls the current TLS block
        ///   @param mut_page_pool the page_pool_t to use
        ///   @param mut_rpt the root page table to add too
        ///   @param addr the address of where to put the stack
        ///   @return Returns bsl::errc_success on success, bsl::errc_failure
        ///     and friends otherwise
        ///
        [[nodiscard]] static constexpr auto
        add_stack(
            tls_t &mut_tls,
            page_pool_t &mut_page_pool,
            root_page_table_t &mut_rpt,
            bsl::safe_u64 const &addr) noexcept -> bsl::errc_type
        {
            // map the stack one RW page at a time
            constexpr auto size{HYPERVISOR_EXT_STACK_SIZE};
            for (bsl::safe_idx mut_i{}; mut_i < size; mut_i += bsl::to_idx(HYPERVISOR_PAGE_SIZE)) {
                auto const virt{(addr + bsl::to_u64(mut_i)).checked()};
                /// NOTE:
                /// - The virtual address provided to allocate_page cannot
                ///   overflow because add_stacks ensures that this is not
                ///   possible, which is why it is marked as checked.
                ///
                auto const *const page{
                    mut_rpt.allocate_page<page_4k_t>(mut_tls, mut_page_pool, virt, MAP_PAGE_RW)};
                if (bsl::unlikely(nullptr == page)) {
                    bsl::print<bsl::V>() << bsl::here();
                    return bsl::errc_failure;
                }
                bsl::touch();
            }
            return bsl::errc_success;
        }
        /// <!-- description -->
        ///   @brief Adds the extension's stacks (one per online PP, each
        ///     separated by a guard page) to the provided root page table.
        ///
        /// <!-- inputs/outputs -->
        ///   @param mut_tls the current TLS block
        ///   @param mut_page_pool the page_pool_t to use
        ///   @param mut_rpt the root page table to add too
        ///   @return Returns bsl::errc_success on success, bsl::errc_failure
        ///     and friends otherwise
        ///
        [[nodiscard]] constexpr auto
        add_stacks(tls_t &mut_tls, page_pool_t &mut_page_pool, root_page_table_t &mut_rpt) noexcept
            -> bsl::errc_type
        {
            constexpr auto stack_addr{HYPERVISOR_EXT_STACK_ADDR};
            constexpr auto stack_size{HYPERVISOR_EXT_STACK_SIZE};
            for (bsl::safe_idx mut_i{}; mut_i < bsl::to_idx(mut_tls.online_pps); ++mut_i) {
                // the extra HYPERVISOR_PAGE_SIZE leaves an unmapped guard
                // page between consecutive stacks
                auto const offs{(stack_size + HYPERVISOR_PAGE_SIZE) * bsl::to_u64(mut_i)};
                auto const addr{(stack_addr + offs).checked()};
                /// NOTE:
                /// - CMake is responsible for ensuring that the values for
                ///   stack_addr and stack_size make sense. The only way the
                ///   the math above could overflow is if the provided online
                ///   PPs is invalid while at the same time CMake was
                ///   configured with values that could result in overflow.
                ///   This is considered extremely unlikely and therefore
                ///   undefined, which is why addr is marked as checked.
                ///
                auto const ret{this->add_stack(mut_tls, mut_page_pool, mut_rpt, addr)};
                if (bsl::unlikely(!ret)) {
                    bsl::print<bsl::V>() << bsl::here();
                    return bsl::errc_failure;
                }
                bsl::touch();
            }
            return bsl::errc_success;
        }
        /// <!-- description -->
        ///   @brief Adds an extension's fail stack for a specific PP to the
        ///     provided root page table at the provided address.
        ///
        /// <!-- inputs/outputs -->
        ///   @param mut_tls the current TLS block
        ///   @param mut_page_pool the page_pool_t to use
        ///   @param mut_rpt the root page table to add too
        ///   @param addr the address of where to put the stack
        ///   @return Returns bsl::errc_success on success, bsl::errc_failure
        ///     and friends otherwise
        ///
        [[nodiscard]] static constexpr auto
        add_fail_stack(
            tls_t &mut_tls,
            page_pool_t &mut_page_pool,
            root_page_table_t &mut_rpt,
            bsl::safe_u64 const &addr) noexcept -> bsl::errc_type
        {
            // map the fail stack one RW page at a time
            constexpr auto size{HYPERVISOR_EXT_FAIL_STACK_SIZE};
            for (bsl::safe_idx mut_i{}; mut_i < size; mut_i += bsl::to_idx(HYPERVISOR_PAGE_SIZE)) {
                auto const virt{(addr + bsl::to_u64(mut_i)).checked()};
                /// NOTE:
                /// - The virtual address provided to allocate_page cannot
                ///   overflow because add_fail_stacks ensures that this is not
                ///   possible, which is why it is marked as checked.
                ///
                auto const *const page{
                    mut_rpt.allocate_page<page_4k_t>(mut_tls, mut_page_pool, virt, MAP_PAGE_RW)};
                if (bsl::unlikely(nullptr == page)) {
                    bsl::print<bsl::V>() << bsl::here();
                    return bsl::errc_failure;
                }
                bsl::touch();
            }
            return bsl::errc_success;
        }
        /// <!-- description -->
        ///   @brief Adds the extension's fail stacks (one per online PP,
        ///     each separated by a guard page) to the provided root page
        ///     table.
        ///
        /// <!-- inputs/outputs -->
        ///   @param mut_tls the current TLS block
        ///   @param mut_page_pool the page_pool_t to use
        ///   @param mut_rpt the root page table to add too
        ///   @return Returns bsl::errc_success on success, bsl::errc_failure
        ///     and friends otherwise
        ///
        [[nodiscard]] constexpr auto
        add_fail_stacks(
            tls_t &mut_tls, page_pool_t &mut_page_pool, root_page_table_t &mut_rpt) noexcept
            -> bsl::errc_type
        {
            constexpr auto stack_addr{HYPERVISOR_EXT_FAIL_STACK_ADDR};
            constexpr auto stack_size{HYPERVISOR_EXT_FAIL_STACK_SIZE};
            for (bsl::safe_idx mut_i{}; mut_i < bsl::to_idx(mut_tls.online_pps); ++mut_i) {
                // the extra HYPERVISOR_PAGE_SIZE leaves an unmapped guard
                // page between consecutive fail stacks
                auto const offs{(stack_size + HYPERVISOR_PAGE_SIZE) * bsl::to_u64(mut_i)};
                auto const addr{(stack_addr + offs).checked()};
                /// NOTE:
                /// - CMake is responsible for ensuring that the values for
                ///   stack_addr and stack_size make sense. The only way the
                ///   the math above could overflow is if the provided online
                ///   PPs is invalid while at the same time CMake was
                ///   configured with values that could result in overflow.
                ///   This is considered extremely unlikely and therefore
                ///   undefined, which is why addr is marked as checked.
                ///
                auto const ret{this->add_fail_stack(mut_tls, mut_page_pool, mut_rpt, addr)};
                if (bsl::unlikely(!ret)) {
                    bsl::print<bsl::V>() << bsl::here();
                    return bsl::errc_failure;
                }
                bsl::touch();
            }
            return bsl::errc_success;
        }
        /// <!-- description -->
        ///   @brief Adds an extension's TLS (has nothing to do with the
        ///     microkernel's TLS). Remember that each extension has two pages
        ///     of TLS block stuff. One page for TLS data, which is anything
        ///     that an extension defines with thread_local and will show up
        ///     in the ELF file as a program segment, and one page for the
        ///     Thread Control Block (TCB) which stores a pointer that is
        ///     needed by thread_local as well as TLS data that is defined by
        ///     the ABI like the general purpose registers. This adds the TLS
        ///     data in that first page and is only called when an extension
        ///     actually uses thread_local. From a memory layout point of view,
        ///     the TLS data comes first (right justified), then the "tp" which
        ///     is the value written to FS and is a self pointer, and then the
        ///     TCB data, which define in the ABI.
        ///
        /// <!-- inputs/outputs -->
        ///   @param mut_tls the current TLS block
        ///   @param mut_page_pool the page_pool_t to use
        ///   @param mut_rpt the root page table to add too
        ///   @param addr the address of the TLS data
        ///   @param phdr the TLS segment to copy TLS data from
        ///   @return Returns bsl::errc_success on success, bsl::errc_failure
        ///     and friends otherwise
        ///
        [[nodiscard]] static constexpr auto
        add_tls(
            tls_t &mut_tls,
            page_pool_t &mut_page_pool,
            root_page_table_t &mut_rpt,
            bsl::safe_u64 const &addr,
            bfelf::elf64_phdr_t const *const phdr) noexcept -> bsl::errc_type
        {
            auto *const pmut_page{
                mut_rpt.allocate_page<page_4k_t>(mut_tls, mut_page_pool, addr, MAP_PAGE_RW)};
            if (bsl::unlikely(nullptr == pmut_page)) {
                bsl::print<bsl::V>() << bsl::here();
                return bsl::errc_failure;
            }
            /// NOTE:
            /// - Since the validation code above ensures that the TLS block
            ///   in the phdr is no larger than a page, dst_idx cannot
            ///   underflow, which is why it is marked as checked().
            ///
            bsl::span const src{phdr->p_offset, bsl::to_umx(phdr->p_filesz)};
            /// NOTE:
            /// - The dst_idx is needed because the TLS data is in a sense,
            ///   right justified. Meaning, we allocate a full page, but if
            ///   the extension only uses 100 bytes, the data starts at the
            ///   last 100 bytes of the page.
            ///
            auto const dst_idx{bsl::to_idx((HYPERVISOR_PAGE_SIZE - phdr->p_memsz).checked())};
            bsl::builtin_memcpy(pmut_page->data.at_if(dst_idx), src.data(), src.size());
            return bsl::errc_success;
        }
/// <!-- description -->
/// @brief Adds an exteneion's TLS block for a specific PP to the
/// provided root page table at the provided address.
///
/// <!-- inputs/outputs -->
/// @param mut_tls the current TLS block
/// @param mut_page_pool the page_pool_t to use
/// @param mut_rpt the root page table to add too
/// @param addr the address of the TCB portion of the TLS block
/// @return Returns bsl::errc_success on success, bsl::errc_failure
/// and friends otherwise
///
[[nodiscard]] static constexpr auto
add_tcb(
tls_t &mut_tls,
page_pool_t &mut_page_pool,
root_page_table_t &mut_rpt,
bsl::safe_u64 const &addr) noexcept -> bsl::errc_type
{
auto *const pmut_page{
mut_rpt.allocate_page<ext_tcb_t>(mut_tls, mut_page_pool, addr, MAP_PAGE_RW)};
if (bsl::unlikely(nullptr == pmut_page)) {
bsl::print<bsl::V>() << bsl::here();
return bsl::errc_failure;
}
pmut_page->tp = addr.get();
return bsl::errc_success;
}
/// <!-- description -->
///   @brief Adds the extension's TLS blocks to the provided root page
///     table: a TCB page for every online PP, and, if the ELF file
///     carries a PT_TLS segment, a TLS data page for every online PP.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_page_pool the page_pool_t to use
///   @param mut_rpt the root page table to add too
///   @param file the ELF file that contains the TLS info
///   @return Returns bsl::errc_success on success, bsl::errc_failure
///     and friends otherwise
///
[[nodiscard]] constexpr auto
add_tls_blocks(
    tls_t &mut_tls,
    page_pool_t &mut_page_pool,
    root_page_table_t &mut_rpt,
    loader::ext_elf_file_t const *const file) noexcept -> bsl::errc_type
{
    bfelf::elf64_phdr_t const *mut_phdr{};
    constexpr auto tls_addr{HYPERVISOR_EXT_TLS_ADDR};
    constexpr auto tls_size{HYPERVISOR_EXT_TLS_SIZE};

    /// Pass 1: map one TCB page per online PP. Each PP's TLS region is
    /// (tls_size + one page) wide; the TCB page sits one page above the
    /// start of that PP's region (the data pages from pass 2 sit at the
    /// region's start).
    for (bsl::safe_idx mut_i{}; mut_i < bsl::to_umx(mut_tls.online_pps); ++mut_i) {
        auto const offs{(tls_size + HYPERVISOR_PAGE_SIZE) * bsl::to_u64(mut_i)};
        auto const addr{(tls_addr + offs + HYPERVISOR_PAGE_SIZE).checked()};

        /// NOTE:
        /// - CMake is responsible for ensuring that the values for
        ///   tls_addr and tls_size make sense. The only way the
        ///   the math above could overflow is if the provided online
        ///   PPs is invalid while at the same time CMake was
        ///   configured with values that could result in overflow.
        ///   This is considered extremely unlikely and therefore
        ///   undefined, which is why addr is marked as checked.
        ///
        auto const ret{this->add_tcb(mut_tls, mut_page_pool, mut_rpt, addr)};
        if (bsl::unlikely(!ret)) {
            bsl::print<bsl::V>() << bsl::here();
            return ret;
        }

        bsl::touch();
    }

    /// Locate the (at most one) PT_TLS program header in the ELF file.
    auto const phdrtab{get_phdrtab(file)};
    for (bsl::safe_idx mut_i{}; mut_i < phdrtab.size(); ++mut_i) {
        auto const *const phdr{phdrtab.at_if(mut_i)};

        if (bfelf::PT_TLS == phdr->p_type) {
            mut_phdr = phdr;
            break;
        }

        bsl::touch();
    }

    /// An extension without a PT_TLS segment only needs the TCB pages
    /// added above, so this is not an error.
    if (nullptr == mut_phdr) {
        return bsl::errc_success;
    }

    /// Pass 2: map and fill one TLS data page per online PP from the
    /// PT_TLS segment's contents.
    for (bsl::safe_idx mut_i{}; mut_i < bsl::to_umx(mut_tls.online_pps); ++mut_i) {
        auto const offs{(tls_size + HYPERVISOR_PAGE_SIZE) * bsl::to_u64(mut_i)};
        auto const addr{(tls_addr + offs).checked()};

        /// NOTE:
        /// - CMake is responsible for ensuring that the values for
        ///   tls_addr and tls_size make sense. The only way the
        ///   the math above could overflow is if the provided online
        ///   PPs is invalid while at the same time CMake was
        ///   configured with values that could result in overflow.
        ///   This is considered extremely unlikely and therefore
        ///   undefined, which is why addr is marked as checked.
        ///
        auto const ret{this->add_tls(mut_tls, mut_page_pool, mut_rpt, addr, mut_phdr)};
        if (bsl::unlikely(!ret)) {
            bsl::print<bsl::V>() << bsl::here();
            return ret;
        }

        bsl::touch();
    }

    return bsl::errc_success;
}
/// <!-- description -->
///   @brief Initializes a root page table to support the execution
///     of this extension: aliases in the system (kernel) mappings and
///     then adds the extension's segments, stacks, fail stacks and
///     TLS blocks.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_page_pool the page_pool_t to use
///   @param mut_rpt the root page table to initialize
///   @param system_rpt the system root page table to initialize with
///   @param file the ELF file that contains the segment and TLS
///     info need to initialize the provided rpt
///   @return Returns bsl::errc_success on success, bsl::errc_failure
///     and friends otherwise
///
[[nodiscard]] constexpr auto
initialize_rpt(
    tls_t &mut_tls,
    page_pool_t &mut_page_pool,
    root_page_table_t &mut_rpt,
    root_page_table_t const &system_rpt,
    loader::ext_elf_file_t const *const file) noexcept -> bsl::errc_type
{
    bsl::errc_type mut_ret{};

    mut_ret = mut_rpt.initialize(mut_tls, mut_page_pool);
    if (bsl::unlikely(!mut_ret)) {
        bsl::print<bsl::V>() << bsl::here();
        return bsl::errc_failure;
    }

    // From here on, any failure must tear down the partially built
    // tables; the guard is disarmed only on the success path below.
    bsl::finally mut_release_on_error{
        [&mut_tls, &mut_rpt, &mut_page_pool]() noexcept -> void {
            mut_rpt.release(mut_tls, mut_page_pool);
        }};

    // Alias the system (kernel) mappings into the extension's RPT.
    mut_rpt.add_tables(mut_tls, system_rpt);

    // Map the extension's ELF segments (code/data).
    mut_ret = this->add_segments(mut_tls, mut_page_pool, mut_rpt, file);
    if (bsl::unlikely(!mut_ret)) {
        bsl::print<bsl::V>() << bsl::here();
        return bsl::errc_failure;
    }

    // Per-PP call stacks.
    mut_ret = this->add_stacks(mut_tls, mut_page_pool, mut_rpt);
    if (bsl::unlikely(!mut_ret)) {
        bsl::print<bsl::V>() << bsl::here();
        return bsl::errc_failure;
    }

    // Dedicated stacks used while running the fail handler.
    mut_ret = this->add_fail_stacks(mut_tls, mut_page_pool, mut_rpt);
    if (bsl::unlikely(!mut_ret)) {
        bsl::print<bsl::V>() << bsl::here();
        return bsl::errc_failure;
    }

    // Per-PP TCB pages and (optional) PT_TLS data pages.
    mut_ret = this->add_tls_blocks(mut_tls, mut_page_pool, mut_rpt, file);
    if (bsl::unlikely(!mut_ret)) {
        bsl::print<bsl::V>() << bsl::here();
        return bsl::errc_failure;
    }

    // Success: the caller now owns the fully initialized RPT.
    mut_release_on_error.ignore();
    return bsl::errc_success;
}
/// <!-- description -->
///   @brief Initializes a direct map root page table to support the
///     execution of this extension (with the inclusion of a direct
///     map). The new RPT aliases all of the main RPT's tables.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_page_pool the page_pool_t to use
///   @param pmut_rpt the root page table to initialize
///   @return Returns bsl::errc_success on success, bsl::errc_failure
///     and friends otherwise
///
[[nodiscard]] constexpr auto
initialize_direct_map_rpt(
    tls_t &mut_tls, page_pool_t &mut_page_pool, root_page_table_t *const pmut_rpt) noexcept
    -> bsl::errc_type
{
    auto const errc{pmut_rpt->initialize(mut_tls, mut_page_pool)};
    if (bsl::unlikely(!errc)) {
        bsl::print<bsl::V>() << bsl::here();
        return errc;
    }

    // Alias the extension's main mappings into the direct map RPT.
    pmut_rpt->add_tables(mut_tls, m_main_rpt);
    return bsl::errc_success;
}
/// <!-- description -->
///   @brief Re-aliases m_main_rpt's tables into every initialized
///     direct map RPT so that any new allocations in the main RPT
///     become visible in all VMs.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///
constexpr void
update_direct_map_rpts(tls_t &mut_tls) noexcept
{
    for (auto &mut_rpt : m_direct_map_rpts) {
        // Uninitialized slots (VMs never created) are skipped.
        if (mut_rpt.is_initialized()) {
            mut_rpt.add_tables(mut_tls, m_main_rpt);
        }
        else {
            bsl::touch();
        }
    }
}
/// <!-- description -->
///   @brief Executes the extension given an instruction pointer to
///     execute the extension at, a stack pointer to execute the
///     extension with, and a root page table defining the memory
///     layout to execute the extension with.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_intrinsic the intrinsic_t to use
///   @param ip the instruction pointer defining where in the
///     extension to start execution at.
///   @param arg0 the first argument to pass the extension
///   @param arg1 the second argument to pass the extension
///   @return Returns bsl::errc_success on success, bsl::errc_failure
///     and friends otherwise
///
[[nodiscard]] constexpr auto
execute(
    tls_t &mut_tls,
    intrinsic_t &mut_intrinsic,
    bsl::safe_u64 const &ip,
    bsl::safe_u64 const &arg0 = {},
    bsl::safe_u64 const &arg1 = {}) noexcept -> bsl::errc_type
{
    bsl::expects(ip.is_valid_and_checked());
    bsl::expects(ip.is_pos());
    bsl::expects(arg0.is_valid_and_checked());
    bsl::expects(arg1.is_valid_and_checked());

    // Execution always happens under the direct map RPT of the
    // currently active VM; activate it on this PP if needed.
    auto *const pmut_rpt{m_direct_map_rpts.at_if(bsl::to_idx(mut_tls.active_vmid))};
    bsl::expects(nullptr != pmut_rpt);

    if (pmut_rpt->is_inactive(mut_tls)) {
        pmut_rpt->activate(mut_tls, mut_intrinsic);
    }
    else {
        bsl::touch();
    }

    // Record this extension as the active one in the TLS block so the
    // rest of the kernel knows who is running.
    if (mut_tls.ext != this) {
        mut_tls.ext = this;
        mut_tls.active_extid = this->id().get();
    }
    else {
        bsl::touch();
    }

    // The fail handler runs on its dedicated fail stack (ext_fail_sp);
    // everything else runs on the normal stack (sp).
    if (ip == m_fail_ip) {
        return call_ext(ip.get(), mut_tls.ext_fail_sp, arg0.get(), arg1.get());
    }

    return call_ext(ip.get(), mut_tls.sp, arg0.get(), arg1.get());
}
public:
/// <!-- description -->
///   @brief Initializes this ext_t: validates the ELF file, records
///     the entry point and builds the extension's main root page
///     table from the system RPT.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_page_pool the page_pool_t to use
///   @param i the ID for this ext_t
///   @param file the ELF file for this ext_t
///   @param system_rpt the system RPT provided by the loader
///   @return Returns bsl::errc_success on success, bsl::errc_failure
///     and friends otherwise
///
[[nodiscard]] constexpr auto
initialize(
    tls_t &mut_tls,
    page_pool_t &mut_page_pool,
    bsl::safe_u16 const &i,
    loader::ext_elf_file_t const *const file,
    root_page_table_t const &system_rpt) noexcept -> bsl::errc_type
{
    bsl::expects(i.is_valid_and_checked());
    bsl::expects(i != syscall::BF_INVALID_ID);
    bsl::expects(nullptr != file);

    validate(file);
    m_entry_ip = file->e_entry;

    auto const ret{
        this->initialize_rpt(mut_tls, mut_page_pool, m_main_rpt, system_rpt, file)};
    if (bsl::unlikely(!ret)) {
        bsl::print<bsl::V>() << bsl::here();
        return ret;
    }

    // m_id stores the one's complement of the ID and id() complements
    // it back — presumably so a zero/default-initialized (released)
    // ext_t reads back as 0xFFFF rather than a valid-looking ID 0;
    // confirm against syscall::BF_INVALID_ID. The handle starts in the
    // "closed" state (see open_handle()/close_handle()).
    m_id = ~i;
    m_handle = syscall::BF_INVALID_HANDLE;

    return ret;
}
/// <!-- description -->
///   @brief Release the ext_t: returns all huge allocations, resets
///     every registered entry point, releases all RPTs and clears the
///     remaining state so the object reads as uninitialized.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_page_pool the page_pool_t to use
///   @param mut_huge_pool the huge_pool_t to use
///
constexpr void
release(tls_t &mut_tls, page_pool_t &mut_page_pool, huge_pool_t &mut_huge_pool) noexcept
{
    // Return every registered huge allocation. The direct-map virtual
    // address is recomputed from the physical address, mirroring the
    // computation in alloc_huge().
    for (bsl::safe_idx mut_i; mut_i < m_huge_allocs_idx; ++mut_i) {
        auto *const pmut_huge{m_huge_allocs.at_if(mut_i)};

        auto const huge_phys{mut_huge_pool.virt_to_phys(pmut_huge->data())};
        bsl::expects(huge_phys.is_valid_and_checked());
        bsl::expects(huge_phys.is_pos());

        auto const huge_virt{(HYPERVISOR_EXT_HUGE_POOL_ADDR + huge_phys).checked()};
        bsl::expects(huge_virt.is_valid_and_checked());
        bsl::expects(huge_virt.is_pos());

        // NOTE(review): alloc_huge() maps the allocation one page at a
        // time, but only huge_virt is unmapped here — confirm whether
        // unmap() removes the whole range; any leftover mappings are in
        // any case destroyed by m_main_rpt.release() below.
        bsl::discard(m_main_rpt.unmap(mut_tls, mut_page_pool, huge_virt));
        mut_huge_pool.deallocate(mut_tls, *pmut_huge);
    }

    m_huge_allocs_idx = {};
    for (auto &mut_elem : m_huge_allocs) {
        mut_elem = {};
    }

    // Clear the registered entry points.
    m_fail_ip = {};
    m_vmexit_ip = {};
    m_bootstrap_ip = {};
    m_entry_ip = {};

    // Release the per-VM direct map RPTs before the main RPT whose
    // tables they alias.
    for (auto &mut_rpt : m_direct_map_rpts) {
        mut_rpt.release(mut_tls, mut_page_pool);
    }

    m_main_rpt.release(mut_tls, mut_page_pool);

    m_is_executing_fail = {};
    m_has_executed_start = {};
    m_handle = {};
    m_id = {};
}
/// <!-- description -->
///   @brief Returns the ID of this ext_t. The ID is stored internally
///     as its one's complement (see initialize()), so it is
///     complemented back here.
///
/// <!-- inputs/outputs -->
///   @return Returns the ID of this ext_t
///
[[nodiscard]] constexpr auto
id() const noexcept -> bsl::safe_u16
{
    bsl::ensures(m_id.is_valid_and_checked());
    auto const ident{~m_id};
    return ident;
}
/// <!-- description -->
///   @brief Returns the bootstrap IP registered for this extension
///     (zero when no bootstrap handler has been registered yet).
///
/// <!-- inputs/outputs -->
///   @return Returns the bootstrap IP for this extension.
///
[[nodiscard]] constexpr auto
bootstrap_ip() const noexcept -> bsl::safe_u64 const &
{
    bsl::ensures(m_bootstrap_ip.is_valid_and_checked());
    auto const &ip{m_bootstrap_ip};
    return ip;
}
/// <!-- description -->
///   @brief Records the IP the extension wants invoked for
///     bootstrapping. Called by the syscall dispatcher when the
///     extension registers its bootstrap handler.
///
/// <!-- inputs/outputs -->
///   @param ip the bootstrap IP to use; must be valid and non-zero
///
constexpr void
set_bootstrap_ip(bsl::safe_u64 const &ip) noexcept
{
    // A zero IP would be indistinguishable from "not registered".
    bsl::expects(ip.is_valid_and_checked());
    bsl::expects(ip.is_pos());

    m_bootstrap_ip = ip;
}
/// <!-- description -->
///   @brief Returns the VMExit IP registered for this extension
///     (zero when no VMExit handler has been registered yet).
///
/// <!-- inputs/outputs -->
///   @return Returns the VMExit IP for this extension.
///
[[nodiscard]] constexpr auto
vmexit_ip() const noexcept -> bsl::safe_u64 const &
{
    bsl::ensures(m_vmexit_ip.is_valid_and_checked());
    auto const &ip{m_vmexit_ip};
    return ip;
}
/// <!-- description -->
///   @brief Records the IP the extension wants invoked on VMExits.
///     Called by the syscall dispatcher when the extension registers
///     its VMExit handler.
///
/// <!-- inputs/outputs -->
///   @param ip the VMExit IP to use; must be valid and non-zero
///
constexpr void
set_vmexit_ip(bsl::safe_u64 const &ip) noexcept
{
    // A zero IP would be indistinguishable from "not registered".
    bsl::expects(ip.is_valid_and_checked());
    bsl::expects(ip.is_pos());

    m_vmexit_ip = ip;
}
/// <!-- description -->
///   @brief Returns the fast fail IP registered for this extension
///     (zero when no fail handler has been registered yet).
///
/// <!-- inputs/outputs -->
///   @return Returns the fast fail IP for this extension.
///
[[nodiscard]] constexpr auto
fail_ip() const noexcept -> bsl::safe_u64 const &
{
    bsl::ensures(m_fail_ip.is_valid_and_checked());
    auto const &ip{m_fail_ip};
    return ip;
}
/// <!-- description -->
///   @brief Records the IP the extension wants invoked on fast fail
///     events. Called by the syscall dispatcher when the extension
///     registers its fail callback.
///
/// <!-- inputs/outputs -->
///   @param ip the fail IP to use; must be valid and non-zero
///
constexpr void
set_fail_ip(bsl::safe_u64 const &ip) noexcept
{
    // A zero IP would be indistinguishable from "not registered".
    bsl::expects(ip.is_valid_and_checked());
    bsl::expects(ip.is_pos());

    m_fail_ip = ip;
}
/// <!-- description -->
///   @brief Opens a handle and returns the resulting handle
///
/// <!-- inputs/outputs -->
///   @return Opens a handle and returns the resulting handle.
///     Returns bsl::safe_u64::failure() if a handle is already open.
///
[[nodiscard]] constexpr auto
open_handle() noexcept -> bsl::safe_u64
{
    // The "open" state is encoded as a handle value of 0; the "closed"
    // state is syscall::BF_INVALID_HANDLE (see initialize() and
    // close_handle()). A zero handle therefore means a handle is
    // already outstanding.
    if (bsl::unlikely(m_handle.is_zero())) {
        bsl::error() << "handle already opened\n" << bsl::here();
        return bsl::safe_u64::failure();
    }

    m_handle = {};
    return this->handle();
}
/// <!-- description -->
///   @brief Closes a previously opened handle by restoring the
///     "closed" sentinel (see open_handle() for the encoding).
///
constexpr void
close_handle() noexcept
{
    m_handle = syscall::BF_INVALID_HANDLE;
}
/// <!-- description -->
///   @brief Returns true if provided handle matches the currently
///     stored handle value.
///
/// <!-- inputs/outputs -->
///   @param hndl the handle to verify
///   @return Returns true if provided handle is valid
///
[[nodiscard]] constexpr auto
is_handle_valid(bsl::safe_u64 const &hndl) const noexcept -> bool
{
    bsl::expects(hndl.is_valid_and_checked());
    return m_handle == hndl;
}
/// <!-- description -->
///   @brief Returns the current handle value of this ext_t (see
///     open_handle() for how open/closed states are encoded).
///
/// <!-- inputs/outputs -->
///   @return Returns the current handle value
///
[[nodiscard]] constexpr auto
handle() const noexcept -> bsl::safe_u64
{
    bsl::ensures(m_handle.is_valid_and_checked());
    auto const hndl{m_handle};
    return hndl;
}
/// <!-- description -->
///   @brief Returns true if the extension's main function has
///     completed it's execution (set by start() on success).
///
/// <!-- inputs/outputs -->
///   @return Returns true if the extension's main function has
///     completed it's execution.
///
[[nodiscard]] constexpr auto
is_started() const noexcept -> bool
{
    return m_has_executed_start;
}
/// <!-- description -->
///   @brief Returns true if the extension's main function is
///     executing the fail_entry() (the flag is set/cleared around the
///     call in fail()).
///
/// <!-- inputs/outputs -->
///   @return Returns true if the extension's main function is
///     executing the fail_entry().
///
[[nodiscard]] constexpr auto
is_executing_fail() const noexcept -> bool
{
    return m_is_executing_fail;
}
/// <!-- description -->
///   @brief Allocates a page and maps it into the extension's
///     address space, then re-aliases the main RPT into every
///     per-VM direct map RPT so the new mapping is visible in all VMs.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_page_pool the page_pool_t to use
///   @return Returns a alloc_page_t containing the virtual address and
///     physical address of the page. If an error occurs, this
///     function will return an invalid virtual and physical address.
///
[[nodiscard]] constexpr auto
alloc_page(tls_t &mut_tls, page_pool_t &mut_page_pool) noexcept -> alloc_page_t
{
    auto const allocation{m_main_rpt.allocate_page<>(mut_tls, mut_page_pool)};
    if (bsl::unlikely(allocation.virt.is_invalid())) {
        bsl::print<bsl::V>() << bsl::here();
        return {bsl::safe_u64::failure(), bsl::safe_u64::failure()};
    }

    this->update_direct_map_rpts(mut_tls);
    return allocation;
}
/// <!-- description -->
///   @brief Allocates a physically contiguous block of memory, maps
///     it page-by-page into the extension's address space at the huge
///     pool direct-map offset, and records the allocation so release()
///     can return it later.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_page_pool the page_pool_t to use
///   @param mut_huge_pool the huge_pool_t to use
///   @param size the total number of bytes to allocate
///   @return Returns a huge_t containing the virtual address and
///     physical address of the memory block. If an error occurs, this
///     function will return an invalid virtual and physical address.
///
[[nodiscard]] constexpr auto
alloc_huge(
    tls_t &mut_tls,
    page_pool_t &mut_page_pool,
    huge_pool_t &mut_huge_pool,
    bsl::safe_umx const &size) noexcept -> alloc_huge_t
{
    bsl::expects(size.is_valid_and_checked());
    bsl::expects(size.is_pos());

    // Each huge allocation occupies one bookkeeping slot so that
    // release() can unmap and deallocate it later.
    // NOTE(review): this error path streams bsl::endl where every other
    // failure path in this class uses bsl::here() — confirm intended.
    if (bsl::unlikely(m_huge_allocs_idx >= HYPERVISOR_MAX_HUGE_ALLOCS)) {
        bsl::error() << "ext out of huge allocation slots\n" << bsl::endl;
        return {bsl::safe_u64::failure(), bsl::safe_u64::failure()};
    }

    // Round the byte count up to a whole number of pages.
    auto [mut_bytes, mut_pages]{size_to_page_aligned_bytes(size)};
    if (bsl::unlikely(mut_bytes.is_poisoned())) {
        bsl::print<bsl::V>() << bsl::here();
        return {bsl::safe_u64::failure(), bsl::safe_u64::failure()};
    }

    auto mut_huge{mut_huge_pool.allocate(mut_tls, mut_pages)};
    if (bsl::unlikely(mut_huge.is_invalid())) {
        bsl::print<bsl::V>() << bsl::here();
        return {bsl::safe_u64::failure(), bsl::safe_u64::failure()};
    }

    /// TODO:
    /// - We really should register the allocation at the end of this
    ///   function, and on failure, we would just call free_huge()
    ///   so that all of the memory can be unmapped and deallocated
    ///   there. By registering here, if the map() below fails, we
    ///   are not leaking memory, but the allocation is lost and can
    ///   never be used again.
    ///
    /// - For now this works because if the allocation fails, there
    ///   likely is no way to recover anyways as this is such a
    ///   limited resource, but if free_huge() is ever implemented,
    ///   this should be changed to "undo" what was done here so
    ///   that the huge allocation can be used in the future.
    ///
    *m_huge_allocs.at_if(m_huge_allocs_idx) = mut_huge;
    ++m_huge_allocs_idx;

    auto const huge_phys{mut_huge_pool.virt_to_phys(mut_huge.data())};
    bsl::expects(huge_phys.is_valid_and_checked());
    bsl::expects(huge_phys.is_pos());

    auto const huge_virt{(HYPERVISOR_EXT_HUGE_POOL_ADDR + huge_phys).checked()};
    bsl::expects(huge_virt.is_valid_and_checked());
    bsl::expects(huge_virt.is_pos());

    /// NOTE:
    /// - Huge allocations come from the kernel's direct map, which
    ///   is the same size as the extension's direct map, something
    ///   that is validated by CMake. As a result, the virtual and
    ///   physical addresses below can never overflow which is why
    ///   they are marked as checked().
    ///
    constexpr auto inc{bsl::to_idx(HYPERVISOR_PAGE_SIZE)};
    for (bsl::safe_idx mut_i{}; mut_i < mut_bytes; mut_i += inc) {
        auto const page_virt{(huge_virt + bsl::to_u64(mut_i)).checked()};
        auto const page_phys{(huge_phys + bsl::to_u64(mut_i)).checked()};

        auto const ret{m_main_rpt.map(
            mut_tls, mut_page_pool, page_virt, page_phys, MAP_PAGE_RW, true)};
        if (bsl::unlikely(!ret)) {
            bsl::print<bsl::V>() << bsl::here();
            return {bsl::safe_u64::failure(), bsl::safe_u64::failure()};
        }

        bsl::touch();
    }

    // Make the new mappings visible in every VM's direct map RPT.
    this->update_direct_map_rpts(mut_tls);
    return {huge_virt, huge_phys};
}
/// <!-- description -->
///   @brief Maps a page into the direct map portion of the requested
///     VM's direct map RPT given a physical address to map. The
///     direct-map virtual address is simply the physical address
///     offset by HYPERVISOR_EXT_DIRECT_MAP_ADDR.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_page_pool the page_pool_t to use
///   @param vmid the ID of the VM to map page_phys to
///   @param page_phys the physical address to map
///   @return Returns the virtual address the physical address was
///     mapped to in the direct map. On failure returns
///     bsl::safe_u64::failure().
///
[[nodiscard]] constexpr auto
map_page_direct(
    tls_t &mut_tls,
    page_pool_t &mut_page_pool,
    bsl::safe_u16 const &vmid,
    bsl::safe_u64 const &page_phys) noexcept -> bsl::safe_u64
{
    constexpr auto min_addr{HYPERVISOR_EXT_DIRECT_MAP_ADDR};

    bsl::expects(vmid.is_valid_and_checked());
    bsl::expects(bsl::to_umx(vmid) < m_direct_map_rpts.size());
    bsl::expects(page_phys.is_valid_and_checked());
    bsl::expects(page_phys.is_pos());
    bsl::expects(page_phys < min_addr);

    /// NOTE:
    /// - CMake ensures that the addr and size make sense which is why
    ///   the following is marked as checked.
    ///
    auto const dm_virt{(page_phys + min_addr).checked()};
    bsl::expects(dm_virt.is_valid_and_checked());
    bsl::expects(dm_virt.is_pos());

    auto *const pmut_rpt{m_direct_map_rpts.at_if(bsl::to_idx(vmid))};
    bsl::expects(nullptr != pmut_rpt);

    auto const errc{pmut_rpt->map(
        mut_tls, mut_page_pool, dm_virt, page_phys, MAP_PAGE_RW)};
    if (bsl::unlikely(!errc)) {
        bsl::print<bsl::V>() << bsl::here();
        return bsl::safe_u64::failure();
    }

    return dm_virt;
}
/// <!-- description -->
///   @brief Unmaps a page from the direct map portion of the requested
///     VM's direct map RPT given a virtual address to unmap, flushing
///     the TLB entry on success.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_page_pool the page_pool_t to use
///   @param intrinsic the intrinsic_t to use
///   @param vmid the ID of the VM to unmap page_virt to
///   @param page_virt the virtual address to map
///   @return Returns bsl::errc_success on success, bsl::errc_failure
///     and friends otherwise
///
[[nodiscard]] constexpr auto
unmap_page_direct(
    tls_t &mut_tls,
    page_pool_t &mut_page_pool,
    intrinsic_t const &intrinsic,
    bsl::safe_u16 const &vmid,
    bsl::safe_u64 const &page_virt) noexcept -> bsl::errc_type
{
    constexpr auto min_addr{HYPERVISOR_EXT_DIRECT_MAP_ADDR};
    constexpr auto max_addr{(min_addr + HYPERVISOR_EXT_DIRECT_MAP_SIZE).checked()};

    // page_virt must fall inside the extension's direct map window.
    bsl::expects(vmid.is_valid_and_checked());
    bsl::expects(bsl::to_umx(vmid) < m_direct_map_rpts.size());
    bsl::expects(page_virt.is_valid_and_checked());
    bsl::expects(page_virt.is_pos());
    bsl::expects(page_virt >= min_addr);
    bsl::expects(page_virt <= max_addr);

    auto *const pmut_rpt{m_direct_map_rpts.at_if(bsl::to_idx(vmid))};
    bsl::expects(nullptr != pmut_rpt);

    auto const errc{pmut_rpt->unmap(mut_tls, mut_page_pool, page_virt)};
    if (bsl::unlikely(!errc)) {
        bsl::print<bsl::V>() << bsl::here();
        return errc;
    }

    // Keep the TLB coherent with the just-removed mapping.
    intrinsic.tlb_flush(page_virt);
    return errc;
}
/// <!-- description -->
///   @brief Tells the extension that a VM was created so that it
///     can initialize it's VM specific resources (the VM's direct
///     map RPT).
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_page_pool the page_pool_t to use
///   @param vmid the ID of the VM that was created.
///   @return Returns bsl::errc_success on success, bsl::errc_failure
///     and friends otherwise
///
[[nodiscard]] constexpr auto
signal_vm_created(
    tls_t &mut_tls, page_pool_t &mut_page_pool, bsl::safe_u16 const &vmid) noexcept
    -> bsl::errc_type
{
    bsl::expects(vmid.is_valid_and_checked());
    bsl::expects(bsl::to_umx(vmid) < m_direct_map_rpts.size());

    auto *const pmut_rpt{m_direct_map_rpts.at_if(bsl::to_idx(vmid))};
    auto const errc{this->initialize_direct_map_rpt(mut_tls, mut_page_pool, pmut_rpt)};
    if (bsl::unlikely(!errc)) {
        bsl::print<bsl::V>() << bsl::here();
    }

    return errc;
}
/// <!-- description -->
///   @brief Tells the extension that a VM was destroyed so that it
///     can release it's VM specific resources (the VM's direct map
///     RPT).
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_page_pool the page_pool_t to use
///   @param vmid the ID of the VM that was destroyed.
///
constexpr void
signal_vm_destroyed(
    tls_t &mut_tls, page_pool_t &mut_page_pool, bsl::safe_u16 const &vmid) noexcept
{
    bsl::expects(vmid.is_valid_and_checked());
    bsl::expects(bsl::to_umx(vmid) < m_direct_map_rpts.size());

    auto *const pmut_rpt{m_direct_map_rpts.at_if(bsl::to_idx(vmid))};
    pmut_rpt->release(mut_tls, mut_page_pool);
}
/// <!-- description -->
///   @brief Tells the extension that the requested VM was set to
///     active and therefore it's memory map should change on this PP.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_intrinsic the intrinsic_t to use
///   @param vmid the ID of the VM that was created.
///
constexpr void
signal_vm_active(
    tls_t &mut_tls, intrinsic_t &mut_intrinsic, bsl::safe_u16 const &vmid) noexcept
{
    bsl::expects(vmid.is_valid_and_checked());
    bsl::expects(bsl::to_umx(mut_tls.active_vmid) < m_direct_map_rpts.size());

    // NOTE(review): vmid is validated but the lookup below indexes with
    // mut_tls.active_vmid instead. This is only correct if the caller
    // stores vmid into mut_tls.active_vmid before calling — confirm
    // against the call site.
    auto *const pmut_rpt{m_direct_map_rpts.at_if(bsl::to_idx(mut_tls.active_vmid))};
    bsl::expects(nullptr != pmut_rpt);

    pmut_rpt->activate(mut_tls, mut_intrinsic);
}
/// <!-- description -->
///   @brief Starts the extension by executing it's _start entry point
///     with the supported-specs value as its argument, and records
///     successful completion so is_started() reports true.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_intrinsic the intrinsic_t to use
///   @return Returns bsl::errc_success on success, bsl::errc_failure
///     and friends otherwise
///
[[nodiscard]] constexpr auto
start(tls_t &mut_tls, intrinsic_t &mut_intrinsic) noexcept -> bsl::errc_type
{
    auto const specs{bsl::to_u64(syscall::BF_ALL_SPECS_SUPPORTED_VAL)};
    auto const result{this->execute(mut_tls, mut_intrinsic, m_entry_ip, specs)};
    if (bsl::unlikely(!result)) {
        bsl::print<bsl::V>() << bsl::here();
        return result;
    }

    // Only a successful run counts as "started".
    m_has_executed_start = true;
    return result;
}
/// <!-- description -->
///   @brief Runs the extension's registered bootstrap handler with
///     the current PP's ID as its argument. Fails if no bootstrap
///     handler was ever registered.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_intrinsic the intrinsic_t to use
///   @return Returns bsl::errc_success on success, bsl::errc_failure
///     and friends otherwise
///
[[nodiscard]] constexpr auto
bootstrap(tls_t &mut_tls, intrinsic_t &mut_intrinsic) noexcept -> bsl::errc_type
{
    // A zero IP means set_bootstrap_ip() was never called.
    if (bsl::unlikely(m_bootstrap_ip.is_zero())) {
        bsl::error() << "a bootstrap handler was not registered for ext "
                     << bsl::hex(this->id())
                     << bsl::endl
                     << bsl::here();

        return bsl::errc_failure;
    }

    auto const ppid{bsl::to_u64(mut_tls.ppid)};
    auto const result{this->execute(mut_tls, mut_intrinsic, m_bootstrap_ip, ppid)};
    if (bsl::unlikely(!result)) {
        bsl::print<bsl::V>() << bsl::here();
    }

    return result;
}
/// <!-- description -->
///   @brief Runs the extension's registered VMExit handler, passing
///     the active VS's ID and the exit reason as arguments.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_intrinsic the intrinsic_t to use
///   @param exit_reason the reason for the VMExit
///   @return Returns bsl::errc_success on success, bsl::errc_failure
///     and friends otherwise
///
[[nodiscard]] constexpr auto
vmexit(
    tls_t &mut_tls, intrinsic_t &mut_intrinsic, bsl::safe_u64 const &exit_reason) noexcept
    -> bsl::errc_type
{
    auto const vsid{bsl::to_u64(mut_tls.active_vsid)};
    auto const result{this->execute(mut_tls, mut_intrinsic, m_vmexit_ip, vsid, exit_reason)};
    if (bsl::unlikely(!result)) {
        bsl::print<bsl::V>() << bsl::here();
    }

    return result;
}
/// <!-- description -->
///   @brief Runs the extension's registered fast fail handler,
///     passing it the failure reason and (when applicable) the
///     faulting address. While the handler runs,
///     is_executing_fail() reports true.
///
/// <!-- inputs/outputs -->
///   @param mut_tls the current TLS block
///   @param mut_intrinsic the intrinsic_t to use
///   @param errc the reason for the failure, which is CPU
///     specific. On x86, this is a combination of the exception
///     vector and error code.
///   @param addr contains a faulting address if the fail reason
///     is associated with an error that involves a faulting address (
///     for example like a page fault). Otherwise, the value of this
///     input is undefined.
///   @return Returns bsl::errc_success on success, bsl::errc_failure
///     and friends otherwise
///
[[nodiscard]] constexpr auto
fail(
    tls_t &mut_tls,
    intrinsic_t &mut_intrinsic,
    bsl::safe_u64 const &errc,
    bsl::safe_u64 const &addr) noexcept -> bsl::errc_type
{
    // Mark the fail handler as executing for the duration of the call
    // (execute() selects the dedicated fail stack for m_fail_ip).
    m_is_executing_fail = true;
    auto const result{this->execute(mut_tls, mut_intrinsic, m_fail_ip, errc, addr)};
    m_is_executing_fail = {};

    if (bsl::unlikely(!result)) {
        bsl::print<bsl::V>() << bsl::here();
    }

    return result;
}
/// <!-- description -->
///   @brief Dumps the ext_t as a two-column (description/value) table.
///     No-op when the build only emits critical diagnostics.
///
/// <!-- inputs/outputs -->
///   @param tls the current TLS block
///
constexpr void
dump(tls_t const &tls) const noexcept
{
    if constexpr (BSL_DEBUG_LEVEL == bsl::CRITICAL_ONLY) {
        return;
    }

    bsl::print() << bsl::mag << "ext [";
    bsl::print() << bsl::rst << bsl::hex(this->id());
    bsl::print() << bsl::mag << "] dump: ";
    bsl::print() << bsl::rst << bsl::endl;

    /// Header
    ///
    bsl::print() << bsl::ylw << "+------------------------------------+";
    bsl::print() << bsl::rst << bsl::endl;

    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::cyn << bsl::fmt{"^14s", "description "};
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::cyn << bsl::fmt{"^19s", "value "};
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::endl;

    bsl::print() << bsl::ylw << "+------------------------------------+";
    bsl::print() << bsl::rst << bsl::endl;

    /// Started (set by start() on success)
    ///
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::fmt{"<14s", "started "};
    bsl::print() << bsl::ylw << "| ";
    if (m_has_executed_start) {
        bsl::print() << bsl::grn << bsl::fmt{"^19s", "yes "};
    }
    else {
        bsl::print() << bsl::red << bsl::fmt{"^19s", "no "};
    }
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::endl;

    /// Active (is this the extension recorded in the TLS block?)
    ///
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::fmt{"<14s", "active "};
    bsl::print() << bsl::ylw << "| ";
    if (tls.active_extid == this->id()) {
        bsl::print() << bsl::grn << bsl::fmt{"^19s", "yes "};
    }
    else {
        bsl::print() << bsl::red << bsl::fmt{"^19s", "no "};
    }
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::endl;

    /// Entry IP (from the ELF file's e_entry)
    ///
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::fmt{"<14s", "entry ip "};
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::hex(m_entry_ip) << ' ';
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::endl;

    /// Bootstrap IP (zero means never registered)
    ///
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::fmt{"<14s", "bootstrap ip "};
    bsl::print() << bsl::ylw << "| ";
    if (m_bootstrap_ip.is_pos()) {
        bsl::print() << bsl::rst << bsl::hex(m_bootstrap_ip) << ' ';
    }
    else {
        bsl::print() << bsl::red << bsl::fmt{"^19s", "not registered "};
    }
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::endl;

    /// VMExit IP (zero means never registered)
    ///
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::fmt{"<14s", "vmexit ip "};
    bsl::print() << bsl::ylw << "| ";
    if (m_vmexit_ip.is_pos()) {
        bsl::print() << bsl::rst << bsl::hex(m_vmexit_ip) << ' ';
    }
    else {
        bsl::print() << bsl::red << bsl::fmt{"^19s", "not registered "};
    }
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::endl;

    /// Fail IP (zero means never registered)
    ///
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::fmt{"<14s", "fail ip "};
    bsl::print() << bsl::ylw << "| ";
    if (m_fail_ip.is_pos()) {
        bsl::print() << bsl::rst << bsl::hex(m_fail_ip) << ' ';
    }
    else {
        bsl::print() << bsl::red << bsl::fmt{"^19s", "not registered "};
    }
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::endl;

    /// Handle (zero encodes "open" — see open_handle())
    ///
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::fmt{"<14s", "handle "};
    bsl::print() << bsl::ylw << "| ";
    if (m_handle.is_zero()) {
        bsl::print() << bsl::rst << " " << bsl::hex(m_handle) << " ";
    }
    else {
        bsl::print() << bsl::red << bsl::fmt{"^19s", "not opened "};
    }
    bsl::print() << bsl::ylw << "| ";
    bsl::print() << bsl::rst << bsl::endl;

    /// Footer
    ///
    bsl::print() << bsl::ylw << "+------------------------------------+";
    bsl::print() << bsl::rst << bsl::endl;
}
};
}
#endif
| 35,938 |
335 |
<reponame>Safal08/Hacktoberfest-1<gh_stars>100-1000
{
"word": "Farewell",
"definitions": [
"Mark the departure or retirement of (someone) with a ceremony or party."
],
"parts-of-speech": "Verb"
}
| 93 |
14,668 |
<reponame>zealoussnow/chromium
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/offline_items_collection/core/update_delta.h"
namespace offline_items_collection {
// static
absl::optional<UpdateDelta> UpdateDelta::MergeUpdates(
    const absl::optional<UpdateDelta>& update1,
    const absl::optional<UpdateDelta>& update2) {
  // With only one side present there is nothing to merge: hand back the
  // side that exists (or nullopt if neither does).
  if (!update1.has_value())
    return update2;

  if (!update2.has_value())
    return update1;

  // Both deltas are present: a flag is set in the merged result when
  // either input set it.
  UpdateDelta merged;
  merged.visuals_changed = update1->visuals_changed || update2->visuals_changed;
  merged.state_changed = update1->state_changed || update2->state_changed;
  return merged;
}
// A freshly constructed delta reports a state change (state_changed is
// true by default) and no visuals change until explicitly flagged.
UpdateDelta::UpdateDelta() : state_changed(true), visuals_changed(false) {}
// Copy construction and destruction need no custom behavior.
UpdateDelta::UpdateDelta(const UpdateDelta& other) = default;
UpdateDelta::~UpdateDelta() = default;
} // namespace offline_items_collection
| 304 |
4,041 |
/*
* Copyright 2019 Web3 Labs Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package org.web3j.protocol.core;
import com.fasterxml.jackson.annotation.JsonValue;
/** https://github.com/ethereum/wiki/wiki/JSON-RPC#the-default-block-parameter */
public enum DefaultBlockParameterName implements DefaultBlockParameter {
    EARLIEST("earliest"),
    LATEST("latest"),
    PENDING("pending");

    // Wire representation used for JSON serialization.
    private final String name;

    DefaultBlockParameterName(String name) {
        this.name = name;
    }

    @JsonValue
    @Override
    public String getValue() {
        return name;
    }

    /**
     * Resolves a block parameter from its wire name, case-insensitively.
     * Unrecognized (or null) input falls through to {@link #valueOf}, which
     * throws for names that are not enum constants.
     */
    public static DefaultBlockParameterName fromString(String name) {
        if (name != null) {
            for (DefaultBlockParameterName candidate : values()) {
                if (name.equalsIgnoreCase(candidate.name)) {
                    return candidate;
                }
            }
        }
        return valueOf(name);
    }
}
| 534 |
468 |
// GLIntercept function-definition include for the GL_ARB_sparse_buffer
// extension.
#define GLI_INCLUDE_GL_ARB_SPARSE_BUFFER

// Bit-mask value group for sparse storage flags.
enum Mask_Access {
  GL_SPARSE_STORAGE_BIT_ARB = 0x0400,
};

// Enum value group referenced by the function signatures below.
enum Main {
  GL_SPARSE_BUFFER_PAGE_SIZE_ARB = 0x82F8,
};

// Commit (or release, when commit is false) a range of a sparse buffer's
// data store.
void glBufferPageCommitmentARB(GLenum[Main] target, GLintptr offset, GLsizeiptr size, GLboolean commit);

// Direct-state-access variants operating on a named buffer object.
void glNamedBufferPageCommitmentEXT(GLuint buffer, GLintptr offset, GLsizeiptr size, GLboolean commit);
void glNamedBufferPageCommitmentARB(GLuint buffer, GLintptr offset, GLsizeiptr size, GLboolean commit);
679 |
<gh_stars>100-1000
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
#include "precompiled_sfx2.hxx"
#include "ToolBoxBackground.hxx"
#include "Paint.hxx"
#include "DrawHelper.hxx"
#include "sfx2/sidebar/Tools.hxx"
#include "sfx2/sidebar/Theme.hxx"
#include <vcl/toolbox.hxx>
#include <vcl/gradient.hxx>
#include <svl/smplhint.hxx>
namespace sfx2 { namespace sidebar {
// Constructs the background window for a sidebar tool box.
// When bShowBorder is true, themed padding and the themed background are
// applied; otherwise the padding is empty and the background is a plain
// (default) wallpaper.
ToolBoxBackground::ToolBoxBackground (
    Window* pParentWindow,
    const bool bShowBorder)
    : Window(pParentWindow, WB_DIALOGCONTROL),
      maPadding(bShowBorder
          ? Tools::RectangleToSvBorder(Theme::GetRectangle(Theme::Rect_ToolBoxPadding))
          : SvBorder())
{
    if (bShowBorder)
        SetBackground(Theme::GetPaint(Theme::Paint_ToolBoxBackground).GetWallpaper());
    else
        SetBackground(Wallpaper());

#ifdef DEBUG
    // Window name to aid debugging of the window hierarchy.
    SetText(A2S("ToolBoxBackground"));
#endif
}
ToolBoxBackground::~ToolBoxBackground (void)
{
    // Undo the registration done in SetToolBoxChild(): remove our event
    // listener from the child tool box, if one was ever attached.
    Link aEventListener (LINK(this, ToolBoxBackground, WindowEventHandler));
    if (GetChildCount() > 0)
        GetChild(0)->RemoveEventListener(aEventListener);
}
// Sizes and positions this background window so that it surrounds the given
// tool box with the configured padding, and registers an event listener so
// this window's visibility follows the child's (see WindowEventHandler).
//
// Returns the position, relative to this window, at which the caller should
// place the tool box.
Point ToolBoxBackground::SetToolBoxChild (
    ToolBox* pChild,
    long nX,
    long nY,
    long nWidth,
    long nHeight,
    sal_uInt16 nFlags)
{
    if (pChild == NULL)
    {
        // Programming error; assert, then degrade gracefully by returning
        // the unadjusted position.
        OSL_ASSERT(pChild!=NULL);
        return Point(nX, nY);
    }

    Link aEventListener (LINK(this, ToolBoxBackground, WindowEventHandler));
    pChild->AddEventListener(aEventListener);

    // Grow the requested rectangle by the padding on all four sides.
    SetPosSizePixel(
        nX - maPadding.Left(),
        nY - maPadding.Top(),
        nWidth + maPadding.Left() + maPadding.Right(),
        nHeight + maPadding.Top() + maPadding.Bottom(),
        nFlags);

    return Point(
        maPadding.Left(),
        maPadding.Top());
}
// Paints the themed bevel border around the full extent of this window.
void ToolBoxBackground::Paint (const Rectangle& rRect)
{
    Window::Paint(rRect);

    Rectangle aBox (Point(0,0), GetSizePixel());
    const sidebar::Paint aTopLeftBorderPaint (Theme::GetPaint(Theme::Paint_ToolBoxBorderTopLeft));
    const sidebar::Paint aCenterBorderPaint (Theme::GetPaint(Theme::Paint_ToolBoxBorderCenterCorners));
    const sidebar::Paint aBottomRightBorderPaint (Theme::GetPaint(Theme::Paint_ToolBoxBorderBottomRight));
    const Rectangle aBorderSize (Theme::GetRectangle(Theme::Rect_ToolBoxBorder));
    DrawHelper::DrawBevelBorder (
        *this,
        aBox,
        Tools::RectangleToSvBorder(aBorderSize),
        aTopLeftBorderPaint,
        aCenterBorderPaint,
        aBottomRightBorderPaint);
}
// Re-reads theme-dependent state (background and padding) after a data
// change notification.
// NOTE(review): this unconditionally applies the themed background, even
// when the object was constructed with bShowBorder == false (which used a
// plain Wallpaper()) — confirm this asymmetry is intended.
void ToolBoxBackground::DataChanged (const DataChangedEvent& rEvent)
{
    (void)rEvent;
    SetBackground(Theme::GetPaint(Theme::Paint_ToolBoxBackground).GetWallpaper());
    maPadding = Tools::RectangleToSvBorder(Theme::GetRectangle(Theme::Rect_ToolBoxPadding));
}
// Mirrors the child tool box's visibility: shows/hides this background
// window when the child is shown or hidden.
// NOTE(review): GetChild(0) is assumed non-NULL here — events only arrive
// after SetToolBoxChild() registered this listener on the child; confirm no
// other sender can trigger this handler.
IMPL_LINK(ToolBoxBackground, WindowEventHandler, VclWindowEvent*, pEvent)
{
    if (pEvent != NULL)
    {
        switch (pEvent->GetId())
        {
            case VCLEVENT_WINDOW_SHOW:
                if (GetChild(0)->IsVisible())
                    Show();
                break;

            case VCLEVENT_WINDOW_HIDE:
                if ( ! GetChild(0)->IsVisible())
                    Hide();
                break;

            default:
                break;
        }
    }
    return sal_True;
}
} } // end of namespace sfx2::sidebar
| 1,636 |
4,756 |
// Copyright 2020 The MACE Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MICRO_TEST_CCUTILS_MICRO_OPS_TEST_QUANTIZE_UTILS_H_
#define MICRO_TEST_CCUTILS_MICRO_OPS_TEST_QUANTIZE_UTILS_H_
#include <math.h>
#include <stdint.h>
#include <limits>
#include "micro/base/logging.h"
#include "micro/common/global_buffer.h"
#include "micro/include/public/micro.h"
#include "micro/port/api.h"
namespace micro {
namespace ops {
namespace test {
// Truncates `value` toward zero (callers pre-round with roundf) and clamps
// the result into Q's representable range.
template <typename Q>
inline Q Saturate(float value) {
  const int candidate = static_cast<int>(value);
  const int lo = std::numeric_limits<Q>::lowest();
  const int hi = std::numeric_limits<Q>::max();
  if (candidate <= lo) {
    return static_cast<Q>(lo);
  }
  if (candidate >= hi) {
    return static_cast<Q>(hi);
  }
  return static_cast<Q>(candidate);
}
// Scans `input` and writes the smallest and largest element to *min_val /
// *max_val. For size == 0 the outputs are the sentinel extremes
// (base::highest() / base::lowest()).
inline void FindMinMax(const float *input,
                       const uint32_t size,
                       float *min_val,
                       float *max_val) {
  float running_max = base::lowest();
  float running_min = base::highest();
  for (uint32_t i = 0; i < size; ++i) {
    const float value = input[i];
    running_max = base::max(running_max, value);
    running_min = base::min(running_min, value);
  }
  *min_val = running_min;
  *max_val = running_max;
}
// Quantizes `size` floats into Q using output = saturate(round(zero_point +
// input / scale)).
template <typename Q>
inline void QuantizeWithScaleAndZeropoint(const float *input,
                                          const uint32_t size,
                                          float scale,
                                          int32_t zero_point,
                                          Q *output) {
  // Hoist the division out of the loop: multiply by the reciprocal instead.
  const float inv_scale = 1 / scale;
  for (uint32_t i = 0; i < size; ++i) {
    output[i] = Saturate<Q>(roundf(zero_point + inv_scale * input[i]));
  }
}
// Derives an asymmetric int8 quantization (scale, zero_point) from the data.
// The observed range is widened to include 0 so a real zero stays exactly
// representable, then mapped onto int8's 255 steps.
// NOTE(review): if every input is exactly 0, *scale becomes 0 and the
// division below is undefined — confirm callers never pass all-zero buffers.
inline void AdjustRangeInt8(const float *input,
                            const uint32_t size,
                            float *scale,
                            int32_t *zero_point) {
  float in_min_data;
  float in_max_data;
  FindMinMax(input, size, &in_min_data, &in_max_data);
  in_max_data = base::max(0.f, in_max_data);
  in_min_data = base::min(0.f, in_min_data);
  *scale = (in_max_data - in_min_data) / 255;
  // Shift by -128 so the zero point lands inside int8's [-128, 127].
  *zero_point = int8_t(-in_min_data / *scale - 128);
}
// Derives a symmetric int8 quantization scale (zero point implicitly 0):
// the largest absolute value maps to 127.
// NOTE(review): an all-zero input yields *scale == 0 — confirm downstream
// code tolerates that.
inline void AdjustRangeInt8Symmetric(const float *input,
                                     const uint32_t size,
                                     float *scale) {
  float in_min_data;
  float in_max_data;
  FindMinMax(input, size, &in_min_data, &in_max_data);
  in_max_data = base::max(0.f, in_max_data);
  in_min_data = base::min(0.f, in_min_data);
  float max_abs = base::max(base::abs(in_max_data), base::abs(in_min_data));
  *scale = max_abs / 127.0f;
}
// Convenience wrapper: derives (scale, zero_point) from the data with
// AdjustRangeInt8, then quantizes `input` into `output`. The derived
// parameters are also returned to the caller.
inline void AutoQuantizeInt8(const float *input,
                             const uint32_t size,
                             int8_t *output,
                             float *scale,
                             int32_t *zero_point) {
  AdjustRangeInt8(input, size, scale, zero_point);
  QuantizeWithScaleAndZeropoint(input, size, *scale, *zero_point, output);
}
// Convenience wrapper for symmetric quantization: derives the scale with
// AdjustRangeInt8Symmetric and quantizes with a zero point of 0.
inline void AutoQuantizeInt8Symmetric(const float *input,
                                      const uint32_t size,
                                      int8_t *output,
                                      float *scale) {
  AdjustRangeInt8Symmetric(input, size, scale);
  QuantizeWithScaleAndZeropoint(input, size, *scale, 0, output);
}
// Maps each quantized value back to real space: output = scale * (q - zero_point).
inline void Dequantize(const int8_t *input,
                       const uint32_t size,
                       const float scale,
                       const int32_t zero_point,
                       float *output) {
  for (uint32_t i = 0; i < size; ++i) {
    const int32_t centered = input[i] - zero_point;
    output[i] = scale * static_cast<float>(centered);
  }
}
} // namespace test
} // namespace ops
} // namespace micro
#endif // MICRO_TEST_CCUTILS_MICRO_OPS_TEST_QUANTIZE_UTILS_H_
| 2,070 |
848 |
<gh_stars>100-1000
/*
* Copyright 2019 Xilinx Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits>
#include <memory>
#include <numeric>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>
#include "vitis/ai/xmodel_postprocessor.hpp"
namespace {
// Post-processor plugin for the "open_pose" xmodel op. It consumes the two
// float network outputs, named "L1" and "L2" below, and converts them into
// pose-detection protobuf results (see process()).
class MyPostProcessor {
 public:
  // Describes this op to the xir framework: two required float inputs.
  static xir::OpDef get_op_def() {
    return xir::OpDef("open_pose")  //
        .add_input_arg(xir::OpArgDef{"L1", xir::OpArgDef::REQUIRED,
                                     xir::DataType::Type::FLOAT,
                                     "pose detection"})
        .add_input_arg(xir::OpArgDef{"L2", xir::OpArgDef::REQUIRED,
                                     xir::DataType::Type::FLOAT,
                                     "pose detection"})
        .set_annotation("postprocessor for open pose.");
  }

  // Captures the network input height/width (shape is NHWC) for use when
  // normalizing keypoint coordinates in process().
  explicit MyPostProcessor(
      vitis::ai::XmodelPostprocessorInitializationArgs&& args) {
    auto input_shape = args.graph_input_tensor->get_shape();
    CHECK_EQ(input_shape.size(), 4u);
    height_ = input_shape[1];
    width_ = input_shape[2];
  }

  // Defined below; turns the L1/L2 tensors into pose keypoints.
  vitis::ai::proto::DpuModelResult process(
      const vart::experimental::simple_tensor_buffer_t<float>& L1,
      const vart::experimental::simple_tensor_buffer_t<float>& L2);

 private:
  int width_;   // network input width in pixels
  int height_;  // network input height in pixels
};
// A peak is (id, score, location); ids are assigned globally across all
// heat-map channels by find_peak().
using Peak = std::tuple<int, float, cv::Point2f>;
using Peaks = vector<Peak>;
using AllPeaks = vector<Peaks>;
// Candidate limb: (index into candA, index into candB, PAF score,
// PAF score + both peak scores).
using Candidate = tuple<int, int, float, float>;
// Accepted limb: (peak id A, peak id B, PAF score, candA index, candB index).
using Connection = tuple<int, int, float, int, int>;
using AllConnection = vector<Connection>;

// Joint-index pairs forming each limb of the 14-point skeleton.
static vector<vector<int>> limbSeq = {
    {0, 1}, {1, 2}, {2, 3}, {3, 4}, {1, 5}, {5, 6}, {6, 7},
    {1, 8}, {8, 9}, {9, 10}, {1, 11}, {11, 12}, {12, 13}};
// PAF channel-index pairs (x, y) per limb; offset by -15 when indexing the
// `pafs` vector in findLines().
static vector<vector<int>> mapIdx = {
    {15, 16}, {17, 18}, {19, 20}, {21, 22}, {23, 24}, {25, 26}, {27, 28},
    {29, 30}, {31, 32}, {33, 34}, {35, 36}, {37, 38}, {39, 40}};
bool isThreeInConnection(const vector<Connection>& connections, int index) {
for (size_t i = 0; i < connections.size(); ++i) {
if (index == get<3>(connections[i])) return true;
}
return false;
}
bool isFourInConnection(const vector<Connection>& connections, int index) {
for (size_t i = 0; i < connections.size(); ++i) {
if (index == get<4>(connections[i])) return true;
}
return false;
}
// Collects local maxima of one heat-map channel into `peaks`.
//
// A pixel qualifies when its Gaussian-blurred value exceeds 0.1 and is >=
// each of its four blurred neighbours; the recorded score is taken from the
// *unblurred* map. Border pixels are never candidates. `idx` is a running
// peak id shared across channels, pre-incremented for each peak found.
void find_peak(cv::Mat ori_img, Peaks& peaks, int& idx) {
  cv::Mat gas_img;
  GaussianBlur(ori_img, gas_img, cv::Size(3, 3), 3);
  for (int x = 1; x < gas_img.cols - 1; ++x)
    for (int y = 1; y < gas_img.rows - 1; ++y) {
      {
        if (gas_img.at<float>(y, x) <= 0.1) continue;
        if (gas_img.at<float>(y, x) >= gas_img.at<float>(y, x - 1) &&
            gas_img.at<float>(y, x) >= gas_img.at<float>(y - 1, x) &&
            gas_img.at<float>(y, x) >= gas_img.at<float>(y, x + 1) &&
            gas_img.at<float>(y, x) >= gas_img.at<float>(y + 1, x)) {
          peaks.emplace_back(++idx, ori_img.at<float>(y, x), cv::Point(x, y));
        }
      }
    }
}
void findLines(int width, const vector<cv::Mat>& pafs,
const AllPeaks& all_peaks, vector<AllConnection>& connection_all,
vector<int>& special_k) {
vector<Connection> connection;
int mid_num = 10;
for (size_t k = 0; k < mapIdx.size(); ++k) {
cv::Mat score_midx = pafs[mapIdx[k][0] - 15];
cv::Mat score_midy = pafs[mapIdx[k][1] - 15];
Peaks candA = all_peaks[limbSeq[k][0]];
Peaks candB = all_peaks[limbSeq[k][1]];
size_t nA = candA.size();
size_t nB = candB.size();
vector<float> vec;
vec.reserve(2);
if (!candA.empty() && !candB.empty()) {
vector<Candidate> connection_candidate;
for (size_t i = 0; i < candA.size(); ++i) {
for (size_t j = 0; j < candB.size(); ++j) {
vec[0] = get<2>(candA[i]).x - get<2>(candB[j]).x;
vec[1] = get<2>(candA[i]).y - get<2>(candB[j]).y;
float norm = sqrt(vec[0] * vec[0] + vec[1] * vec[1]);
vector<cv::Point2f> points;
for (int a = 0; a < mid_num; ++a) {
points.emplace_back(cv::Point2f(
int(round(get<2>(candA[i]).x - a * vec[0] / (mid_num - 1))),
int(round(get<2>(candA[i]).y - a * vec[1] / (mid_num - 1)))));
}
vec[0] = vec[0] / norm;
vec[1] = vec[1] / norm;
vector<float> vec_x;
vector<float> vec_y;
vector<float> score_midpts;
float sum = 0;
int lencir = 0;
for (size_t b = 0; b < points.size(); ++b) {
vec_x.emplace_back(score_midx.at<float>(points[b].y, points[b].x));
vec_y.emplace_back(score_midy.at<float>(points[b].y, points[b].x));
score_midpts.emplace_back(
abs(vec_x[b] * vec[0] + vec_y[b] * vec[1]));
sum += score_midpts[b];
if (score_midpts[b] > 0.05) lencir++;
}
float score_with_dist_prior =
sum / score_midpts.size() + min(0.5 * width / norm - 1, 0.0);
bool cirterion1 = lencir > 0.8 * score_midpts.size();
bool cirterion2 = score_with_dist_prior > 0;
if (cirterion1 && cirterion2) {
connection_candidate.emplace_back(
i, j, score_with_dist_prior,
score_with_dist_prior + get<1>(candA[i]) + get<1>(candB[j]));
}
}
}
std::sort(connection_candidate.begin(), connection_candidate.end(),
[](const tuple<int, int, float, float>& lhs,
const tuple<int, int, float, float>& rhs) {
return get<2>(lhs) > get<2>(rhs);
});
connection.clear();
for (size_t c = 0; c < connection_candidate.size(); ++c) {
int i = get<0>(connection_candidate[c]);
int j = get<1>(connection_candidate[c]);
float s = get<2>(connection_candidate[c]);
if (!isThreeInConnection(connection, i) &&
!isFourInConnection(connection, j)) {
connection.emplace_back(get<0>(candA[i]), get<0>(candB[j]), s, i, j);
if (connection.size() >= min(nA, nB)) break;
}
}
connection_all.emplace_back(connection);
} else {
special_k.emplace_back(k);
connection.clear();
connection_all.emplace_back(connection);
}
}
}
// Result container produced by getPoses(): one 14-point skeleton per person.
struct OpenPoseResult {
  struct PosePoint {
    /// Point type \li \c 1 : "valid" \li \c 3 : "invalid"
    int type = 0;
    /// Coordinate point.
    cv::Point2f point;
  };
  /// A vector of pose, pose is represented by a vector of PosePoint.
  /// Joint points are arranged in order
  /// 0: head, 1: neck, 2: L_shoulder, 3:L_elbow, 4: L_wrist, 5: R_shoulder,
  /// 6: R_elbow, 7: R_wrist, 8: L_hip, 9:L_knee, 10: L_ankle, 11: R_hip,
  /// 12: R_knee, 13: R_ankle
  std::vector<std::vector<PosePoint>> poses;
};
// Assembles per-person skeletons from the joint peaks and the per-limb
// connections produced by findLines().
//
// Each `subset` row has 16 slots: 14 joint peak-ids, [14] = accumulated
// score, [15] = number of joints assigned.
// NOTE(review): rows are vector<int>, so the float scores written into slot
// 14 are truncated — the reference OpenPose implementation keeps floats;
// confirm this is acceptable.
//
// Returns subset.size() + 1 poses; index 0 is left at its defaults and the
// remaining entries are filled in reverse subset order (the caller iterates
// from index 1).
std::vector<std::vector<OpenPoseResult::PosePoint>> getPoses(
    const AllPeaks& all_peaks, vector<AllConnection>& connection_all,
    vector<int>& special_k) {
  vector<vector<int>> subset(0, vector<int>(16, -1));
  // Flatten all peaks; peak ids from find_peak() index into `candidate`.
  Peaks candidate;
  for (auto peaks : all_peaks) {
    for (auto peak : peaks) {
      candidate.emplace_back(peak);
    }
  }
  for (size_t k = 0; k < mapIdx.size(); ++k) {
    if (find(special_k.begin(), special_k.end(), k) == special_k.end()) {
      int indexA = limbSeq[k][0];
      int indexB = limbSeq[k][1];
      for (size_t i = 0; i < connection_all[k].size(); ++i) {
        // Count how many existing people already contain either endpoint.
        int found = 0;
        int partA = get<0>(connection_all[k][i]);
        int partB = get<1>(connection_all[k][i]);
        vector<int> subset_idx(2, -1);
        for (size_t j = 0; j < subset.size(); ++j) {
          if (subset[j][indexA] == partA || subset[j][indexB] == partB) {
            subset_idx[found] = j;
            found += 1;
          }
        }
        if (found == 1) {
          // Extend an existing person with the B endpoint.
          // NOTE(review): the score update reads get<0>(candidate[partA])
          // (the peak *id*) rather than get<1> (the peak *score*) — compare
          // with the reference implementation before relying on the scores.
          int j = subset_idx[0];
          if (subset[j][indexB] != partB) {
            subset[j][indexB] = partB;
            subset[j][15] += 1;
            subset[j][14] +=
                get<0>(candidate[partA]) + get<2>(connection_all[k][i]);
          }
        } else if (found == 2) {
          // Both endpoints already belong to (different) people.
          int j1 = subset_idx[0];
          int j2 = subset_idx[1];
          vector<int> membership(14, 0);
          for (size_t a = 0; a < membership.size(); ++a) {
            int x = subset[j1][a] >= 0 ? 1 : 0;
            int y = subset[j2][a] >= 0 ? 1 : 0;
            membership[a] = x + y;
          }
          if (find(membership.begin(), membership.end(), 2) ==
              membership.end()) {
            // Disjoint joint sets: merge person j2 into j1.
            // NOTE(review): these loop bounds use subset.size() (the number
            // of people) where the row length (16) appears intended — the
            // reference implementation iterates the row's entries. Also the
            // score is added to slot 13 (a joint slot), not slot 14. Both
            // look like porting bugs; confirm against the reference.
            for (size_t a = 0; a < subset.size() - 2; ++a) {
              subset[j1][a] += (subset[j2][a] + 1);
            }
            for (size_t a = subset.size() - 2; a < subset.size(); ++a) {
              subset[j1][a] += subset[j2][a];
            }
            subset[j1][13] += get<2>(connection_all[k][i]);
          } else {
            subset[j1][indexB] = partA;
            subset[j1][15] += 1;
            subset[j1][14] +=
                get<0>(candidate[partB]) + get<2>(connection_all[k][i]);
          }
        } else if (found == 0 && k < 14) {
          // Neither endpoint seen yet: start a new person.
          // NOTE(review): k < 14 is always true here since mapIdx has 13
          // entries.
          vector<int> row(16, -1);
          row[indexA] = partA;
          row[indexB] = partB;
          row[15] = 2;
          row[14] = get<0>(candidate[partA]) + get<0>(candidate[partB]) +
                    get<2>(connection_all[k][i]);
          subset.emplace_back(row);
        }
      }
    }
  }
  // Drop weak people: fewer than 4 joints or low mean score.
  for (size_t i = 0; i < subset.size(); ++i) {
    // NOTE(review): this inner loop is empty — dead code, likely a leftover
    // from debug printing.
    for (size_t j = 0; j < subset[i].size(); ++j) {
    }
    if (subset[i][15] < 4 || subset[i][14] / subset[i][15] < 0.4) {
      subset.erase(subset.begin() + i);
      --i;
    }
  }
  // Convert surviving subset rows into PosePoints; missing joints get
  // type 3, present joints type 1 plus the peak's coordinates.
  OpenPoseResult::PosePoint posePoint;
  std::vector<std::vector<OpenPoseResult::PosePoint>> poses(
      subset.size() + 1, vector<OpenPoseResult::PosePoint>(14, posePoint));
  for (size_t i = 0; i < subset.size(); ++i) {
    for (int j = 0; j < 14; ++j) {
      int idx = subset[i][j];
      if (idx == -1) {
        (poses[subset.size() - i][j]).type = 3;
        continue;
      }
      (poses[subset.size() - i][j]).type = 1;
      (poses[subset.size() - i][j]).point = get<2>(candidate[idx]);
    }
  }
  return poses;
}
// Converts the network's raw L1 (part-affinity fields) and L2 (joint heat
// maps) tensors into per-person pose keypoints, with coordinates normalized
// by the network input size captured in the constructor.
vitis::ai::proto::DpuModelResult MyPostProcessor::process(
    const vart::experimental::simple_tensor_buffer_t<float>& chwdataL1_dpu,
    const vart::experimental::simple_tensor_buffer_t<float>& chwdataL2_dpu) {
  auto L1_shape = chwdataL1_dpu.tensor->get_shape();
  CHECK_EQ(L1_shape.size(), 4u);
  auto hL1 = L1_shape[1];
  auto wL1 = L1_shape[2];
  auto channelL1 = L1_shape[3];
  auto L2_shape = chwdataL2_dpu.tensor->get_shape();
  auto hL2 = L2_shape[1];
  auto wL2 = L2_shape[2];
  auto channelL2 = L2_shape[3];
  // BUGFIX: this buffer was only reserve()d before being written through
  // operator[] and read via .data() + offset; reserve() does not create
  // elements, so both accesses were undefined behavior. resize() allocates
  // and value-initializes the elements.
  vector<float> chwdataL2;
  chwdataL2.resize(hL2 * wL2 * channelL2);
  // Transpose NHWC -> NCHW so each channel becomes a contiguous plane.
  for (int ih = 0; ih < hL2; ++ih) {
    for (int iw = 0; iw < wL2; ++iw) {
      for (int ic = 0; ic < channelL2; ++ic) {
        int offset = ic * wL2 * hL2 + ih * wL2 + iw;
        chwdataL2[offset] =
            chwdataL2_dpu.data[ih * wL2 * channelL2 + iw * channelL2 + ic];
      }
    }
  }
  int idx = -1;  // running peak id shared across all channels
  AllPeaks all_peaks;
  // Find peaks in every L2 channel except the last, upsampling each plane
  // by 8x first.
  for (int i = 0; i < channelL2 - 1; ++i) {
    cv::Mat um(hL2, wL2, CV_32F, chwdataL2.data() + i * wL2 * hL2);
    resize(um, um, cv::Size(0, 0), 8, 8, cv::INTER_CUBIC);
    Peaks peaks;
#ifdef ENABLE_NEON
    find_peak_neon(um, peaks, idx);
#else
    find_peak(um, peaks, idx);
#endif
    all_peaks.emplace_back(peaks);
  }
  // BUGFIX: same reserve()-then-index undefined behavior as above.
  vector<float> chwdataL1;
  chwdataL1.resize(hL1 * wL1 * channelL1);
  for (int ih = 0; ih < hL1; ++ih) {
    for (int iw = 0; iw < wL1; ++iw) {
      for (int ic = 0; ic < channelL1; ++ic) {
        int offset = ic * wL1 * hL1 + ih * wL1 + iw;
        chwdataL1[offset] =
            chwdataL1_dpu.data[ih * wL1 * channelL1 + iw * channelL1 + ic];
      }
    }
  }
  // Upsample every PAF channel by 8x to match the peak coordinates.
  vector<cv::Mat> pafs;
  for (int i = 0; i < channelL1; ++i) {
    cv::Mat um(hL1, wL1, CV_32F, chwdataL1.data() + i * wL1 * hL1);
    cv::resize(um, um, cv::Size(0, 0), 8, 8, cv::INTER_CUBIC);
    pafs.emplace_back(um);
  }
  vector<int> special_k;
  vector<AllConnection> connection_all;
  auto sWidth = width_;
  auto sHeight = height_;
  findLines(sWidth, pafs, all_peaks, connection_all, special_k);
  auto poses = getPoses(all_peaks, connection_all, special_k);
  auto ret = vitis::ai::proto::DpuModelResult();
  auto key = ret.mutable_pose_detect_result();
  // Normalize pixel coordinates into [0, 1] relative to the network input.
  float scale_x = 1.0f / float(sWidth);
  float scale_y = 1.0f / float(sHeight);
  // Copy each of the 14 joints into the protobuf; type == 1 marks a
  // detected point (getPoses() leaves poses[0] unpopulated).
  // TODO: why start from 1?
  for (size_t k = 1; k < poses.size(); ++k) {
    auto size = poses[k].size();
    CHECK_EQ(size, 14u);
    auto i = 0;
    if (poses[k][i].type == 1) {
      key->mutable_head()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_head()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_neck()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_neck()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_right_shoulder()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_right_shoulder()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_right_elbow()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_right_elbow()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_right_wrist()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_right_wrist()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_left_shoulder()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_left_shoulder()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_left_elbow()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_left_elbow()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_left_wrist()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_left_wrist()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_right_hip()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_right_hip()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_right_knee()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_right_knee()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_right_ankle()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_right_ankle()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_left_hip()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_left_hip()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_left_knee()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_left_knee()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
    if (poses[k][i].type == 1) {
      key->mutable_left_ankle()->set_x(poses[k][i].point.x * scale_x);
      key->mutable_left_ankle()->set_y(poses[k][i].point.y * scale_y);
    }
    i++;
  }
  return ret;
}
} // namespace
// Plugin entry point, exported with C linkage (unmangled name) so the
// framework can locate it by name.
extern "C" std::unique_ptr<vitis::ai::XmodelPostprocessorBase>
create_xmodel_postprocessor() {
  return std::make_unique<vitis::ai::XmodelPostprocessor<MyPostProcessor>>();
}
| 7,904 |
575 |
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package org.chromium.chrome.browser.browserservices;
import android.os.SystemClock;
import org.chromium.base.metrics.RecordHistogram;
import org.chromium.chrome.browser.browserservices.verification.OriginVerifier;
/**
* Class to contain metrics recording constants and behaviour for Browser Services.
*/
/**
 * Class to contain metrics recording constants and behaviour for Browser Services.
 */
public class BrowserServicesMetrics {
    /** Implementation of {@link OriginVerifier.MetricsListener}. */
    public static class OriginVerifierMetricsListener implements OriginVerifier.MetricsListener {
        @Override
        public void recordVerificationResult(@OriginVerifier.VerificationResult int result) {
            RecordHistogram.recordEnumeratedHistogram("BrowserServices.VerificationResult", result,
                    OriginVerifier.VerificationResult.NUM_ENTRIES);
        }

        @Override
        public void recordVerificationTime(long duration, boolean online) {
            // Online and offline verifications are tracked in separate histograms.
            String histogram = online ? "BrowserServices.VerificationTime.Online"
                                      : "BrowserServices.VerificationTime.Offline";
            RecordHistogram.recordTimesHistogram(histogram, duration);
        }
    }

    /**
     * Returns a {@link TimingMetric} that records the amount of time spent querying the Android
     * system for ResolveInfos that will deal with a given URL when launching from a background
     * service.
     */
    public static TimingMetric getServiceTabResolveInfoTimingContext() {
        return new TimingMetric("BrowserServices.ServiceTabResolveInfoQuery");
    }

    /**
     * Returns a {@link TimingMetric} that records the amount of time spent opening the
     * {@link ClientAppDataRegister}.
     */
    public static TimingMetric getClientAppDataLoadTimingContext() {
        return new TimingMetric("BrowserServices.ClientAppDataLoad");
    }

    /**
     * Returns a {@link TimingMetric} that records the amount of time taken to check if a package
     * handles a Browsable intent.
     */
    public static TimingMetric getBrowsableIntentResolutionTimingContext() {
        return new TimingMetric("BrowserServices.BrowsableIntentCheck");
    }

    /**
     * A class to be used with a try-with-resources to record the elapsed time within the try block.
     */
    public static class TimingMetric implements AutoCloseable {
        private final String mHistogramName;
        private final long mStartTimeMs;

        private TimingMetric(String histogramName) {
            mHistogramName = histogramName;
            mStartTimeMs = now();
        }

        private static long now() {
            return SystemClock.uptimeMillis();
        }

        @Override
        public void close() {
            RecordHistogram.recordMediumTimesHistogram(mHistogramName, now() - mStartTimeMs);
        }
    }

    // Don't let anyone instantiate.
    private BrowserServicesMetrics() {}
}
| 1,108 |
416 |
<filename>iPhoneOS14.2.sdk/System/Library/Frameworks/PDFKit.framework/Headers/PDFActionRemoteGoTo.h<gh_stars>100-1000
//
// PDFActionRemoteGoTo.h
// Copyright © 2019 Apple. All rights reserved.
//
// PDFActionRemoteGoTo is an interaction event tied with an annotation. This action
// is the same action type as PDFActionGoTo, but references another document, allowing
// you to jump to specific pages or positions in other PDF files.
//
#import <PDFKit/PDFKitPlatform.h>
#import <PDFKit/PDFAction.h>
NS_ASSUME_NONNULL_BEGIN
@class PDFActionRemoteGoToPrivateVars;
PDFKIT_CLASS_AVAILABLE(10_5, 11_0)
@interface PDFActionRemoteGoTo : PDFAction <NSCopying>
{
@private
    // Opaque storage for the class's internal state.
    PDFActionRemoteGoToPrivateVars *_private2;
}

// As with the calls below, note that PDFActionRemoteGoTo uses a zero-based page index and not a PDFPage object.
// This simplifies dealing with remote destinations for documents that may not be instantiated.
- (instancetype)initWithPageIndex:(NSUInteger)pageIndex atPoint:(PDFPoint)point fileURL:(NSURL *)url NS_DESIGNATED_INITIALIZER;

// Get and set the page index (zero-based) referenced by the action.
@property (nonatomic) NSUInteger pageIndex;

// Get and set the point on the above page referenced by the action. Just like PDFDestination a value of
// kPDFDestinationUnspecifiedValue for the x or the y of the point indicates that no position is specified.
@property (nonatomic) PDFPoint point;

// Get and set the URL of the document referenced from the action.
@property (nonatomic, copy) NSURL *URL;

@end
NS_ASSUME_NONNULL_END
| 487 |
521 |
<gh_stars>100-1000
/**
* @file include/retdec/config/doxygen.h
* @brief Doxygen documentation of the config namespace.
* @copyright (c) 2017 Avast Software, licensed under the MIT license
*/
// As there is no better place to comment this namespace, we do this in the
// present file.
/**
@namespace config A library providing access to configuration of
decompilations.
@section intro_sec Introduction
@c config library defines many objects used throughout all decompiler's parts
and groups them together into an instance of Config class.
Its main purpose is to hold all relevant information gathered about the
input binary file and make them persistent by serialization into human-readable
text file in JSON data format.
Possible uses of this library include:
<ul>
<li> Propagate information between all decompiler's parts. If, for example,
front-end detects functions and their properties (e.g. addresses),
it fills these information into Config instance, serialize it and therefore
makes it available to all upcoming decompilation parts.
<li> Propagate information to the decompilation itself. It is possible to provide
decompilation with the input JSON config file, which is then deserialized into
Config instance and used by the decompiler. This allows other tools such as
IDA Pro plugin to guide and enrich decompilation results.
<li> Generated Config JSON file may also serve as a persistent database
for completed decompilations. It can be changed, archived or shared, and
if needed fed back to the decompiler to repeat the decompilation.
</ul>
@section cpp_naming Naming Conventions
The library uses the following naming conventions.
<ul>
<li> Source files (i.e. @c *.{h,cpp}) are named by using @c snake_case,
e.g. architecture.h or calling_convention.cpp.
<li> Functions and variables are named by using @c camelCase,
e.g. Parameters::outputFile or Architecture::setIsEndianLittle().
<li> Classes are named by using @c CamelCase, e.g. Architecture.
<li> No name contains two or more successive uppercase letters,
even if it corresponds to an abbreviation, e.g ToolInfoContainer::isPspGcc()
is used instead of @c ToolInfoContainer::isPSPGCC().
	<li> All setters are prefixed with @c set, boolean getters with @c is
and all other getters with @c get. If a setter sets a boolean value
or explicit @c enum variant, it is prefixed with @c setIs.
</ul>
@section modules_sec Current modules
This is a short description of all current @c config library modules.
See classes documentation for more details.
<ul>
<li> Config - the main class which encapsulates all of the other data.
<li> Architecture - information about input binary's target architecture
(e.g. x86, little-arm, big-mips).
<li> CallingConvention - represents all known function calling conventions.
<li> FileFormat - information about input binary's file format (e.g. PE, ELF, COFF).
<li> FileType - describes input's binary file type (e.g. shared library, archive,
executable, object file).
<li> Parameters - holds all decompilation options.
<li> Function - contains relevant information about function.
Functions are gathered in FunctionContainer.
<li> Language - describes language used to create binary file.
Possible languages are kept in LanguageContainer.
<li> Object - represents objects like global variables, registers, etc.
Objects are gathered into ObjectContainer or other derived containers
(e.g GlobalVarContainer).
<li> Segment - represents input binary's segment. All segments are
stored in SegmentContainer.
<li> Storage - represents storage of objects.
<li> ToolInfo - tools that may have been used to create/manipulate input
binary (e.g. compiler, packer). Tools are kept in ToolInfoContainer.
<li> Type - represents data type for objects or functions.
All used types are in TypeContainer.
</ul>
@section includes Includes
To use entire @c config library, include only the main @c config.h file
using the full @c decdev path:
@code{.cpp}
#include "retdec/config/config.h"
@endcode
However, if you want to use only specific parts of the library, you can
include just the used header files:
@code{.cpp}
#include "retdec/config/architecture.h"
@endcode
@section Namespaces
All the classes, functions, and constants the library provides are
in the config namespace.
@section error_handling Error Handling
	All exceptions thrown by the library are derived from the @c Exception class,
	which can be used to catch them all.
	The library throws a @c ParseException exception if JSON parsing fails.
@c ConfigException contains information necessary to track the problem.
(see its documentation for more details).
	The library throws a @c FileNotFoundException exception if the input file
	cannot be parsed.
The library evaluates asserts on some critical places. This can help during
the development, but can be disabled in a release version. Code should be written
in such a way, that it will not fail even if assert that would go off is disabled.
You also need to check return values of methods which may not succeed.
For example, method:
@code{.cpp}
config::Function* FunctionContainer::getFunctionByName(const std::string& name);
@endcode
returns @c nullptr rather than throwing an exception, if function of
the specified name is not found. All other methods behave like this as well.
@section json_naming JSON naming convention
These are the guidelines for JSON names.
It would be best if once used names never changed.
Therefore, use this conventions and think twice before adding a new name.
These guidelines are based on
<a href="http://google-styleguide.googlecode.com/svn/trunk/jsoncstyleguide.xml#Property_Name_Format">Google JSON Style Guide</a>.
For the purposes of this style guide, we define the following terms:
<ul>
<li>property = a name/value pair inside a JSON object.
<li>property name = the name (or key) portion of the property.
<li>property value = the value portion of the property.
</ul>
Example:
@code{.json}
{ "propertyName" : "propertyValue" } ///< property
@endcode
Guidelines:
<ul>
<li>No comments in JSON objects.
<li>Use double quotes.
<li>Choose meaningful property names.
<li>Property names must be @c camelCase, ASCII 0-127 strings.
<li>Array types should have plural property names. All other property
names should be singular.
<li>Avoid naming conflicts by choosing a new property name or versioning
the API.
<li>Enum values should be represented as strings.
<li>Date and time must be formatted the same way as in {front,back}-end.
</ul>
@section complete_example A Complete Example
In the following example, function @c createJson() creates an empty config instance,
fills it up with data, serializes it into a JSON string and returns that string.
Then, function @c parseJson() takes the JSON string, deserializes its content
into Config internal representation and accesses its data.
@code{.cpp}
#include <iostream>
#include "retdec/config/config.h"
std::string createJson()
{
// Create an empty config file.
// All config entries are initialized to their default values.
config::Config config;
// Fill some basic information in to the file.
// Nothing complicated yet, a lot of stuff can be set only by using
// simple set*() methods.
config.architecture.setIsMips();
config.architecture.setIsEndianLittle();
config.fileType.setIsExecutable();
config.fileFormat.setIsElf32();
// Some set*() methods need a value to set.
config.setImageBase(0x1000);
config.parameters.setOutputFile("/decompilation/output/file.c");
// Other members are containers in which we can insert values.
config.parameters.abiPaths.insert("/path/to/abi");
config.parameters.selectedRanges.insert( config::AddressRangeJson(0x1000, 0x2000) );
// Some containers need a little bit more work to properly fill up.
// Here we create a function named 'my_function', set its calling convention
// and add parameter and local variable into it.
// Do not forget to add the function into config function container.
config::Function fnc("my_function");
fnc.callingConvention.setIsStdcall();
config::Object param("param", config::Storage::undefined());
param.type.setLlvmIr("i32");
config::Object local("local", config::Storage::onStack(-20));
local.type.setLlvmIr("i8*");
fnc.parameters.insert(param);
fnc.locals.insert(local);
config.functions.insert(fnc);
// Finally, we can serialize the config instance into a JSON string.
std::string json = config.generateJsonString();
// We return this JSON string, so that others can use it.
return json;
}
void parseJson(const std::string& json)
{
std::stringstream out;
// We again create an empty config file.
// We can initialize it manually like in the createJson() function,
// or by JSON string or JSON file like in this function.
config::Config config;
// String used in initialization must contain a valid JSON string.
// Empty JSON string is valid.
// It does not have to contain any obligatory values.
// Any missing values are set to default.
config.readJsonString("{}");
// Therefore, it might contain very little (or no) information.
config.readJsonString("{ \"inputPath\" : \"/input/path\" }");
// We can parse any JSON string.
// Any successful parsing by readJsonString() or readJsonFile() resets
// all existing values to their defaults before setting them again.
// Therefore, no previously set data survive these methods.
// For example, this call will reset "inputPath" property set in the
// previous line.
config.readJsonString(json);
// Now we can access all information from the file.
out << config.architecture.getName() << "\n";
out << config.architecture.isMips() << "\n";
out << config.parameters.getOutputFile() << "\n";
config::Function* fnc = config.functions.getFunctionByName("my_function");
if (fnc)
{
out << fnc->getName() << "\n";
}
}
int main()
{
std::string json = createJson();
parseJson(json);
return 0;
}
@endcode
The example works with JSON string generated by @c generateJsonString() and
processed by @c readJsonString(). However, it is possible to serialize the
config instance directly into file using @c generateJsonFile() method, and
parse it using @c readJsonFile() method.
*/
| 3,031 |
903 |
<reponame>dyzmapl/BumpTop
#include "../../src/gui/dialogs/qinputdialog.h"
| 35 |
779 |
<filename>vpx_dsp/x86/highbd_convolve_avx2.c
/*
* Copyright (c) 2017 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <immintrin.h>
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/convolve.h"
#include "vpx_dsp/x86/convolve_avx2.h"
// -----------------------------------------------------------------------------
// Copy and average
void vpx_highbd_convolve_copy_avx2(const uint16_t *src, ptrdiff_t src_stride,
uint16_t *dst, ptrdiff_t dst_stride,
const InterpKernel *filter, int x0_q4,
int x_step_q4, int y0_q4, int y_step_q4,
int w, int h, int bd) {
(void)filter;
(void)x0_q4;
(void)x_step_q4;
(void)y0_q4;
(void)y_step_q4;
(void)bd;
assert(w % 4 == 0);
if (w > 32) { // w = 64
do {
const __m256i p0 = _mm256_loadu_si256((const __m256i *)src);
const __m256i p1 = _mm256_loadu_si256((const __m256i *)(src + 16));
const __m256i p2 = _mm256_loadu_si256((const __m256i *)(src + 32));
const __m256i p3 = _mm256_loadu_si256((const __m256i *)(src + 48));
src += src_stride;
_mm256_storeu_si256((__m256i *)dst, p0);
_mm256_storeu_si256((__m256i *)(dst + 16), p1);
_mm256_storeu_si256((__m256i *)(dst + 32), p2);
_mm256_storeu_si256((__m256i *)(dst + 48), p3);
dst += dst_stride;
h--;
} while (h > 0);
} else if (w > 16) { // w = 32
do {
const __m256i p0 = _mm256_loadu_si256((const __m256i *)src);
const __m256i p1 = _mm256_loadu_si256((const __m256i *)(src + 16));
src += src_stride;
_mm256_storeu_si256((__m256i *)dst, p0);
_mm256_storeu_si256((__m256i *)(dst + 16), p1);
dst += dst_stride;
h--;
} while (h > 0);
} else if (w > 8) { // w = 16
__m256i p0, p1;
do {
p0 = _mm256_loadu_si256((const __m256i *)src);
src += src_stride;
p1 = _mm256_loadu_si256((const __m256i *)src);
src += src_stride;
_mm256_storeu_si256((__m256i *)dst, p0);
dst += dst_stride;
_mm256_storeu_si256((__m256i *)dst, p1);
dst += dst_stride;
h -= 2;
} while (h > 0);
} else if (w > 4) { // w = 8
__m128i p0, p1;
do {
p0 = _mm_loadu_si128((const __m128i *)src);
src += src_stride;
p1 = _mm_loadu_si128((const __m128i *)src);
src += src_stride;
_mm_storeu_si128((__m128i *)dst, p0);
dst += dst_stride;
_mm_storeu_si128((__m128i *)dst, p1);
dst += dst_stride;
h -= 2;
} while (h > 0);
} else { // w = 4
__m128i p0, p1;
do {
p0 = _mm_loadl_epi64((const __m128i *)src);
src += src_stride;
p1 = _mm_loadl_epi64((const __m128i *)src);
src += src_stride;
_mm_storel_epi64((__m128i *)dst, p0);
dst += dst_stride;
_mm_storel_epi64((__m128i *)dst, p1);
dst += dst_stride;
h -= 2;
} while (h > 0);
}
}
// Average-with-destination copy: each output sample becomes the rounding
// average of src and the existing dst value (via _mm256_avg_epu16 /
// _mm_avg_epu16, i.e. (a + b + 1) >> 1). Filter/step/bd arguments are
// ignored; they only satisfy the convolve prototype.
void vpx_highbd_convolve_avg_avx2(const uint16_t *src, ptrdiff_t src_stride,
                                  uint16_t *dst, ptrdiff_t dst_stride,
                                  const InterpKernel *filter, int x0_q4,
                                  int x_step_q4, int y0_q4, int y_step_q4,
                                  int w, int h, int bd) {
  (void)filter;
  (void)x0_q4;
  (void)x_step_q4;
  (void)y0_q4;
  (void)y_step_q4;
  (void)bd;
  assert(w % 4 == 0);
  if (w > 32) {  // w = 64
    __m256i p0, p1, p2, p3, u0, u1, u2, u3;
    do {
      p0 = _mm256_loadu_si256((const __m256i *)src);
      p1 = _mm256_loadu_si256((const __m256i *)(src + 16));
      p2 = _mm256_loadu_si256((const __m256i *)(src + 32));
      p3 = _mm256_loadu_si256((const __m256i *)(src + 48));
      src += src_stride;
      u0 = _mm256_loadu_si256((const __m256i *)dst);
      u1 = _mm256_loadu_si256((const __m256i *)(dst + 16));
      u2 = _mm256_loadu_si256((const __m256i *)(dst + 32));
      u3 = _mm256_loadu_si256((const __m256i *)(dst + 48));
      _mm256_storeu_si256((__m256i *)dst, _mm256_avg_epu16(p0, u0));
      _mm256_storeu_si256((__m256i *)(dst + 16), _mm256_avg_epu16(p1, u1));
      _mm256_storeu_si256((__m256i *)(dst + 32), _mm256_avg_epu16(p2, u2));
      _mm256_storeu_si256((__m256i *)(dst + 48), _mm256_avg_epu16(p3, u3));
      dst += dst_stride;
      h--;
    } while (h > 0);
  } else if (w > 16) {  // w = 32
    __m256i p0, p1, u0, u1;
    do {
      p0 = _mm256_loadu_si256((const __m256i *)src);
      p1 = _mm256_loadu_si256((const __m256i *)(src + 16));
      src += src_stride;
      u0 = _mm256_loadu_si256((const __m256i *)dst);
      u1 = _mm256_loadu_si256((const __m256i *)(dst + 16));
      _mm256_storeu_si256((__m256i *)dst, _mm256_avg_epu16(p0, u0));
      _mm256_storeu_si256((__m256i *)(dst + 16), _mm256_avg_epu16(p1, u1));
      dst += dst_stride;
      h--;
    } while (h > 0);
  } else if (w > 8) {  // w = 16
    __m256i p0, p1, u0, u1;
    do {
      p0 = _mm256_loadu_si256((const __m256i *)src);
      p1 = _mm256_loadu_si256((const __m256i *)(src + src_stride));
      src += src_stride << 1;
      u0 = _mm256_loadu_si256((const __m256i *)dst);
      u1 = _mm256_loadu_si256((const __m256i *)(dst + dst_stride));
      _mm256_storeu_si256((__m256i *)dst, _mm256_avg_epu16(p0, u0));
      _mm256_storeu_si256((__m256i *)(dst + dst_stride),
                          _mm256_avg_epu16(p1, u1));
      dst += dst_stride << 1;
      h -= 2;
    } while (h > 0);
  } else if (w > 4) {  // w = 8
    __m128i p0, p1, u0, u1;
    do {
      p0 = _mm_loadu_si128((const __m128i *)src);
      p1 = _mm_loadu_si128((const __m128i *)(src + src_stride));
      src += src_stride << 1;
      u0 = _mm_loadu_si128((const __m128i *)dst);
      u1 = _mm_loadu_si128((const __m128i *)(dst + dst_stride));
      _mm_storeu_si128((__m128i *)dst, _mm_avg_epu16(p0, u0));
      _mm_storeu_si128((__m128i *)(dst + dst_stride), _mm_avg_epu16(p1, u1));
      dst += dst_stride << 1;
      h -= 2;
    } while (h > 0);
  } else {  // w = 4
    __m128i p0, p1, u0, u1;
    do {
      p0 = _mm_loadl_epi64((const __m128i *)src);
      p1 = _mm_loadl_epi64((const __m128i *)(src + src_stride));
      src += src_stride << 1;
      u0 = _mm_loadl_epi64((const __m128i *)dst);
      u1 = _mm_loadl_epi64((const __m128i *)(dst + dst_stride));
      _mm_storel_epi64((__m128i *)dst, _mm_avg_epu16(u0, p0));
      _mm_storel_epi64((__m128i *)(dst + dst_stride), _mm_avg_epu16(u1, p1));
      dst += dst_stride << 1;
      h -= 2;
    } while (h > 0);
  }
}
// -----------------------------------------------------------------------------
// Horizontal and vertical filtering
// Byte-shuffle patterns for _mm256_shuffle_epi8: each 16-byte pattern pairs
// neighbouring 16-bit pixels (e.g. pattern 0 produces pairs starting at
// sample offsets 0,1 / 1,2 / 2,3 / 3,4) and is repeated in both 128-bit
// lanes so the same rearrangement applies to each lane.
static const uint8_t signal_pattern_0[32] = { 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6,
                                              7, 6, 7, 8, 9, 0, 1, 2, 3, 2, 3,
                                              4, 5, 4, 5, 6, 7, 6, 7, 8, 9 };

static const uint8_t signal_pattern_1[32] = { 4, 5, 6, 7, 6, 7, 8, 9,
                                              8, 9, 10, 11, 10, 11, 12, 13,
                                              4, 5, 6, 7, 6, 7, 8, 9,
                                              8, 9, 10, 11, 10, 11, 12, 13 };

static const uint8_t signal_pattern_2[32] = { 6, 7, 8, 9, 8, 9, 10, 11,
                                              10, 11, 12, 13, 12, 13, 14, 15,
                                              6, 7, 8, 9, 8, 9, 10, 11,
                                              10, 11, 12, 13, 12, 13, 14, 15 };

// Indices for _mm256_permutevar8x32_epi32: select 32-bit elements 2..5
// (i.e. 16-bit samples 4..11) into both halves of the result.
static const uint32_t signal_index[8] = { 2, 3, 4, 5, 2, 3, 4, 5 };

// Fixed-point rounding parameters shared by all convolution kernels below:
// results are rounded by adding CONV8_ROUNDING_NUM, then shifted right by
// CONV8_ROUNDING_BITS.
#define CONV8_ROUNDING_BITS (7)
#define CONV8_ROUNDING_NUM (1 << (CONV8_ROUNDING_BITS - 1))
// -----------------------------------------------------------------------------
// Horizontal Filtering
// Rearrange one vector of 16-bit input pixels (*s) into the four interleaved
// pixel-pair layouts p[0..3] consumed by filter_8x1_pixels(). The trailing
// comments (x0x6 etc.) denote which sample offsets each output pairs up.
static INLINE void pack_pixels(const __m256i *s, __m256i *p /*p[4]*/) {
  const __m256i idx = _mm256_loadu_si256((const __m256i *)signal_index);
  const __m256i sf0 = _mm256_loadu_si256((const __m256i *)signal_pattern_0);
  const __m256i sf1 = _mm256_loadu_si256((const __m256i *)signal_pattern_1);

  // Rotate samples 4..11 to the front so the same shuffle patterns can
  // produce the pairs for the higher tap offsets.
  const __m256i c = _mm256_permutevar8x32_epi32(*s, idx);

  p[0] = _mm256_shuffle_epi8(*s, sf0);  // x0x6
  p[1] = _mm256_shuffle_epi8(*s, sf1);  // x1x7
  p[2] = _mm256_shuffle_epi8(c, sf0);   // x2x4
  p[3] = _mm256_shuffle_epi8(c, sf1);   // x3x5
}
// Note:
// Shared by 8x2 and 16x1 block
// Note:
// Shared by 8x2 and 16x1 block
// Combine the packed forms of two input vectors (s0, s1) into the 8-vector
// layout x[0..7]: x[0..3] interleave the low 128-bit lanes of both packs,
// x[6..7] the high lanes; x[4] and x[5] simply alias x[2] and x[3].
static INLINE void pack_16_pixels(const __m256i *s0, const __m256i *s1,
                                  __m256i *x /*x[8]*/) {
  __m256i pp[8];
  pack_pixels(s0, pp);
  pack_pixels(s1, &pp[4]);
  x[0] = _mm256_permute2x128_si256(pp[0], pp[4], 0x20);
  x[1] = _mm256_permute2x128_si256(pp[1], pp[5], 0x20);
  x[2] = _mm256_permute2x128_si256(pp[2], pp[6], 0x20);
  x[3] = _mm256_permute2x128_si256(pp[3], pp[7], 0x20);
  x[4] = x[2];
  x[5] = x[3];
  x[6] = _mm256_permute2x128_si256(pp[0], pp[4], 0x31);
  x[7] = _mm256_permute2x128_si256(pp[1], pp[5], 0x31);
}
// Pack a single row of input samples into x[0..3] for one 8-wide filter
// pass (used for the odd-height tail of the 8-wide horizontal path).
static INLINE void pack_8x1_pixels(const uint16_t *src, __m256i *x) {
  __m256i pp[8];
  __m256i s0;
  s0 = _mm256_loadu_si256((const __m256i *)src);
  pack_pixels(&s0, pp);
  x[0] = _mm256_permute2x128_si256(pp[0], pp[2], 0x30);
  x[1] = _mm256_permute2x128_si256(pp[1], pp[3], 0x30);
  x[2] = _mm256_permute2x128_si256(pp[2], pp[0], 0x30);
  x[3] = _mm256_permute2x128_si256(pp[3], pp[1], 0x30);
}
// Load two consecutive rows (src and src + stride) and pack them together
// for the 8-wide, two-rows-per-iteration horizontal path.
static INLINE void pack_8x2_pixels(const uint16_t *src, ptrdiff_t stride,
                                   __m256i *x) {
  __m256i s0, s1;
  s0 = _mm256_loadu_si256((const __m256i *)src);
  s1 = _mm256_loadu_si256((const __m256i *)(src + stride));
  pack_16_pixels(&s0, &s1, x);
}
// Load one 16-pixel row as two overlapping vectors (offset 0 and offset 8)
// and pack them together for the 16-wide horizontal path.
static INLINE void pack_16x1_pixels(const uint16_t *src, __m256i *x) {
  __m256i s0, s1;
  s0 = _mm256_loadu_si256((const __m256i *)src);
  s1 = _mm256_loadu_si256((const __m256i *)(src + 8));
  pack_16_pixels(&s0, &s1, x);
}
// Note:
// Shared by horizontal and vertical filtering
// Note:
// Shared by horizontal and vertical filtering
// Broadcast the four 32-bit tap pairs of an 8-tap filter: f[i] holds taps
// (2i, 2i+1) replicated into every 32-bit element of the vector, matching
// the pixel-pair layout produced by the pack_* helpers.
static INLINE void pack_filters(const int16_t *filter, __m256i *f /*f[4]*/) {
  const __m128i h = _mm_loadu_si128((const __m128i *)filter);
  const __m256i hh = _mm256_insertf128_si256(_mm256_castsi128_si256(h), h, 1);
  const __m256i p0 = _mm256_set1_epi32(0x03020100);
  const __m256i p1 = _mm256_set1_epi32(0x07060504);
  const __m256i p2 = _mm256_set1_epi32(0x0b0a0908);
  const __m256i p3 = _mm256_set1_epi32(0x0f0e0d0c);
  f[0] = _mm256_shuffle_epi8(hh, p0);
  f[1] = _mm256_shuffle_epi8(hh, p1);
  f[2] = _mm256_shuffle_epi8(hh, p2);
  f[3] = _mm256_shuffle_epi8(hh, p3);
}
// Apply an 8-tap filter (fil[0..3], as broadcast tap pairs) to the packed
// signal sig[0..3]: each madd produces pair-wise products summed to 32 bits;
// the four partial sums are added, rounded and shifted down to yield eight
// 32-bit results in *y.
static INLINE void filter_8x1_pixels(const __m256i *sig /*sig[4]*/,
                                     const __m256i *fil /*fil[4]*/,
                                     __m256i *y) {
  __m256i a, a0, a1;

  a0 = _mm256_madd_epi16(fil[0], sig[0]);
  a1 = _mm256_madd_epi16(fil[3], sig[3]);
  a = _mm256_add_epi32(a0, a1);

  a0 = _mm256_madd_epi16(fil[1], sig[1]);
  a1 = _mm256_madd_epi16(fil[2], sig[2]);

  // {min, max} is a permutation of {a0, a1}, so adding both is equivalent
  // to adding a0 + a1; the split fixes the accumulation order of the two
  // middle partial sums independent of their magnitudes.
  {
    const __m256i min = _mm256_min_epi32(a0, a1);
    a = _mm256_add_epi32(a, min);
  }
  {
    const __m256i max = _mm256_max_epi32(a0, a1);
    a = _mm256_add_epi32(a, max);
  }
  {
    // Round to CONV8_ROUNDING_BITS fixed-point precision.
    const __m256i rounding = _mm256_set1_epi32(1 << (CONV8_ROUNDING_BITS - 1));
    a = _mm256_add_epi32(a, rounding);
    *y = _mm256_srai_epi32(a, CONV8_ROUNDING_BITS);
  }
}
// Pack eight 32-bit results to 16 bits with unsigned saturation, clamp to
// *mask (the per-bit-depth pixel maximum) and store one 8-pixel row.
static INLINE void store_8x1_pixels(const __m256i *y, const __m256i *mask,
                                    uint16_t *dst) {
  const __m128i a0 = _mm256_castsi256_si128(*y);
  const __m128i a1 = _mm256_extractf128_si256(*y, 1);
  __m128i res = _mm_packus_epi32(a0, a1);
  res = _mm_min_epi16(res, _mm256_castsi256_si128(*mask));
  _mm_storeu_si128((__m128i *)dst, res);
}
// Pack two vectors of 32-bit results to 16 bits, clamp to *mask and store
// them as two consecutive 8-pixel rows (dst and dst + pitch).
static INLINE void store_8x2_pixels(const __m256i *y0, const __m256i *y1,
                                    const __m256i *mask, uint16_t *dst,
                                    ptrdiff_t pitch) {
  __m256i a = _mm256_packus_epi32(*y0, *y1);
  a = _mm256_min_epi16(a, *mask);
  _mm_storeu_si128((__m128i *)dst, _mm256_castsi256_si128(a));
  _mm_storeu_si128((__m128i *)(dst + pitch), _mm256_extractf128_si256(a, 1));
}
// Pack two vectors of 32-bit results to 16 bits, clamp to *mask and store
// them as one 16-pixel row.
static INLINE void store_16x1_pixels(const __m256i *y0, const __m256i *y1,
                                     const __m256i *mask, uint16_t *dst) {
  __m256i a = _mm256_packus_epi32(*y0, *y1);
  a = _mm256_min_epi16(a, *mask);
  _mm256_storeu_si256((__m256i *)dst, a);
}
// 8-tap horizontal filter over an 8-pixel-wide column of high-bitdepth
// pixels. src_ptr is rewound by 3 samples so the 8-tap window is centred on
// the output pixel. Rows are processed two at a time, with a single-row
// tail for odd heights. Results are clamped to the bit-depth maximum
// (1 << bd) - 1.
static void vpx_highbd_filter_block1d8_h8_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[8], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);

  __m256i ff[4];
  pack_filters(filter, ff);

  src_ptr -= 3;
  do {
    pack_8x2_pixels(src_ptr, src_pitch, signal);
    filter_8x1_pixels(signal, ff, &res0);
    filter_8x1_pixels(&signal[4], ff, &res1);
    store_8x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
    height -= 2;
    src_ptr += src_pitch << 1;
    dst_ptr += dst_pitch << 1;
  } while (height > 1);

  if (height > 0) {
    pack_8x1_pixels(src_ptr, signal);
    filter_8x1_pixels(signal, ff, &res0);
    store_8x1_pixels(&res0, &max, dst_ptr);
  }
}
// 8-tap horizontal filter over a 16-pixel-wide column; one full row per
// iteration. src_ptr is rewound by 3 samples to centre the 8-tap window;
// results are clamped to (1 << bd) - 1.
static void vpx_highbd_filter_block1d16_h8_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[8], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);

  __m256i ff[4];
  pack_filters(filter, ff);

  src_ptr -= 3;
  do {
    pack_16x1_pixels(src_ptr, signal);
    filter_8x1_pixels(signal, ff, &res0);
    filter_8x1_pixels(&signal[4], ff, &res1);
    store_16x1_pixels(&res0, &res1, &max, dst_ptr);
    height -= 1;
    src_ptr += src_pitch;
    dst_ptr += dst_pitch;
  } while (height > 0);
}
// -----------------------------------------------------------------------------
// 2-tap horizontal filtering
// Broadcast the two active coefficients of a 2-tap (bilinear) kernel.
// The shuffle constant 0x09080706 selects bytes 6..9 of the filter array,
// i.e. 16-bit taps 3 and 4 — the positions the 2-tap paths use (they are
// paired with the src_ptr -= 3 adjustment in the h2 callers).
static INLINE void pack_2t_filter(const int16_t *filter, __m256i *f) {
  const __m128i h = _mm_loadu_si128((const __m128i *)filter);
  const __m256i hh = _mm256_insertf128_si256(_mm256_castsi128_si256(h), h, 1);
  const __m256i p = _mm256_set1_epi32(0x09080706);
  f[0] = _mm256_shuffle_epi8(hh, p);
}
// can be used by pack_8x2_2t_pixels() and pack_16x1_2t_pixels()
// the difference is s0/s1 specifies first and second rows or,
// first 16 samples and 8-sample shifted 16 samples
// Pack adjacent-pixel pairs for the 2-tap filter. s0/s1 are either two rows
// (8x2 path) or the first 16 samples and the 8-sample-shifted samples of
// one row (16x1 path); see the caller comments above.
static INLINE void pack_16_2t_pixels(const __m256i *s0, const __m256i *s1,
                                     __m256i *sig) {
  const __m256i idx = _mm256_loadu_si256((const __m256i *)signal_index);
  const __m256i sf2 = _mm256_loadu_si256((const __m256i *)signal_pattern_2);
  __m256i x0 = _mm256_shuffle_epi8(*s0, sf2);
  __m256i x1 = _mm256_shuffle_epi8(*s1, sf2);
  // Rotate samples 4..11 forward so the same shuffle yields the upper pairs.
  __m256i r0 = _mm256_permutevar8x32_epi32(*s0, idx);
  __m256i r1 = _mm256_permutevar8x32_epi32(*s1, idx);
  r0 = _mm256_shuffle_epi8(r0, sf2);
  r1 = _mm256_shuffle_epi8(r1, sf2);
  sig[0] = _mm256_permute2x128_si256(x0, x1, 0x20);
  sig[1] = _mm256_permute2x128_si256(r0, r1, 0x20);
}
// Load and pack two consecutive rows for the 8-wide 2-tap horizontal path.
static INLINE void pack_8x2_2t_pixels(const uint16_t *src,
                                      const ptrdiff_t pitch, __m256i *sig) {
  const __m256i r0 = _mm256_loadu_si256((const __m256i *)src);
  const __m256i r1 = _mm256_loadu_si256((const __m256i *)(src + pitch));
  pack_16_2t_pixels(&r0, &r1, sig);
}
// Load and pack one 16-pixel row (as two overlapping loads at offset 0 and
// 8) for the 16-wide 2-tap horizontal path.
static INLINE void pack_16x1_2t_pixels(const uint16_t *src,
                                       __m256i *sig /*sig[2]*/) {
  const __m256i r0 = _mm256_loadu_si256((const __m256i *)src);
  const __m256i r1 = _mm256_loadu_si256((const __m256i *)(src + 8));
  pack_16_2t_pixels(&r0, &r1, sig);
}
// Pack a single 8-pixel row for the 2-tap filter (odd-height tail of the
// 8-wide horizontal path); only sig[0] is produced.
static INLINE void pack_8x1_2t_pixels(const uint16_t *src,
                                      __m256i *sig /*sig[2]*/) {
  const __m256i idx = _mm256_loadu_si256((const __m256i *)signal_index);
  const __m256i sf2 = _mm256_loadu_si256((const __m256i *)signal_pattern_2);
  __m256i r0 = _mm256_loadu_si256((const __m256i *)src);
  __m256i x0 = _mm256_shuffle_epi8(r0, sf2);
  r0 = _mm256_permutevar8x32_epi32(r0, idx);
  r0 = _mm256_shuffle_epi8(r0, sf2);
  sig[0] = _mm256_permute2x128_si256(x0, r0, 0x20);
}
// can be used by filter_8x2_2t_pixels() and filter_16x1_2t_pixels()
// can be used by filter_8x2_2t_pixels() and filter_16x1_2t_pixels()
// Multiply-accumulate the packed pixel pairs with the broadcast 2-tap
// filter, round, and shift down to CONV8 precision; produces two vectors of
// eight 32-bit results.
static INLINE void filter_16_2t_pixels(const __m256i *sig, const __m256i *f,
                                       __m256i *y0, __m256i *y1) {
  const __m256i rounding = _mm256_set1_epi32(1 << (CONV8_ROUNDING_BITS - 1));
  __m256i x0 = _mm256_madd_epi16(sig[0], *f);
  __m256i x1 = _mm256_madd_epi16(sig[1], *f);
  x0 = _mm256_add_epi32(x0, rounding);
  x1 = _mm256_add_epi32(x1, rounding);
  *y0 = _mm256_srai_epi32(x0, CONV8_ROUNDING_BITS);
  *y1 = _mm256_srai_epi32(x1, CONV8_ROUNDING_BITS);
}
// Single-vector variant of filter_16_2t_pixels(): one madd, round, shift.
static INLINE void filter_8x1_2t_pixels(const __m256i *sig, const __m256i *f,
                                        __m256i *y0) {
  const __m256i rounding = _mm256_set1_epi32(1 << (CONV8_ROUNDING_BITS - 1));
  __m256i x0 = _mm256_madd_epi16(sig[0], *f);
  x0 = _mm256_add_epi32(x0, rounding);
  *y0 = _mm256_srai_epi32(x0, CONV8_ROUNDING_BITS);
}
// 2-tap (bilinear) horizontal filter over an 8-pixel-wide column.
// src_ptr -= 3 pairs with pack_2t_filter() picking taps 3 and 4, so the
// effective window is src[0], src[1]. Two rows per iteration with a
// single-row tail for odd heights; results clamped to (1 << bd) - 1.
static void vpx_highbd_filter_block1d8_h2_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[2], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);

  __m256i ff;
  pack_2t_filter(filter, &ff);

  src_ptr -= 3;
  do {
    pack_8x2_2t_pixels(src_ptr, src_pitch, signal);
    filter_16_2t_pixels(signal, &ff, &res0, &res1);
    store_8x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
    height -= 2;
    src_ptr += src_pitch << 1;
    dst_ptr += dst_pitch << 1;
  } while (height > 1);

  if (height > 0) {
    pack_8x1_2t_pixels(src_ptr, signal);
    filter_8x1_2t_pixels(signal, &ff, &res0);
    store_8x1_pixels(&res0, &max, dst_ptr);
  }
}
// 2-tap (bilinear) horizontal filter over a 16-pixel-wide column; one row
// per iteration. See vpx_highbd_filter_block1d8_h2_avx2 for the src_ptr -= 3
// / tap-position pairing. Results clamped to (1 << bd) - 1.
static void vpx_highbd_filter_block1d16_h2_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[2], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
  __m256i ff;
  pack_2t_filter(filter, &ff);

  src_ptr -= 3;
  do {
    pack_16x1_2t_pixels(src_ptr, signal);
    filter_16_2t_pixels(signal, &ff, &res0, &res1);
    store_16x1_pixels(&res0, &res1, &max, dst_ptr);
    height -= 1;
    src_ptr += src_pitch;
    dst_ptr += dst_pitch;
  } while (height > 0);
}
// -----------------------------------------------------------------------------
// Vertical Filtering
// Vertical 8-tap, 8-wide: preload rows 0-6 and interleave consecutive row
// pairs — sig[0..2] hold the low 16-bit halves of pairs (0,1)/(2,3)/(4,5),
// sig[4..6] the high halves; sig[8] carries row 6 forward so
// pack_8x9_pixels() can continue the chain.
static void pack_8x9_init(const uint16_t *src, ptrdiff_t pitch, __m256i *sig) {
  __m256i s0 = _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)src));
  __m256i s1 =
      _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(src + pitch)));
  __m256i s2 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src + 2 * pitch)));
  __m256i s3 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src + 3 * pitch)));
  __m256i s4 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src + 4 * pitch)));
  __m256i s5 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src + 5 * pitch)));
  __m256i s6 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src + 6 * pitch)));

  // Place each row's successor in the upper lane: sN = (row N | row N+1).
  s0 = _mm256_inserti128_si256(s0, _mm256_castsi256_si128(s1), 1);
  s1 = _mm256_inserti128_si256(s1, _mm256_castsi256_si128(s2), 1);
  s2 = _mm256_inserti128_si256(s2, _mm256_castsi256_si128(s3), 1);
  s3 = _mm256_inserti128_si256(s3, _mm256_castsi256_si128(s4), 1);
  s4 = _mm256_inserti128_si256(s4, _mm256_castsi256_si128(s5), 1);
  s5 = _mm256_inserti128_si256(s5, _mm256_castsi256_si128(s6), 1);

  sig[0] = _mm256_unpacklo_epi16(s0, s1);
  sig[4] = _mm256_unpackhi_epi16(s0, s1);
  sig[1] = _mm256_unpacklo_epi16(s2, s3);
  sig[5] = _mm256_unpackhi_epi16(s2, s3);
  sig[2] = _mm256_unpacklo_epi16(s4, s5);
  sig[6] = _mm256_unpackhi_epi16(s4, s5);
  sig[8] = s6;
}
// Extend the 8-wide vertical window with rows 7 and 8: fill sig[3]/sig[7]
// (low/high halves of the new row pairs) and keep row 8 in sig[8] for the
// next iteration.
static INLINE void pack_8x9_pixels(const uint16_t *src, ptrdiff_t pitch,
                                   __m256i *sig) {
  // base + 7th row
  __m256i s0 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src + 7 * pitch)));
  // base + 8th row
  __m256i s1 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src + 8 * pitch)));
  __m256i s2 = _mm256_inserti128_si256(sig[8], _mm256_castsi256_si128(s0), 1);
  __m256i s3 = _mm256_inserti128_si256(s0, _mm256_castsi256_si128(s1), 1);
  sig[3] = _mm256_unpacklo_epi16(s2, s3);
  sig[7] = _mm256_unpackhi_epi16(s2, s3);
  sig[8] = s1;
}
// Run the 8-tap filter on the low-half (sig[0..3]) and high-half (sig[4..7])
// interleaved row pairs, yielding the two output rows y0 and y1.
static INLINE void filter_8x9_pixels(const __m256i *sig, const __m256i *f,
                                     __m256i *y0, __m256i *y1) {
  filter_8x1_pixels(sig, f, y0);
  filter_8x1_pixels(&sig[4], f, y1);
}
// Slide the vertical filter window down by two rows: shift sig[1..3] into
// sig[0..2] and sig[5..7] into sig[4..6].
static INLINE void update_pixels(__m256i *sig) {
  int i;
  for (i = 0; i < 3; ++i) {
    sig[i] = sig[i + 1];
    sig[i + 4] = sig[i + 5];
  }
}
// 8-tap vertical filter over an 8-pixel-wide column: preload seven rows,
// then produce two output rows per iteration while sliding the window.
// NOTE: the loop decrements height by 2, so it assumes an even height.
// Results are clamped to (1 << bd) - 1.
static void vpx_highbd_filter_block1d8_v8_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[9], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);

  __m256i ff[4];
  pack_filters(filter, ff);

  pack_8x9_init(src_ptr, src_pitch, signal);

  do {
    pack_8x9_pixels(src_ptr, src_pitch, signal);

    filter_8x9_pixels(signal, ff, &res0, &res1);
    store_8x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
    update_pixels(signal);

    src_ptr += src_pitch << 1;
    dst_ptr += dst_pitch << 1;
    height -= 2;
  } while (height > 0);
}
// Vertical 8-tap, 16-wide: preload rows 0-6 and interleave consecutive row
// pairs. The layout splits each pair four ways — sig[0..2]/sig[4..6] hold
// the low/high halves of the first 8 pixels and sig[8..10]/sig[12..14] the
// low/high halves of the second 8; sig[16] carries row 6 forward.
static void pack_16x9_init(const uint16_t *src, ptrdiff_t pitch, __m256i *sig) {
  __m256i u0, u1, u2, u3;
  // load 0-6 rows
  const __m256i s0 = _mm256_loadu_si256((const __m256i *)src);
  const __m256i s1 = _mm256_loadu_si256((const __m256i *)(src + pitch));
  const __m256i s2 = _mm256_loadu_si256((const __m256i *)(src + 2 * pitch));
  const __m256i s3 = _mm256_loadu_si256((const __m256i *)(src + 3 * pitch));
  const __m256i s4 = _mm256_loadu_si256((const __m256i *)(src + 4 * pitch));
  const __m256i s5 = _mm256_loadu_si256((const __m256i *)(src + 5 * pitch));
  const __m256i s6 = _mm256_loadu_si256((const __m256i *)(src + 6 * pitch));

  u0 = _mm256_permute2x128_si256(s0, s1, 0x20);  // 0, 1 low
  u1 = _mm256_permute2x128_si256(s0, s1, 0x31);  // 0, 1 high
  u2 = _mm256_permute2x128_si256(s1, s2, 0x20);  // 1, 2 low
  u3 = _mm256_permute2x128_si256(s1, s2, 0x31);  // 1, 2 high

  sig[0] = _mm256_unpacklo_epi16(u0, u2);
  sig[4] = _mm256_unpackhi_epi16(u0, u2);
  sig[8] = _mm256_unpacklo_epi16(u1, u3);
  sig[12] = _mm256_unpackhi_epi16(u1, u3);

  u0 = _mm256_permute2x128_si256(s2, s3, 0x20);
  u1 = _mm256_permute2x128_si256(s2, s3, 0x31);
  u2 = _mm256_permute2x128_si256(s3, s4, 0x20);
  u3 = _mm256_permute2x128_si256(s3, s4, 0x31);

  sig[1] = _mm256_unpacklo_epi16(u0, u2);
  sig[5] = _mm256_unpackhi_epi16(u0, u2);
  sig[9] = _mm256_unpacklo_epi16(u1, u3);
  sig[13] = _mm256_unpackhi_epi16(u1, u3);

  u0 = _mm256_permute2x128_si256(s4, s5, 0x20);
  u1 = _mm256_permute2x128_si256(s4, s5, 0x31);
  u2 = _mm256_permute2x128_si256(s5, s6, 0x20);
  u3 = _mm256_permute2x128_si256(s5, s6, 0x31);

  sig[2] = _mm256_unpacklo_epi16(u0, u2);
  sig[6] = _mm256_unpackhi_epi16(u0, u2);
  sig[10] = _mm256_unpacklo_epi16(u1, u3);
  sig[14] = _mm256_unpackhi_epi16(u1, u3);

  sig[16] = s6;
}
// Extend the 16-wide vertical window with rows 7 and 8: fill sig[3]/sig[7]
// and sig[11]/sig[15] (the four quarter-layouts of the new row pairs) and
// keep row 8 in sig[16] for the next iteration.
static void pack_16x9_pixels(const uint16_t *src, ptrdiff_t pitch,
                             __m256i *sig) {
  // base + 7th row
  const __m256i s7 = _mm256_loadu_si256((const __m256i *)(src + 7 * pitch));
  // base + 8th row
  const __m256i s8 = _mm256_loadu_si256((const __m256i *)(src + 8 * pitch));

  __m256i u0, u1, u2, u3;
  u0 = _mm256_permute2x128_si256(sig[16], s7, 0x20);
  u1 = _mm256_permute2x128_si256(sig[16], s7, 0x31);

  u2 = _mm256_permute2x128_si256(s7, s8, 0x20);
  u3 = _mm256_permute2x128_si256(s7, s8, 0x31);

  sig[3] = _mm256_unpacklo_epi16(u0, u2);
  sig[7] = _mm256_unpackhi_epi16(u0, u2);
  sig[11] = _mm256_unpacklo_epi16(u1, u3);
  sig[15] = _mm256_unpackhi_epi16(u1, u3);

  sig[16] = s8;
}
// Run the 8-tap filter on each of the four quarter-layouts, then pack and
// re-permute the 32-bit partial results into two 16-pixel output rows
// (y0, y1), already saturated to unsigned 16 bits by packus.
static INLINE void filter_16x9_pixels(const __m256i *sig, const __m256i *f,
                                      __m256i *y0, __m256i *y1) {
  __m256i res[4];
  int i;
  for (i = 0; i < 4; ++i) {
    filter_8x1_pixels(&sig[i << 2], f, &res[i]);
  }

  {
    const __m256i l0l1 = _mm256_packus_epi32(res[0], res[1]);
    const __m256i h0h1 = _mm256_packus_epi32(res[2], res[3]);
    *y0 = _mm256_permute2x128_si256(l0l1, h0h1, 0x20);
    *y1 = _mm256_permute2x128_si256(l0l1, h0h1, 0x31);
  }
}
// Clamp two already-packed 16-pixel rows to *mask and store them at dst and
// dst + pitch.
static INLINE void store_16x2_pixels(const __m256i *y0, const __m256i *y1,
                                     const __m256i *mask, uint16_t *dst,
                                     ptrdiff_t pitch) {
  __m256i p = _mm256_min_epi16(*y0, *mask);
  _mm256_storeu_si256((__m256i *)dst, p);
  p = _mm256_min_epi16(*y1, *mask);
  _mm256_storeu_si256((__m256i *)(dst + pitch), p);
}
// Slide both 8-pixel halves of the 16-wide vertical window down by two rows.
static void update_16x9_pixels(__m256i *sig) {
  update_pixels(&sig[0]);
  update_pixels(&sig[8]);
}
// 8-tap vertical filter over a 16-pixel-wide column: preload seven rows,
// then produce two output rows per iteration while sliding the window.
// NOTE: the loop decrements height by 2, so it assumes an even height.
// Results are clamped to (1 << bd) - 1.
static void vpx_highbd_filter_block1d16_v8_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[17], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);

  __m256i ff[4];
  pack_filters(filter, ff);

  pack_16x9_init(src_ptr, src_pitch, signal);

  do {
    pack_16x9_pixels(src_ptr, src_pitch, signal);
    filter_16x9_pixels(signal, ff, &res0, &res1);
    store_16x2_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
    update_16x9_pixels(signal);

    src_ptr += src_pitch << 1;
    dst_ptr += dst_pitch << 1;
    height -= 2;
  } while (height > 0);
}
// -----------------------------------------------------------------------------
// 2-tap vertical filtering
// 2-tap vertical, 16-wide: seed the rolling window with the first row in
// sig[2] (the "previous row" slot used by pack_16x2_2t_pixels()).
static void pack_16x2_init(const uint16_t *src, __m256i *sig) {
  sig[2] = _mm256_loadu_si256((const __m256i *)src);
}
// Interleave the carried previous row (sig[2]) with the next row into
// sig[0] (low halves) and sig[1] (high halves), then roll the window.
static INLINE void pack_16x2_2t_pixels(const uint16_t *src, ptrdiff_t pitch,
                                       __m256i *sig) {
  // load the next row
  const __m256i u = _mm256_loadu_si256((const __m256i *)(src + pitch));
  sig[0] = _mm256_unpacklo_epi16(sig[2], u);
  sig[1] = _mm256_unpackhi_epi16(sig[2], u);
  sig[2] = u;
}
// Thin wrapper: the vertical 2-tap filter math is identical to the
// horizontal one.
static INLINE void filter_16x2_2t_pixels(const __m256i *sig, const __m256i *f,
                                         __m256i *y0, __m256i *y1) {
  filter_16_2t_pixels(sig, f, y0, y1);
}
// 2-tap (bilinear) vertical filter over a 16-pixel-wide column; one output
// row per iteration using a two-row rolling window. Results clamped to
// (1 << bd) - 1.
static void vpx_highbd_filter_block1d16_v2_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[3], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);
  __m256i ff;

  pack_2t_filter(filter, &ff);
  pack_16x2_init(src_ptr, signal);

  do {
    pack_16x2_2t_pixels(src_ptr, src_pitch, signal);
    filter_16x2_2t_pixels(signal, &ff, &res0, &res1);
    store_16x1_pixels(&res0, &res1, &max, dst_ptr);

    src_ptr += src_pitch;
    dst_ptr += dst_pitch;
    height -= 1;
  } while (height > 0);
}
// 128-bit variant of pack_2t_filter(): broadcast 16-bit taps 3 and 4 of the
// filter array (bytes 6..9) into every 32-bit element.
static INLINE void pack_8x1_2t_filter(const int16_t *filter, __m128i *f) {
  const __m128i h = _mm_loadu_si128((const __m128i *)filter);
  const __m128i p = _mm_set1_epi32(0x09080706);
  f[0] = _mm_shuffle_epi8(h, p);
}
// 2-tap vertical, 8-wide: seed the rolling window with the first row.
static void pack_8x2_init(const uint16_t *src, __m128i *sig) {
  sig[2] = _mm_loadu_si128((const __m128i *)src);
}
// Interleave the carried previous row (sig[2]) with the next row into
// sig[0]/sig[1] (low/high halves), then roll the window (128-bit variant).
static INLINE void pack_8x2_2t_pixels_ver(const uint16_t *src, ptrdiff_t pitch,
                                          __m128i *sig) {
  // load the next row
  const __m128i u = _mm_loadu_si128((const __m128i *)(src + pitch));
  sig[0] = _mm_unpacklo_epi16(sig[2], u);
  sig[1] = _mm_unpackhi_epi16(sig[2], u);
  sig[2] = u;
}
// 128-bit 2-tap multiply-accumulate: madd the packed pairs with the
// broadcast filter, round, and shift down to CONV8 precision.
static INLINE void filter_8_2t_pixels(const __m128i *sig, const __m128i *f,
                                      __m128i *y0, __m128i *y1) {
  const __m128i rounding = _mm_set1_epi32(1 << (CONV8_ROUNDING_BITS - 1));
  __m128i x0 = _mm_madd_epi16(sig[0], *f);
  __m128i x1 = _mm_madd_epi16(sig[1], *f);
  x0 = _mm_add_epi32(x0, rounding);
  x1 = _mm_add_epi32(x1, rounding);
  *y0 = _mm_srai_epi32(x0, CONV8_ROUNDING_BITS);
  *y1 = _mm_srai_epi32(x1, CONV8_ROUNDING_BITS);
}
// Pack eight 32-bit results to 16 bits with unsigned saturation, clamp to
// *mask and store one 8-pixel row (128-bit variant).
static INLINE void store_8x1_2t_pixels_ver(const __m128i *y0, const __m128i *y1,
                                           const __m128i *mask, uint16_t *dst) {
  __m128i res = _mm_packus_epi32(*y0, *y1);
  res = _mm_min_epi16(res, *mask);
  _mm_storeu_si128((__m128i *)dst, res);
}
// 2-tap (bilinear) vertical filter over an 8-pixel-wide column; one output
// row per iteration using a two-row rolling window (SSE-width registers).
// Results clamped to (1 << bd) - 1.
static void vpx_highbd_filter_block1d8_v2_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m128i signal[3], res0, res1;
  const __m128i max = _mm_set1_epi16((1 << bd) - 1);
  __m128i ff;

  pack_8x1_2t_filter(filter, &ff);
  pack_8x2_init(src_ptr, signal);

  do {
    pack_8x2_2t_pixels_ver(src_ptr, src_pitch, signal);
    filter_8_2t_pixels(signal, &ff, &res0, &res1);
    store_8x1_2t_pixels_ver(&res0, &res1, &max, dst_ptr);

    src_ptr += src_pitch;
    dst_ptr += dst_pitch;
    height -= 1;
  } while (height > 0);
}
// Calculation with averaging the input pixels
static INLINE void store_8x1_avg_pixels(const __m256i *y0, const __m256i *mask,
uint16_t *dst) {
const __m128i a0 = _mm256_castsi256_si128(*y0);
const __m128i a1 = _mm256_extractf128_si256(*y0, 1);
__m128i res = _mm_packus_epi32(a0, a1);
const __m128i pix = _mm_loadu_si128((const __m128i *)dst);
res = _mm_min_epi16(res, _mm256_castsi256_si128(*mask));
res = _mm_avg_epu16(res, pix);
_mm_storeu_si128((__m128i *)dst, res);
}
// Like store_8x2_pixels(), but each clamped row is rounding-averaged with
// the existing dst pixels before being written back.
static INLINE void store_8x2_avg_pixels(const __m256i *y0, const __m256i *y1,
                                        const __m256i *mask, uint16_t *dst,
                                        ptrdiff_t pitch) {
  __m256i a = _mm256_packus_epi32(*y0, *y1);
  const __m128i pix0 = _mm_loadu_si128((const __m128i *)dst);
  const __m128i pix1 = _mm_loadu_si128((const __m128i *)(dst + pitch));
  const __m256i pix =
      _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);
  a = _mm256_min_epi16(a, *mask);
  a = _mm256_avg_epu16(a, pix);
  _mm_storeu_si128((__m128i *)dst, _mm256_castsi256_si128(a));
  _mm_storeu_si128((__m128i *)(dst + pitch), _mm256_extractf128_si256(a, 1));
}
// Like store_16x1_pixels(), but the clamped row is rounding-averaged with
// the existing dst pixels before being written back.
static INLINE void store_16x1_avg_pixels(const __m256i *y0, const __m256i *y1,
                                         const __m256i *mask, uint16_t *dst) {
  __m256i a = _mm256_packus_epi32(*y0, *y1);
  const __m256i pix = _mm256_loadu_si256((const __m256i *)dst);
  a = _mm256_min_epi16(a, *mask);
  a = _mm256_avg_epu16(a, pix);
  _mm256_storeu_si256((__m256i *)dst, a);
}
// Like store_16x2_pixels(), but each clamped row is rounding-averaged with
// the existing dst pixels before being written back.
static INLINE void store_16x2_avg_pixels(const __m256i *y0, const __m256i *y1,
                                         const __m256i *mask, uint16_t *dst,
                                         ptrdiff_t pitch) {
  const __m256i pix0 = _mm256_loadu_si256((const __m256i *)dst);
  const __m256i pix1 = _mm256_loadu_si256((const __m256i *)(dst + pitch));
  __m256i p = _mm256_min_epi16(*y0, *mask);
  p = _mm256_avg_epu16(p, pix0);
  _mm256_storeu_si256((__m256i *)dst, p);
  p = _mm256_min_epi16(*y1, *mask);
  p = _mm256_avg_epu16(p, pix1);
  _mm256_storeu_si256((__m256i *)(dst + pitch), p);
}
static INLINE void store_8x1_2t_avg_pixels_ver(const __m128i *y0,
                                               const __m128i *y1,
                                               const __m128i *mask,
                                               uint16_t *dst) {
  // Existing dst row used for the rounding average.
  const __m128i dst_pix = _mm_loadu_si128((const __m128i *)dst);
  // Pack the two 4x32-bit halves into one 8x16-bit row (unsigned saturation),
  // clamp to the bit-depth maximum and average with dst.
  __m128i out = _mm_packus_epi32(*y0, *y1);
  out = _mm_min_epi16(out, *mask);
  out = _mm_avg_epu16(out, dst_pix);
  _mm_storeu_si128((__m128i *)dst, out);
}
// Horizontal 8-tap filter for an 8-pixel-wide high-bitdepth block; the
// filtered output is averaged with the existing dst pixels.
static void vpx_highbd_filter_block1d8_h8_avg_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[8], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);  // clamp for bd bits

  __m256i ff[4];  // four tap pairs = 8 taps
  pack_filters(filter, ff);

  // The 8-tap kernel reads 3 samples before the center pixel.
  src_ptr -= 3;
  do {
    // Two rows per iteration; signal[0..3] is row 0, signal[4..7] row 1.
    pack_8x2_pixels(src_ptr, src_pitch, signal);
    filter_8x1_pixels(signal, ff, &res0);
    filter_8x1_pixels(&signal[4], ff, &res1);
    store_8x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
    height -= 2;
    src_ptr += src_pitch << 1;
    dst_ptr += dst_pitch << 1;
  } while (height > 1);

  // Odd height: one trailing row.
  if (height > 0) {
    pack_8x1_pixels(src_ptr, signal);
    filter_8x1_pixels(signal, ff, &res0);
    store_8x1_avg_pixels(&res0, &max, dst_ptr);
  }
}
// Horizontal 8-tap filter for a 16-pixel-wide high-bitdepth block with
// averaging; processes one full row per iteration.
static void vpx_highbd_filter_block1d16_h8_avg_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[8], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);  // clamp for bd bits

  __m256i ff[4];  // four tap pairs = 8 taps
  pack_filters(filter, ff);

  // The 8-tap kernel reads 3 samples before the center pixel.
  src_ptr -= 3;
  do {
    // One 16-wide row per iteration, filtered as two 8-pixel halves.
    pack_16x1_pixels(src_ptr, signal);
    filter_8x1_pixels(signal, ff, &res0);
    filter_8x1_pixels(&signal[4], ff, &res1);
    store_16x1_avg_pixels(&res0, &res1, &max, dst_ptr);
    height -= 1;
    src_ptr += src_pitch;
    dst_ptr += dst_pitch;
  } while (height > 0);
}
// Horizontal 4-tap filter for a 4-pixel-wide high-bitdepth block.
static void vpx_highbd_filter_block1d4_h4_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr,
    ptrdiff_t dst_stride, uint32_t height, const int16_t *kernel, int bd) {
  // We extract the middle four elements of the kernel into two registers in
  // the form
  // ... k[3] k[2] k[3] k[2]
  // ... k[5] k[4] k[5] k[4]
  // Then we shuffle the source into
  // ... s[1] s[0] s[0] s[-1]
  // ... s[3] s[2] s[2] s[1]
  // Calling multiply and add gives us half of the sum. Calling add on the two
  // halves gives us the output. Since avx2 allows us to use 256-bit buffer, we
  // can do this two rows at a time.

  __m256i src_reg, src_reg_shift_0, src_reg_shift_2;
  __m256i res_reg;

  // Byte-shuffle masks producing the two shifted layouts above; identical in
  // both 128-bit lanes so one lane can hold each of the two rows.
  __m256i idx_shift_0 =
      _mm256_setr_epi8(0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9, 0, 1, 2,
                       3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9);
  __m256i idx_shift_2 =
      _mm256_setr_epi8(4, 5, 6, 7, 6, 7, 8, 9, 8, 9, 10, 11, 10, 11, 12, 13, 4,
                       5, 6, 7, 6, 7, 8, 9, 8, 9, 10, 11, 10, 11, 12, 13);

  __m128i kernel_reg_128;  // Kernel
  __m256i kernel_reg, kernel_reg_23,
      kernel_reg_45;  // Segments of the kernel used

  const __m256i reg_round =
      _mm256_set1_epi32(CONV8_ROUNDING_NUM);  // Used for rounding
  const __m256i reg_max = _mm256_set1_epi16((1 << bd) - 1);
  const ptrdiff_t unrolled_src_stride = src_stride << 1;
  const ptrdiff_t unrolled_dst_stride = dst_stride << 1;
  int h;

  // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
  src_ptr -= 1;

  // Load Kernel
  kernel_reg_128 = _mm_loadu_si128((const __m128i *)kernel);
  kernel_reg = _mm256_broadcastsi128_si256(kernel_reg_128);
  kernel_reg_23 = _mm256_shuffle_epi32(kernel_reg, 0x55);  // k[3] k[2] pairs
  kernel_reg_45 = _mm256_shuffle_epi32(kernel_reg, 0xaa);  // k[5] k[4] pairs

  for (h = height; h >= 2; h -= 2) {
    // Load the source: one row per 128-bit lane.
    src_reg = mm256_loadu2_si128(src_ptr, src_ptr + src_stride);
    src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
    src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);

    // Get the output
    res_reg = mm256_madd_add_epi32(&src_reg_shift_0, &src_reg_shift_2,
                                   &kernel_reg_23, &kernel_reg_45);

    // Round the result
    res_reg = mm256_round_epi32(&res_reg, &reg_round, CONV8_ROUNDING_BITS);

    // Finally combine to get the final dst
    res_reg = _mm256_packus_epi32(res_reg, res_reg);
    res_reg = _mm256_min_epi16(res_reg, reg_max);
    mm256_storeu2_epi64((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
                        &res_reg);

    src_ptr += unrolled_src_stride;
    dst_ptr += unrolled_dst_stride;
  }

  // Repeat for the last row if needed (odd height). Only the low 64 bits
  // (4 pixels) of the result are stored.
  if (h > 0) {
    // Load the source
    src_reg = mm256_loadu2_si128(src_ptr, src_ptr + 4);
    src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
    src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);

    // Get the output
    res_reg = mm256_madd_add_epi32(&src_reg_shift_0, &src_reg_shift_2,
                                   &kernel_reg_23, &kernel_reg_45);

    // Round the result
    res_reg = mm256_round_epi32(&res_reg, &reg_round, CONV8_ROUNDING_BITS);

    // Finally combine to get the final dst
    res_reg = _mm256_packus_epi32(res_reg, res_reg);
    res_reg = _mm256_min_epi16(res_reg, reg_max);
    _mm_storel_epi64((__m128i *)dst_ptr, _mm256_castsi256_si128(res_reg));
  }
}
// Horizontal 4-tap filter for an 8-pixel-wide high-bitdepth block.
static void vpx_highbd_filter_block1d8_h4_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr,
    ptrdiff_t dst_stride, uint32_t height, const int16_t *kernel, int bd) {
  // We will extract the middle four elements of the kernel into two registers
  // in the form
  // ... k[3] k[2] k[3] k[2]
  // ... k[5] k[4] k[5] k[4]
  // Then we shuffle the source into
  // ... s[1] s[0] s[0] s[-1]
  // ... s[3] s[2] s[2] s[1]
  // Calling multiply and add gives us half of the sum of the first half.
  // Calling add gives us first half of the output. Repeat again to get the
  // whole output. Since avx2 allows us to use 256-bit buffer, we can do this
  // two rows at a time.

  __m256i src_reg, src_reg_shift_0, src_reg_shift_2;
  __m256i res_reg, res_first, res_last;

  // Byte-shuffle masks producing the two shifted source layouts above.
  __m256i idx_shift_0 =
      _mm256_setr_epi8(0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9, 0, 1, 2,
                       3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9);
  __m256i idx_shift_2 =
      _mm256_setr_epi8(4, 5, 6, 7, 6, 7, 8, 9, 8, 9, 10, 11, 10, 11, 12, 13, 4,
                       5, 6, 7, 6, 7, 8, 9, 8, 9, 10, 11, 10, 11, 12, 13);

  __m128i kernel_reg_128;  // Kernel
  __m256i kernel_reg, kernel_reg_23,
      kernel_reg_45;  // Segments of the kernel used

  const __m256i reg_round =
      _mm256_set1_epi32(CONV8_ROUNDING_NUM);  // Used for rounding
  const __m256i reg_max = _mm256_set1_epi16((1 << bd) - 1);
  const ptrdiff_t unrolled_src_stride = src_stride << 1;
  const ptrdiff_t unrolled_dst_stride = dst_stride << 1;
  int h;

  // Start one pixel before as we need tap/2 - 1 = 1 sample from the past
  src_ptr -= 1;

  // Load Kernel
  kernel_reg_128 = _mm_loadu_si128((const __m128i *)kernel);
  kernel_reg = _mm256_broadcastsi128_si256(kernel_reg_128);
  kernel_reg_23 = _mm256_shuffle_epi32(kernel_reg, 0x55);  // k[3] k[2] pairs
  kernel_reg_45 = _mm256_shuffle_epi32(kernel_reg, 0xaa);  // k[5] k[4] pairs

  for (h = height; h >= 2; h -= 2) {
    // Load the source (first four output pixels of each row).
    src_reg = mm256_loadu2_si128(src_ptr, src_ptr + src_stride);
    src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
    src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);

    // Result for first half
    res_first = mm256_madd_add_epi32(&src_reg_shift_0, &src_reg_shift_2,
                                     &kernel_reg_23, &kernel_reg_45);

    // Do again to get the second half of dst
    // Load the source
    src_reg = mm256_loadu2_si128(src_ptr + 4, src_ptr + src_stride + 4);
    src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
    src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);

    // Result for second half
    res_last = mm256_madd_add_epi32(&src_reg_shift_0, &src_reg_shift_2,
                                    &kernel_reg_23, &kernel_reg_45);

    // Round each result
    res_first = mm256_round_epi32(&res_first, &reg_round, CONV8_ROUNDING_BITS);
    res_last = mm256_round_epi32(&res_last, &reg_round, CONV8_ROUNDING_BITS);

    // Finally combine to get the final dst
    res_reg = _mm256_packus_epi32(res_first, res_last);
    res_reg = _mm256_min_epi16(res_reg, reg_max);
    mm256_store2_si128((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
                       &res_reg);

    src_ptr += unrolled_src_stride;
    dst_ptr += unrolled_dst_stride;
  }

  // Repeat for the last row if needed (odd height): both 4-pixel halves of
  // the single row are filtered via the two 128-bit lanes.
  if (h > 0) {
    src_reg = mm256_loadu2_si128(src_ptr, src_ptr + 4);
    src_reg_shift_0 = _mm256_shuffle_epi8(src_reg, idx_shift_0);
    src_reg_shift_2 = _mm256_shuffle_epi8(src_reg, idx_shift_2);

    res_reg = mm256_madd_add_epi32(&src_reg_shift_0, &src_reg_shift_2,
                                   &kernel_reg_23, &kernel_reg_45);

    res_reg = mm256_round_epi32(&res_reg, &reg_round, CONV8_ROUNDING_BITS);

    res_reg = _mm256_packus_epi32(res_reg, res_reg);
    res_reg = _mm256_min_epi16(res_reg, reg_max);

    mm256_storeu2_epi64((__m128i *)dst_ptr, (__m128i *)(dst_ptr + 4), &res_reg);
  }
}
// Horizontal 4-tap filter for a 16-pixel-wide block: handled as two
// independent 8-pixel-wide halves.
static void vpx_highbd_filter_block1d16_h4_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr,
    ptrdiff_t dst_stride, uint32_t height, const int16_t *kernel, int bd) {
  const uint16_t *src_right = src_ptr + 8;
  uint16_t *dst_right = dst_ptr + 8;
  vpx_highbd_filter_block1d8_h4_avx2(src_ptr, src_stride, dst_ptr, dst_stride,
                                     height, kernel, bd);
  vpx_highbd_filter_block1d8_h4_avx2(src_right, src_stride, dst_right,
                                     dst_stride, height, kernel, bd);
}
// Vertical 8-tap filter for an 8-pixel-wide high-bitdepth block with
// averaging; keeps a 9-row sliding window in `signal`.
static void vpx_highbd_filter_block1d8_v8_avg_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[9], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);  // clamp for bd bits

  __m256i ff[4];  // four tap pairs = 8 taps
  pack_filters(filter, ff);

  // Prime the sliding window with the initial rows.
  pack_8x9_init(src_ptr, src_pitch, signal);

  do {
    // Two output rows per iteration.
    pack_8x9_pixels(src_ptr, src_pitch, signal);
    filter_8x9_pixels(signal, ff, &res0, &res1);
    store_8x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
    // Shift the window down by two rows.
    update_pixels(signal);

    src_ptr += src_pitch << 1;
    dst_ptr += dst_pitch << 1;
    height -= 2;
  } while (height > 0);
}
// Vertical 8-tap filter for a 16-pixel-wide high-bitdepth block with
// averaging; keeps a 9-row (x16 pixels) sliding window in `signal`.
static void vpx_highbd_filter_block1d16_v8_avg_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[17], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);  // clamp for bd bits

  __m256i ff[4];  // four tap pairs = 8 taps
  pack_filters(filter, ff);

  // Prime the sliding window with the initial rows.
  pack_16x9_init(src_ptr, src_pitch, signal);

  do {
    // Two output rows per iteration.
    pack_16x9_pixels(src_ptr, src_pitch, signal);
    filter_16x9_pixels(signal, ff, &res0, &res1);
    store_16x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
    // Shift the window down by two rows.
    update_16x9_pixels(signal);

    src_ptr += src_pitch << 1;
    dst_ptr += dst_pitch << 1;
    height -= 2;
  } while (height > 0);
}
// Horizontal 2-tap (bilinear) filter for an 8-pixel-wide high-bitdepth block
// with averaging.
static void vpx_highbd_filter_block1d8_h2_avg_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[2], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);  // clamp for bd bits

  __m256i ff;
  pack_2t_filter(filter, &ff);

  // Align with the first sample the packed 2-tap kernel reads (the bilinear
  // taps sit in the middle of the 8-entry filter array; see pack_2t_filter).
  src_ptr -= 3;
  do {
    // Two rows per iteration.
    pack_8x2_2t_pixels(src_ptr, src_pitch, signal);
    filter_16_2t_pixels(signal, &ff, &res0, &res1);
    store_8x2_avg_pixels(&res0, &res1, &max, dst_ptr, dst_pitch);
    height -= 2;
    src_ptr += src_pitch << 1;
    dst_ptr += dst_pitch << 1;
  } while (height > 1);

  // Odd height: one trailing row.
  if (height > 0) {
    pack_8x1_2t_pixels(src_ptr, signal);
    filter_8x1_2t_pixels(signal, &ff, &res0);
    store_8x1_avg_pixels(&res0, &max, dst_ptr);
  }
}
// Horizontal 2-tap (bilinear) filter for a 16-pixel-wide high-bitdepth block
// with averaging; one full row per iteration.
static void vpx_highbd_filter_block1d16_h2_avg_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[2], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);  // clamp for bd bits

  __m256i ff;
  pack_2t_filter(filter, &ff);

  // Align with the first sample the packed 2-tap kernel reads (the bilinear
  // taps sit in the middle of the 8-entry filter array; see pack_2t_filter).
  src_ptr -= 3;
  do {
    pack_16x1_2t_pixels(src_ptr, signal);
    filter_16_2t_pixels(signal, &ff, &res0, &res1);
    store_16x1_avg_pixels(&res0, &res1, &max, dst_ptr);
    height -= 1;
    src_ptr += src_pitch;
    dst_ptr += dst_pitch;
  } while (height > 0);
}
// Vertical 2-tap (bilinear) filter for a 16-pixel-wide high-bitdepth block
// with averaging; keeps the previous row packed in `signal`.
static void vpx_highbd_filter_block1d16_v2_avg_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m256i signal[3], res0, res1;
  const __m256i max = _mm256_set1_epi16((1 << bd) - 1);  // clamp for bd bits
  __m256i ff;

  pack_2t_filter(filter, &ff);
  // Prime with the first source row.
  pack_16x2_init(src_ptr, signal);

  do {
    // Pair the cached row with the next one, filter and store with avg.
    pack_16x2_2t_pixels(src_ptr, src_pitch, signal);
    filter_16x2_2t_pixels(signal, &ff, &res0, &res1);
    store_16x1_avg_pixels(&res0, &res1, &max, dst_ptr);

    src_ptr += src_pitch;
    dst_ptr += dst_pitch;
    height -= 1;
  } while (height > 0);
}
// Vertical 2-tap (bilinear) filter for an 8-pixel-wide high-bitdepth block
// with averaging; SSE2-width registers suffice for 8 pixels.
static void vpx_highbd_filter_block1d8_v2_avg_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_pitch, uint16_t *dst_ptr,
    ptrdiff_t dst_pitch, uint32_t height, const int16_t *filter, int bd) {
  __m128i signal[3], res0, res1;
  const __m128i max = _mm_set1_epi16((1 << bd) - 1);  // clamp for bd bits
  __m128i ff;

  pack_8x1_2t_filter(filter, &ff);
  // Prime with the first source row.
  pack_8x2_init(src_ptr, signal);

  do {
    // Pair the cached row with the next one, filter and store with avg.
    pack_8x2_2t_pixels_ver(src_ptr, src_pitch, signal);
    filter_8_2t_pixels(signal, &ff, &res0, &res1);
    store_8x1_2t_avg_pixels_ver(&res0, &res1, &max, dst_ptr);

    src_ptr += src_pitch;
    dst_ptr += dst_pitch;
    height -= 1;
  } while (height > 0);
}
// Vertical 4-tap filter for a 4-pixel-wide high-bitdepth block.
static void vpx_highbd_filter_block1d4_v4_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr,
    ptrdiff_t dst_stride, uint32_t height, const int16_t *kernel, int bd) {
  // We will load two rows of pixels and rearrange them into the form
  // ... s[1,0] s[0,0] s[0,0] s[-1,0]
  // so that we can call multiply and add with the kernel partial output. Then
  // we can call add with another row to get the output.

  // Register for source s[-1:3, :]
  __m256i src_reg_1, src_reg_2, src_reg_3;
  // Interleaved rows of the source. lo is first half, hi second
  __m256i src_reg_m10, src_reg_01, src_reg_12, src_reg_23;
  __m256i src_reg_m1001, src_reg_1223;

  // Result after multiply and add
  __m256i res_reg;

  __m128i kernel_reg_128;  // Kernel
  __m256i kernel_reg, kernel_reg_23, kernel_reg_45;  // Segments of kernel used

  const __m256i reg_round =
      _mm256_set1_epi32(CONV8_ROUNDING_NUM);  // Used for rounding
  const __m256i reg_max = _mm256_set1_epi16((1 << bd) - 1);
  const ptrdiff_t src_stride_unrolled = src_stride << 1;
  const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
  int h;

  // Load Kernel
  kernel_reg_128 = _mm_loadu_si128((const __m128i *)kernel);
  kernel_reg = _mm256_broadcastsi128_si256(kernel_reg_128);
  kernel_reg_23 = _mm256_shuffle_epi32(kernel_reg, 0x55);  // k[3] k[2] pairs
  kernel_reg_45 = _mm256_shuffle_epi32(kernel_reg, 0xaa);  // k[5] k[4] pairs

  // Row -1 to row 0
  src_reg_m10 = mm256_loadu2_epi64((const __m128i *)src_ptr,
                                   (const __m128i *)(src_ptr + src_stride));

  // Row 0 to row 1
  // NOTE(review): this loads 8 pixels (16 bytes) although only 4 are needed
  // for a 4-wide block, unlike the 64-bit loads used in the loop below —
  // presumably safe due to surrounding buffer padding; confirm vs upstream.
  src_reg_1 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 2)));
  src_reg_01 = _mm256_permute2x128_si256(src_reg_m10, src_reg_1, 0x21);

  // First three rows
  src_reg_m1001 = _mm256_unpacklo_epi16(src_reg_m10, src_reg_01);

  // NOTE(review): an odd trailing row is not handled here — callers appear to
  // always pass even heights for the 4-tap vertical path; confirm.
  for (h = height; h > 1; h -= 2) {
    src_reg_2 = _mm256_castsi128_si256(
        _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 3)));

    src_reg_12 = _mm256_inserti128_si256(src_reg_1,
                                         _mm256_castsi256_si128(src_reg_2), 1);

    src_reg_3 = _mm256_castsi128_si256(
        _mm_loadl_epi64((const __m128i *)(src_ptr + src_stride * 4)));

    src_reg_23 = _mm256_inserti128_si256(src_reg_2,
                                         _mm256_castsi256_si128(src_reg_3), 1);

    // Last three rows
    src_reg_1223 = _mm256_unpacklo_epi16(src_reg_12, src_reg_23);

    // Output
    res_reg = mm256_madd_add_epi32(&src_reg_m1001, &src_reg_1223,
                                   &kernel_reg_23, &kernel_reg_45);

    // Round the words
    res_reg = mm256_round_epi32(&res_reg, &reg_round, CONV8_ROUNDING_BITS);

    // Combine to get the result
    res_reg = _mm256_packus_epi32(res_reg, res_reg);
    res_reg = _mm256_min_epi16(res_reg, reg_max);

    // Save the result
    mm256_storeu2_epi64((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
                        &res_reg);

    // Update the source by two rows
    src_ptr += src_stride_unrolled;
    dst_ptr += dst_stride_unrolled;

    // Slide the interleaved-row window down by two rows.
    src_reg_m1001 = src_reg_1223;
    src_reg_1 = src_reg_3;
  }
}
// Vertical 4-tap filter for an 8-pixel-wide high-bitdepth block.
static void vpx_highbd_filter_block1d8_v4_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr,
    ptrdiff_t dst_stride, uint32_t height, const int16_t *kernel, int bd) {
  // We will load two rows of pixels and rearrange them into the form
  // ... s[1,0] s[0,0] s[0,0] s[-1,0]
  // so that we can call multiply and add with the kernel partial output. Then
  // we can call add with another row to get the output.

  // Register for source s[-1:3, :]
  __m256i src_reg_1, src_reg_2, src_reg_3;
  // Interleaved rows of the source. lo is first half, hi second
  __m256i src_reg_m10, src_reg_01, src_reg_12, src_reg_23;
  __m256i src_reg_m1001_lo, src_reg_m1001_hi, src_reg_1223_lo, src_reg_1223_hi;

  __m128i kernel_reg_128;  // Kernel
  __m256i kernel_reg, kernel_reg_23, kernel_reg_45;  // Segments of kernel

  // Result after multiply and add
  __m256i res_reg, res_reg_lo, res_reg_hi;

  const __m256i reg_round =
      _mm256_set1_epi32(CONV8_ROUNDING_NUM);  // Used for rounding
  const __m256i reg_max = _mm256_set1_epi16((1 << bd) - 1);
  const ptrdiff_t src_stride_unrolled = src_stride << 1;
  const ptrdiff_t dst_stride_unrolled = dst_stride << 1;
  int h;

  // Load Kernel
  kernel_reg_128 = _mm_loadu_si128((const __m128i *)kernel);
  kernel_reg = _mm256_broadcastsi128_si256(kernel_reg_128);
  kernel_reg_23 = _mm256_shuffle_epi32(kernel_reg, 0x55);  // k[3] k[2] pairs
  kernel_reg_45 = _mm256_shuffle_epi32(kernel_reg, 0xaa);  // k[5] k[4] pairs

  // Row -1 to row 0
  src_reg_m10 = mm256_loadu2_si128((const __m128i *)src_ptr,
                                   (const __m128i *)(src_ptr + src_stride));

  // Row 0 to row 1
  src_reg_1 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 2)));
  src_reg_01 = _mm256_permute2x128_si256(src_reg_m10, src_reg_1, 0x21);

  // First three rows, interleaved 16-bit pairs split into lo/hi halves.
  src_reg_m1001_lo = _mm256_unpacklo_epi16(src_reg_m10, src_reg_01);
  src_reg_m1001_hi = _mm256_unpackhi_epi16(src_reg_m10, src_reg_01);

  // NOTE(review): an odd trailing row is not handled here — callers appear to
  // always pass even heights for the 4-tap vertical path; confirm.
  for (h = height; h > 1; h -= 2) {
    src_reg_2 = _mm256_castsi128_si256(
        _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 3)));

    src_reg_12 = _mm256_inserti128_si256(src_reg_1,
                                         _mm256_castsi256_si128(src_reg_2), 1);

    src_reg_3 = _mm256_castsi128_si256(
        _mm_loadu_si128((const __m128i *)(src_ptr + src_stride * 4)));

    src_reg_23 = _mm256_inserti128_si256(src_reg_2,
                                         _mm256_castsi256_si128(src_reg_3), 1);

    // Last three rows
    src_reg_1223_lo = _mm256_unpacklo_epi16(src_reg_12, src_reg_23);
    src_reg_1223_hi = _mm256_unpackhi_epi16(src_reg_12, src_reg_23);

    // Output from first half
    res_reg_lo = mm256_madd_add_epi32(&src_reg_m1001_lo, &src_reg_1223_lo,
                                      &kernel_reg_23, &kernel_reg_45);

    // Output from second half
    res_reg_hi = mm256_madd_add_epi32(&src_reg_m1001_hi, &src_reg_1223_hi,
                                      &kernel_reg_23, &kernel_reg_45);

    // Round the words
    res_reg_lo =
        mm256_round_epi32(&res_reg_lo, &reg_round, CONV8_ROUNDING_BITS);
    res_reg_hi =
        mm256_round_epi32(&res_reg_hi, &reg_round, CONV8_ROUNDING_BITS);

    // Combine to get the result
    res_reg = _mm256_packus_epi32(res_reg_lo, res_reg_hi);
    res_reg = _mm256_min_epi16(res_reg, reg_max);

    // Save the result
    mm256_store2_si128((__m128i *)dst_ptr, (__m128i *)(dst_ptr + dst_stride),
                       &res_reg);

    // Update the source by two rows
    src_ptr += src_stride_unrolled;
    dst_ptr += dst_stride_unrolled;

    // Slide the interleaved-row window down by two rows.
    src_reg_m1001_lo = src_reg_1223_lo;
    src_reg_m1001_hi = src_reg_1223_hi;
    src_reg_1 = src_reg_3;
  }
}
// Vertical 4-tap filter for a 16-pixel-wide block: handled as two
// independent 8-pixel-wide halves.
static void vpx_highbd_filter_block1d16_v4_avx2(
    const uint16_t *src_ptr, ptrdiff_t src_stride, uint16_t *dst_ptr,
    ptrdiff_t dst_stride, uint32_t height, const int16_t *kernel, int bd) {
  const uint16_t *src_right = src_ptr + 8;
  uint16_t *dst_right = dst_ptr + 8;
  vpx_highbd_filter_block1d8_v4_avx2(src_ptr, src_stride, dst_ptr, dst_stride,
                                     height, kernel, bd);
  vpx_highbd_filter_block1d8_v4_avx2(src_right, src_stride, dst_right,
                                     dst_stride, height, kernel, bd);
}
// 4-pixel-wide kernels have no AVX2 implementation; the SSE2 assembly
// versions declared below are used in their place.
// From vpx_dsp/x86/vpx_high_subpixel_8t_sse2.asm.
highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h8_sse2;
highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v8_sse2;
// From vpx_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm.
highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h2_sse2;
highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v2_sse2;
#define vpx_highbd_filter_block1d4_h8_avx2 vpx_highbd_filter_block1d4_h8_sse2
#define vpx_highbd_filter_block1d4_h2_avx2 vpx_highbd_filter_block1d4_h2_sse2
#define vpx_highbd_filter_block1d4_v8_avx2 vpx_highbd_filter_block1d4_v8_sse2
#define vpx_highbd_filter_block1d4_v2_avx2 vpx_highbd_filter_block1d4_v2_sse2

// Use the [vh]8 version because there is no [vh]4 implementation.
#define vpx_highbd_filter_block1d16_v4_avg_avx2 \
  vpx_highbd_filter_block1d16_v8_avg_avx2
#define vpx_highbd_filter_block1d16_h4_avg_avx2 \
  vpx_highbd_filter_block1d16_h8_avg_avx2
#define vpx_highbd_filter_block1d8_v4_avg_avx2 \
  vpx_highbd_filter_block1d8_v8_avg_avx2
#define vpx_highbd_filter_block1d8_h4_avg_avx2 \
  vpx_highbd_filter_block1d8_h8_avg_avx2
#define vpx_highbd_filter_block1d4_v4_avg_avx2 \
  vpx_highbd_filter_block1d4_v8_avg_avx2
#define vpx_highbd_filter_block1d4_h4_avg_avx2 \
  vpx_highbd_filter_block1d4_h8_avg_avx2

// Instantiate the public non-averaging convolve entry points.
HIGH_FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , avx2, 0);
HIGH_FUN_CONV_1D(vert, y0_q4, y_step_q4, v,
                 src - src_stride * (num_taps / 2 - 1), , avx2, 0);
HIGH_FUN_CONV_2D(, avx2, 0);

// Averaging variants: 4-wide kernels fall back to the SSE2 assembly.
// From vpx_dsp/x86/vpx_high_subpixel_8t_sse2.asm.
highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h8_avg_sse2;
highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v8_avg_sse2;
// From vpx_dsp/x86/vpx_high_subpixel_bilinear_sse2.asm.
highbd_filter8_1dfunction vpx_highbd_filter_block1d4_h2_avg_sse2;
highbd_filter8_1dfunction vpx_highbd_filter_block1d4_v2_avg_sse2;
#define vpx_highbd_filter_block1d4_h8_avg_avx2 \
  vpx_highbd_filter_block1d4_h8_avg_sse2
#define vpx_highbd_filter_block1d4_h2_avg_avx2 \
  vpx_highbd_filter_block1d4_h2_avg_sse2
#define vpx_highbd_filter_block1d4_v8_avg_avx2 \
  vpx_highbd_filter_block1d4_v8_avg_sse2
#define vpx_highbd_filter_block1d4_v2_avg_avx2 \
  vpx_highbd_filter_block1d4_v2_avg_sse2

// Instantiate the public averaging convolve entry points.
HIGH_FUN_CONV_1D(avg_horiz, x0_q4, x_step_q4, h, src, avg_, avx2, 1);
HIGH_FUN_CONV_1D(avg_vert, y0_q4, y_step_q4, v,
                 src - src_stride * (num_taps / 2 - 1), avg_, avx2, 1);
HIGH_FUN_CONV_2D(avg_, avx2, 1);

#undef HIGHBD_FUNC
| 28,604 |
2,151 |
<filename>third_party/blink/renderer/core/css/properties/longhands/scroll_snap_type_custom.cc
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/css/properties/longhands/scroll_snap_type.h"
#include "third_party/blink/renderer/core/css/css_value_pair.h"
#include "third_party/blink/renderer/core/css/parser/css_property_parser_helpers.h"
#include "third_party/blink/renderer/core/css/properties/computed_style_utils.h"
#include "third_party/blink/renderer/core/style/computed_style.h"
namespace blink {
namespace CSSLonghand {
const CSSValue* ScrollSnapType::ParseSingleValue(
    CSSParserTokenRange& range,
    const CSSParserContext& context,
    const CSSParserLocalContext&) const {
  // Grammar: none | [ x | y | block | inline | both ] [ mandatory | proximity ]?
  const CSSValueID axis_id = range.Peek().Id();
  const bool axis_is_valid =
      axis_id == CSSValueNone || axis_id == CSSValueX || axis_id == CSSValueY ||
      axis_id == CSSValueBlock || axis_id == CSSValueInline ||
      axis_id == CSSValueBoth;
  if (!axis_is_valid)
    return nullptr;
  CSSValue* axis_value = CSSPropertyParserHelpers::ConsumeIdent(range);
  // "none" never takes a strictness keyword; likewise stop at end of input.
  if (axis_id == CSSValueNone || range.AtEnd())
    return axis_value;
  // Optional strictness keyword following the axis.
  const CSSValueID strictness_id = range.Peek().Id();
  if (strictness_id != CSSValueProximity && strictness_id != CSSValueMandatory)
    return axis_value;
  CSSValue* strictness_value = CSSPropertyParserHelpers::ConsumeIdent(range);
  return CSSValuePair::Create(axis_value, strictness_value,
                              CSSValuePair::kDropIdenticalValues);
}
const CSSValue* ScrollSnapType::CSSValueFromComputedStyleInternal(
    const ComputedStyle& style,
    const SVGComputedStyle&,
    const LayoutObject*,
    Node* styled_node,
    bool allow_visited_style) const {
  // Delegates to the shared utility with the computed ScrollSnapType; the
  // style is passed along as well (presumably for writing-mode-dependent
  // axis resolution — confirm in ComputedStyleUtils). The remaining
  // parameters are unused for this property.
  return ComputedStyleUtils::ValueForScrollSnapType(style.GetScrollSnapType(),
                                                    style);
}
} // namespace CSSLonghand
} // namespace blink
| 750 |
348 |
<filename>docs/data/leg-t1/078/07802406.json
{"nom":"Milon-la-Chapelle","circ":"2ème circonscription","dpt":"Yvelines","inscrits":260,"abs":104,"votants":156,"blancs":0,"nuls":0,"exp":156,"res":[{"nuance":"REM","nom":"<NAME>","voix":66},{"nuance":"LR","nom":"<NAME>","voix":54},{"nuance":"ECO","nom":"<NAME>","voix":7},{"nuance":"FI","nom":"M. <NAME>","voix":7},{"nuance":"FN","nom":"Mme <NAME>","voix":5},{"nuance":"DIV","nom":"M. <NAME>","voix":5},{"nuance":"DLF","nom":"Mme <NAME>","voix":3},{"nuance":"ECO","nom":"M. <NAME>","voix":3},{"nuance":"DIV","nom":"M. <NAME>","voix":2},{"nuance":"EXD","nom":"Mme <NAME>","voix":1},{"nuance":"ECO","nom":"M. <NAME>","voix":1},{"nuance":"DVD","nom":"<NAME>","voix":1},{"nuance":"DVD","nom":"Mme <NAME>","voix":1},{"nuance":"DVD","nom":"<NAME>","voix":0},{"nuance":"DVG","nom":"<NAME>","voix":0},{"nuance":"DVG","nom":"M. <NAME>","voix":0},{"nuance":"EXG","nom":"Mme <NAME>","voix":0}]}
| 386 |
879 |
<filename>sdk/src/main/java/org/zstack/sdk/UpdateVCenterResult.java
package org.zstack.sdk;
import org.zstack.sdk.VCenterInventory;
/**
 * Result object holding the {@link VCenterInventory} returned by an
 * UpdateVCenter API call.
 */
public class UpdateVCenterResult {
    /** The vCenter inventory payload of the result. */
    public VCenterInventory inventory;
    public void setInventory(VCenterInventory inventory) {
        this.inventory = inventory;
    }
    public VCenterInventory getInventory() {
        return this.inventory;
    }
}
| 144 |
2,231 |
<reponame>hakoptak/defold<gh_stars>1000+
// Copyright 2020 The Defold Foundation
// Licensed under the Defold License version 1.0 (the "License"); you may not use
// this file except in compliance with the License.
//
// You may obtain a copy of the License, together with FAQs at
// https://www.defold.com/license
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef DMSDK_HASHTABLE_H
#define DMSDK_HASHTABLE_H
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
/*# Hash table
*
* Hash table
*
* @document
* @name Hashtable
* @namespace dmHashTable
* @path engine/dlib/src/dmsdk/dlib/hashtable.h
*/
/*# hashtable
* Hashtable with chaining for collision resolution, memcpy-copy semantics (POD types) and 32-bit indicies instead of pointers. (NUMA-friendly)
* @note The key type needs to support == and % operators
* @type class
* @name dmHashTable
*/
template <typename KEY, typename T>
class dmHashTable
{
enum STATE_FLAGS
{
STATE_DEFAULT = 0x0,
STATE_USER_ALLOCATED = 0x1
};
public:
struct Entry
{
KEY m_Key;
T m_Value;
uint32_t m_Next;
};
    /**
     * Constructor. Create an empty hashtable with zero capacity and zero hashtable (buckets)
     * @name dmHashTable
     */
    dmHashTable()
    {
        // Zero all members in one go (the class relies on memcpy/POD
        // semantics), then mark the free list as empty. 0xffffffff is the
        // "null index" sentinel used throughout this class.
        memset(this, 0, sizeof(*this));
        m_FreeEntries = 0xffffffff;
    }
    /**
     * Creates a hashtable array with user allocated memory.
     * @note User allocated arrays can not change capacity.
     * @name dmHashTable
     * @param user_allocated Pointer to user allocated continous data-block ((table_size*sizeof(uint32_t)) + (capacity*sizeof(dmHashTable::Entry))
     * @param table_size Hashtable size, ie number of buckets. table_size < 0xffffffff
     * @param capacity Capacity. capacity < 0xffffffff
     */
    dmHashTable(void *user_allocated, uint32_t table_size, uint32_t capacity)
    {
        assert(table_size < 0xffffffff);
        assert(capacity < 0xffffffff);
        memset(this, 0, sizeof(*this));
        m_FreeEntries = 0xffffffff;

        m_HashTableSize = table_size;
        m_HashTable = (uint32_t*) user_allocated;
        // 0xff in every byte -> each bucket holds the 0xffffffff "empty" sentinel.
        memset(m_HashTable, 0xff, sizeof(uint32_t) * table_size);

        // The entry pool lives directly after the bucket array in the user block.
        m_InitialEntries = (Entry*) (m_HashTable + table_size);
        m_InitialEntriesNextFree = m_InitialEntries;
        m_InitialEntriesEnd = m_InitialEntries + capacity;
        m_State = STATE_USER_ALLOCATED;
    }
    /**
     * Removes all the entries from the table.
     * @name Clear
     * @note Capacity is unchanged; no memory is freed, only bookkeeping is reset.
     */
    void Clear()
    {
        // Reset every bucket to the 0xffffffff "empty" sentinel.
        memset(m_HashTable, 0xff, sizeof(uint32_t) * m_HashTableSize);
        m_InitialEntriesNextFree = m_InitialEntries;
        m_FreeEntries = 0xffffffff;
        m_Count = 0;
    }
    /**
     * Destructor.
     * @name ~dmHashTable
     * @note If user allocated, memory is not free'd
     */
    ~dmHashTable()
    {
        // Only release memory this table allocated itself (see SetCapacity);
        // user-supplied blocks remain owned by the caller.
        if(!(m_State & STATE_USER_ALLOCATED))
        {
            if (m_InitialEntries)
            {
                free(m_InitialEntries);
            }
            if (m_HashTable)
            {
                free(m_HashTable);
            }
        }
    }
    /**
     * Number of entries stored in table. (not the actual hashtable size)
     * @name Size
     * @return Number of entries.
     */
    uint32_t Size()
    {
        // Maintained incrementally by Put()/Erase().
        return m_Count;
    }
    /**
     * Hashtable capacity. Maximum number of entries possible to store in table
     * @name Capacity
     * @return [type: uint32_t] the capacity of the table
     */
    uint32_t Capacity()
    {
        // Derived from the extent of the entry pool rather than stored.
        return (uint32_t)(uintptr_t)(m_InitialEntriesEnd - m_InitialEntries);
    }
    /**
     * Set hashtable capacity. New capacity must be greater or equal to current capacity
     * @name SetCapacity
     * @param table_size Hashtable size, ie number of buckets. table_size < 0xffffffff
     * @param capacity Capacity. capacity < 0xffffffff
     * @note NOTE(review): there is no guard here against growing a table that
     *       was created with user-allocated memory (STATE_USER_ALLOCATED),
     *       although the constructor documents that such tables cannot change
     *       capacity — presumably callers never do this; confirm.
     */
    void SetCapacity(uint32_t table_size, uint32_t capacity)
    {
        assert(table_size > 0);
        assert(table_size < 0xffffffff);
        assert(capacity < 0xffffffff);
        assert(capacity >= Capacity());

        if (m_InitialEntries == 0)
        {
            // First allocation: buckets and the entry pool are separate blocks.
            m_HashTableSize = table_size;
            m_HashTable = (uint32_t*) malloc(sizeof(uint32_t) * table_size);
            memset(m_HashTable, 0xff, sizeof(uint32_t) * table_size);

            m_InitialEntries = (Entry*) malloc(sizeof(Entry) * capacity);
            m_InitialEntriesNextFree = m_InitialEntries;
            m_InitialEntriesEnd = m_InitialEntries + capacity;
        }
        else
        {
            // Rehash table: build a fresh table and steal its storage.
            dmHashTable<KEY, T> new_ht;
            new_ht.SetCapacity(table_size, capacity);
            this->Iterate<dmHashTable<KEY, T> >(&FillCallback<KEY, T>, &new_ht);
            free(m_HashTable);
            free(m_InitialEntries);
            memcpy(this, &new_ht, sizeof(*this));
            // Avoid double free() when new_ht's destructor runs.
            new_ht.m_HashTable = 0;
            new_ht.m_InitialEntries = 0;
        }
    }
    /**
     * Swaps the contents of two hash tables
     * @name Swap
     * @param other [type: dmHashTable<KEY, T>&] the other table
     */
    void Swap(dmHashTable<KEY, T>& other)
    {
        // Byte-wise three-way swap; valid because the class relies on
        // memcpy-semantics (no self-referencing members).
        char buf[sizeof(*this)];
        memcpy(buf, &other, sizeof(buf));
        memcpy(&other, this, sizeof(buf));
        memcpy(this, buf, sizeof(buf));
    }
    /**
     * Check if the table is full
     * @name Full
     * @return true if the table is full
     */
    bool Full()
    {
        // Full when every slot of the entry pool is in use.
        return m_Count == Capacity();
    }
    /**
     * Check if the table is empty
     * @name Empty
     * @return true if the table is empty
     */
    bool Empty()
    {
        // No live entries at all.
        return m_Count == 0;
    }
/**
* Put key/value pair in hash table. NOTE: The method will "assert" if the hashtable is full.
* @name Put
* @param key [type: Key] Key
* @param value [type: const T&] Value
*/
void Put(KEY key, const T& value)
{
assert(!Full());
Entry* entry = FindEntry(key);
// Key already in table?
if (entry != 0)
{
// TODO: memcpy or similar to enforce memcpy-semantics?
entry->m_Value = value;
return;
}
else
{
entry = AllocateEntry();
entry->m_Key = key;
entry->m_Value = value;
entry->m_Next = 0xffffffff;
uint32_t bucket_index = (uint32_t) (key % m_HashTableSize);
uint32_t entry_ptr = m_HashTable[bucket_index];
if (entry_ptr == 0xffffffff)
{
m_HashTable[bucket_index] = (uint32_t)(uintptr_t)(entry - m_InitialEntries); // Store the index of the entry
}
else
{
// We need to traverse the list of entries for the bucket
Entry* prev_entry;
while (entry_ptr != 0xffffffff)
{
prev_entry = &m_InitialEntries[entry_ptr];
entry_ptr = prev_entry->m_Next;
}
assert(prev_entry->m_Next == 0xffffffff);
// Link prev entry to this
prev_entry->m_Next = (uint32_t)(uintptr_t)(entry - m_InitialEntries);
}
}
m_Count++;
}
    /**
     * Get pointer to value from key
     * @name Get
     * @param key [type: Key] Key
     * @return value [type: T*] Pointer to value. NULL if the key/value pair doesn't exist.
     * @note The returned pointer is invalidated by SetCapacity() (rehash).
     */
    T* Get(KEY key)
    {
        Entry* entry = FindEntry(key);

        // Key already in table?
        if (entry != 0)
        {
            return &entry->m_Value;
        }
        else
        {
            return 0;
        }
    }
    /**
     * Get pointer to value from key. "const" version.
     * @name Get
     * @param key [type: Key] Key
     * @return value [type: const T*] Pointer to value. NULL if the key/value pair doesn't exist.
     * @note The returned pointer is invalidated by SetCapacity() (rehash).
     */
    const T* Get(KEY key) const
    {
        Entry* entry = FindEntry(key);

        // Key already in table?
        if (entry != 0)
        {
            return &entry->m_Value;
        }
        else
        {
            return 0;
        }
    }
/**
* Get pointer to first entry in table
* @return Pointer to first entry. NULL if the table is empty.
*/
// Entry* GetFirstEntry()
// {
// if(Empty())
// return 0;
// return m_InitialEntries;
// }
    /**
     * Remove key/value pair.
     * @name Erase
     * @param key [type: Key] Key to remove
     * @note Only valid if key exists in table; asserts otherwise.
     */
    void Erase(KEY key)
    {
        // Avoid module division by zero
        assert(m_HashTableSize != 0);

        uint32_t bucket_index = key % m_HashTableSize;
        uint32_t entry_ptr = m_HashTable[bucket_index];

        // Empty list?
        assert(entry_ptr != 0xffffffff);

        // Walk the bucket's chain, tracking the predecessor for relinking.
        Entry* prev_e = 0;
        while (entry_ptr != 0xffffffff)
        {
            Entry* e = &m_InitialEntries[entry_ptr];
            if (e->m_Key == key)
            {
                --m_Count;
                // The entry first in list?
                if (prev_e == 0)
                {
                    // Relink
                    m_HashTable[bucket_index] = e->m_Next;
                }
                else
                {
                    // Relink, skip "this"
                    prev_e->m_Next = e->m_Next;
                }
                FreeEntry(e);
                return;
            }
            entry_ptr = e->m_Next;
            prev_e = e;
        }
        assert(false && "Key not found (erase)");
    }
/**
* Iterate over all entries in table
* @name Iterate
* @param call_back Call-back called for every entry
* @param context Context
*/
template <typename CONTEXT>
void Iterate(void (*call_back)(CONTEXT *context, const KEY* key, T* value), CONTEXT* context)
{
for (uint32_t i = 0; i < m_HashTableSize; ++i)
{
if (m_HashTable[i] != 0xffffffff)
{
uint32_t entry_ptr = m_HashTable[i];
while (entry_ptr != 0xffffffff)
{
Entry*e = &m_InitialEntries[entry_ptr];
call_back(context, &e->m_Key, &e->m_Value);
entry_ptr = e->m_Next;
}
}
}
}
/**
* Verify internal structure. "assert" if invalid. For unit testing
*/
void Verify()
{
    // Invariant 1: no entry on the free list may also be reachable from
    // the hash table (that would mean it is simultaneously live and free).
    uint32_t free_ptr = m_FreeEntries;
    while (free_ptr != 0xffffffff)
    {
        Entry* e = &m_InitialEntries[free_ptr];
        // Look up the freed entry's (stale) key; finding the very same
        // entry object means the structure is corrupt.
        Entry* found = FindEntry(e->m_Key);
        if (found && found == e )
        {
            // NOTE(review): '%d' assumes KEY formats as int — confirm for
            // 64-bit key types.
            printf("Key '%d' in table but also key '%d' in free list.\n", found->m_Key, e->m_Key);
        }
        assert( found != e );
        free_ptr = e->m_Next;
    }
    // Invariant 2: the number of entries reachable through all bucket
    // chains must equal the cached m_Count.
    uint32_t real_count = 0;
    for (uint32_t i = 0; i < m_HashTableSize; ++i)
    {
        if (m_HashTable[i] != 0xffffffff)
        {
            uint32_t entry_ptr = m_HashTable[i];
            while (entry_ptr != 0xffffffff)
            {
                real_count++;
                Entry*e = &m_InitialEntries[entry_ptr];
                entry_ptr = e->m_Next;
            }
        }
    }
    assert(real_count == m_Count);
}
private:
// Forbid assignment operator and copy-constructor
dmHashTable(const dmHashTable<KEY, T>&);
const dmHashTable<KEY, T>& operator=(const dmHashTable<KEY, T>&);
template <typename KEY2, typename T2>
static void FillCallback(dmHashTable<KEY2,T2> *ht, const KEY2* key, T2* value)
{
ht->Put(*key, *value);
}
Entry* FindEntry(KEY key) const
{
    // An empty table has no buckets; bail out before the modulo below
    // would divide by zero.
    if (m_HashTableSize == 0)
        return 0;
    // Scan the collision chain of the bucket the key hashes into.
    uint32_t cursor = m_HashTable[(uint32_t) (key % m_HashTableSize)];
    while (cursor != 0xffffffff)
    {
        Entry* candidate = &m_InitialEntries[cursor];
        if (candidate->m_Key == key)
            return candidate;
        cursor = candidate->m_Next;
    }
    return 0;
}
Entry* AllocateEntry()
{
    // Hand out never-used entries from the initial depot first; once the
    // depot is exhausted, recycle from the free list.
    if (m_InitialEntriesNextFree != m_InitialEntriesEnd)
        return m_InitialEntriesNextFree++;

    assert(m_FreeEntries != 0xffffffff && "No free entries in hashtable");
    Entry* recycled = &m_InitialEntries[m_FreeEntries];
    m_FreeEntries = recycled->m_Next;
    return recycled;
}
void FreeEntry(Entry* e)
{
    // Push the entry onto the head of the free list. When the list is
    // empty m_FreeEntries is already 0xffffffff — exactly the terminator
    // the entry's m_Next must carry — so a single unconditional link
    // covers both the empty and non-empty cases.
    e->m_Next = m_FreeEntries;
    m_FreeEntries = (uint32_t)(uintptr_t)(e - m_InitialEntries);
}
// The bucket array; each slot holds the index of the first entry in that
// bucket's chain, or 0xffffffff when the bucket is empty.
uint32_t* m_HashTable;
uint32_t m_HashTableSize;
// Backing storage for all entries.
Entry* m_InitialEntries;
// Next never-used entry in the depot.
Entry* m_InitialEntriesNextFree;
// One past the last depot entry (exclusive end).
Entry* m_InitialEntriesEnd;
// Head index of the linked list of recycled entries (0xffffffff = empty).
uint32_t m_FreeEntries;
// Number of key/value pairs currently stored.
uint32_t m_Count;
// NOTE(review): single-bit state flag; its meaning is not visible in this
// part of the file — confirm usage before relying on it.
uint16_t m_State : 1;
};
/*#
* Specialized hash table with [type:uint16_t] as keys
* @type class
* @name dmHashTable16
*/
template <typename T>
class dmHashTable16 : public dmHashTable<uint16_t, T> {};
/*#
* Specialized hash table with [type:uint32_t] as keys
* @type class
* @name dmHashTable32
*/
template <typename T>
class dmHashTable32 : public dmHashTable<uint32_t, T> {};
/*#
* Specialized hash table with [type:uint64_t] as keys
* @type class
* @name dmHashTable64
*/
template <typename T>
class dmHashTable64 : public dmHashTable<uint64_t, T> {};
#endif // DMSDK_HASHTABLE_H
| 7,148 |
323 |
<gh_stars>100-1000
package cn.huanzi.qch.springbootsecurity.sysshortcutmenu.service;
import cn.huanzi.qch.springbootsecurity.common.pojo.Result;
import cn.huanzi.qch.springbootsecurity.common.service.CommonServiceImpl;
import cn.huanzi.qch.springbootsecurity.sysshortcutmenu.pojo.SysShortcutMenu;
import cn.huanzi.qch.springbootsecurity.sysshortcutmenu.repository.SysShortcutMenuRepository;
import cn.huanzi.qch.springbootsecurity.sysshortcutmenu.vo.SysShortcutMenuVo;
import cn.huanzi.qch.springbootsecurity.util.CopyUtil;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import javax.persistence.EntityManager;
import javax.persistence.PersistenceContext;
import java.util.List;
@Service
@Transactional
public class SysShortcutMenuServiceImpl extends CommonServiceImpl<SysShortcutMenuVo, SysShortcutMenu, String> implements SysShortcutMenuService{
    // NOTE(review): injected but not referenced anywhere in this class —
    // possibly kept for future custom queries; confirm before removing.
    @PersistenceContext
    private EntityManager em;
    @Autowired
    private SysShortcutMenuRepository sysShortcutMenuRepository;
    /**
     * Fetch the shortcut-menu rows owned by the given user, convert the
     * entities to VOs and wrap them in a successful {@link Result}.
     */
    @Override
    public Result<List<SysShortcutMenuVo>> findByUserId(String userId) {
        return Result.of(CopyUtil.copyList(sysShortcutMenuRepository.findByUserId(userId), SysShortcutMenuVo.class));
    }
}
| 451 |
855 |
from django.conf.urls import include
from django.urls import path
from django.contrib import admin
from organizations.backends.modeled import ModelInvitation
from test_accounts.models import Account
# Register admin modules before URL resolution.
admin.autodiscover()
app_name = "test_accounts"
urlpatterns = [
    path(
        "register/",
        include(
            # Mount the model-backed invitation workflow for Account under
            # the reusable "invitations" URL namespace.
            ModelInvitation(org_model=Account, namespace="invitations").urls,
            namespace="account_invitations",
        ),
    )
]
| 173 |
1,338 |
<gh_stars>1000+
/*
* Copyright 2011-2013, <NAME>, <EMAIL>.
* Distributed under the terms of the MIT License.
*/
#include <stdlib.h>
#include "Protocol.h"
#include "Response.h"
#include "argv.h"
struct cmd_entry {
const char* name;
void (*func)(int argc, char **argv);
const char* help;
};
static void do_help(int argc, char** argv);
extern const char* __progname;
static const char* kProgramName = __progname;
static IMAP::Protocol sProtocol;
// Print a failure message naming the operation and the human-readable
// error for `status` on stderr.
static void
error(const char* context, status_t status)
{
    fprintf(stderr, "Error during %s: %s\n", context, strerror(status));
}
// Print invocation help and terminate with a failure exit code.
// NOTE(review): main() also accepts an optional 4th argument enabling SSL,
// which is not reflected in this usage string.
static void
usage()
{
    printf("Usage: %s <server> <username> <password>\n", kProgramName);
    exit(1);
}
// #pragma mark -
static void
do_select(int argc, char** argv)
{
    // Select the requested mailbox (INBOX when no argument is given) and
    // print the UID bookkeeping values the server reports for it.
    const char* mailbox = argc > 1 ? argv[1] : "INBOX";
    IMAP::SelectCommand command(mailbox);
    status_t status = sProtocol.ProcessCommand(command);
    if (status != B_OK) {
        error("select", status);
        return;
    }
    printf("Next UID: %" B_PRIu32 ", UID validity: %" B_PRIu32 "\n",
        command.NextUID(), command.UIDValidity());
}
static void
do_folders(int argc, char** argv)
{
IMAP::FolderList folders;
BString separator;
status_t status = sProtocol.GetFolders(folders, separator);
if (status != B_OK) {
error("folders", status);
return;
}
for (size_t i = 0; i < folders.size(); i++) {
printf(" %s %s\n", folders[i].subscribed ? "*" : " ",
folders[i].folder.String());
}
}
static void
do_fetch(int argc, char** argv)
{
    // Fetch one message (single numeric argument) or a range (two
    // arguments); an optional trailing "header"/"body" keyword restricts
    // the fetch to that part only (default: everything).
    uint32 from = 1;
    uint32 to;
    uint32 flags = IMAP::kFetchAll;
    if (argc < 2) {
        printf("usage: %s [<from>] [<to>] [header|body]\n", argv[0]);
        return;
    }
    if (argc > 2) {
        // Consume a trailing part keyword before parsing the range.
        if (!strcasecmp(argv[argc - 1], "header")) {
            flags = IMAP::kFetchHeader;
            argc--;
        } else if (!strcasecmp(argv[argc - 1], "body")) {
            flags = IMAP::kFetchBody;
            argc--;
        }
    }
    if (argc > 2) {
        from = atoul(argv[1]);
        to = atoul(argv[2]);
    } else
        from = to = atoul(argv[1]);
    IMAP::FetchCommand command(from, to, flags | IMAP::kFetchFlags);
    // A fetch listener that buffers each message and dumps it to stdout.
    class Listener : public IMAP::FetchListener {
    public:
        virtual ~Listener()
        {
        }
        // Drains `length` bytes of message data into fBuffer.
        virtual bool FetchData(uint32 fetchFlags, BDataIO& stream,
            size_t& length)
        {
            fBuffer.SetSize(0);
            char buffer[65535];
            while (length > 0) {
                ssize_t bytesRead = stream.Read(buffer,
                    min_c(sizeof(buffer), length));
                if (bytesRead <= 0)
                    break;
                fBuffer.Write(buffer, bytesRead);
                length -= bytesRead;
            }
            // Null terminate the buffer so it can be printed as a C string
            char null = '\0';
            fBuffer.Write(&null, 1);
            return true;
        }
        virtual void FetchedData(uint32 fetchFlags, uint32 uid, uint32 flags)
        {
            // NOTE(review): %ld/%lx with uint32 arguments assumes
            // sizeof(long) == 4 — true on 32-bit Haiku, not portable.
            printf("================= UID %ld, flags %lx =================\n",
                uid, flags);
            puts((const char*)fBuffer.Buffer());
        }
    private:
        BMallocIO fBuffer;
    } listener;
    command.SetListener(&listener);
    status_t status = sProtocol.ProcessCommand(command);
    if (status != B_OK) {
        error("fetch", status);
        return;
    }
}
static void
do_flags(int argc, char** argv)
{
    // Print UID, size and flags for a range of messages.
    uint32 from = 1;
    uint32 to;
    if (argc < 2) {
        printf("usage: %s [<from>] [<to>]\n", argv[0]);
        return;
    }
    if (argc > 2) {
        from = atoul(argv[1]);
        to = atoul(argv[2]);
    } else
        // NOTE(review): with a single argument the range becomes
        // 1..<arg>, whereas do_fetch treats a single argument as
        // <arg>..<arg> — confirm which behavior is intended.
        to = atoul(argv[1]);
    IMAP::MessageEntryList entries;
    IMAP::FetchMessageEntriesCommand command(entries, from, to, true);
    status_t status = sProtocol.ProcessCommand(command);
    if (status != B_OK) {
        error("flags", status);
        return;
    }
    for (size_t i = 0; i < entries.size(); i++) {
        // NOTE(review): %lu/%lx with uint32 assumes 32-bit long.
        printf("%10lu %8lu bytes, flags: %#lx\n", entries[i].uid,
            entries[i].size, entries[i].flags);
    }
}
static void
do_noop(int argc, char** argv)
{
IMAP::RawCommand command("NOOP");
status_t status = sProtocol.ProcessCommand(command);
if (status != B_OK)
error("noop", status);
}
static void
do_raw(int argc, char** argv)
{
    // Send an arbitrary command string to the server and echo every
    // untagged response whose first word matches the issued command.
    // build command back again from the already-split argv
    char command[4096];
    command[0] = '\0';
    for (int i = 1; i < argc; i++) {
        if (i > 1)
            strlcat(command, " ", sizeof(command));
        strlcat(command, argv[i], sizeof(command));
    }
    class RawCommand : public IMAP::Command, public IMAP::Handler {
    public:
        RawCommand(const char* command)
            :
            fCommand(command)
        {
        }
        BString CommandString()
        {
            return fCommand;
        }
        bool HandleUntagged(IMAP::Response& response)
        {
            // Only print responses matching the issued command word.
            if (response.IsCommand(fCommand)) {
                printf("-> %s\n", response.ToString().String());
                return true;
            }
            return false;
        }
    private:
        const char* fCommand;
    };
    RawCommand rawCommand(command);
    status_t status = sProtocol.ProcessCommand(rawCommand);
    if (status != B_OK)
        error("raw", status);
}
// Dispatch table consulted by main() via prefix matching; each row maps a
// command name to its handler and help text. "quit" is intercepted before
// dispatch, so its NULL handler is never invoked. The all-NULL row is the
// terminator.
static cmd_entry sBuiltinCommands[] = {
    {"select", do_select, "Selects a mailbox, defaults to INBOX"},
    {"folders", do_folders, "List of existing folders"},
    {"flags", do_flags,
        "List of all mail UIDs in the mailbox with their flags"},
    {"fetch", do_fetch,
        "Fetch mails via UIDs"},
    {"noop", do_noop, "Issue a NOOP command (will report new messages)"},
    {"raw", do_raw, "Issue a raw command to the server"},
    {"help", do_help, "prints this help text"},
    {"quit", NULL, "exits the application"},
    {NULL, NULL, NULL},
};
static void
do_help(int argc, char** argv)
{
printf("Available commands:\n");
for (cmd_entry* command = sBuiltinCommands; command->name != NULL;
command++) {
printf("%8s - %s\n", command->name, command->help);
}
}
// #pragma mark -
int
main(int argc, char** argv)
{
    // Usage: <server> <username> <password> [anything] — any fourth
    // argument switches on SSL (port 993 instead of 143).
    if (argc < 4)
        usage();
    const char* server = argv[1];
    const char* user = argv[2];
    // NOTE(review): the password is passed on the command line and is
    // therefore visible in the process list — acceptable for a test tool.
    const char* password = argv[3];
    bool useSSL = argc > 4;
    uint16 port = useSSL ? 993 : 143;
    BNetworkAddress address(AF_INET, server, port);
    printf("Connecting to \"%s\" as %s%s, port %u\n", server, user,
        useSSL ? " with SSL" : "", address.Port());
    status_t status = sProtocol.Connect(address, user, password, useSSL);
    if (status != B_OK) {
        error("connect", status);
        return 1;
    }
    // Interactive prompt loop: read a line, split it into argv, and
    // dispatch on a prefix of a builtin command name.
    while (true) {
        printf("> ");
        fflush(stdout);
        char line[1024];
        if (fgets(line, sizeof(line), stdin) == NULL)
            break;
        argc = 0;
        argv = build_argv(line, &argc);
        if (argv == NULL || argc == 0)
            continue;
        // Exit aliases are handled before table dispatch.
        if (!strcmp(argv[0], "quit")
            || !strcmp(argv[0], "exit")
            || !strcmp(argv[0], "q"))
            break;
        // Prefix match: the first table entry whose name starts with the
        // typed word wins (table order breaks ties).
        int length = strlen(argv[0]);
        bool found = false;
        for (cmd_entry* command = sBuiltinCommands; command->name != NULL;
                command++) {
            if (!strncmp(command->name, argv[0], length)) {
                command->func(argc, argv);
                found = true;
                break;
            }
        }
        if (!found) {
            fprintf(stderr, "Unknown command \"%s\". Type \"help\" for a "
                "list of commands.\n", argv[0]);
        }
        free(argv);
    }
    return 0;
}
| 2,780 |
2,504 |
//*********************************************************
//
// Copyright (c) Microsoft. All rights reserved.
// This code is licensed under the MIT License (MIT).
// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.
//
//*********************************************************
#include "pch.h"
#include "Scenario1_MoveCertificate.h"
#include "Scenario1_MoveCertificate.g.cpp"
using namespace winrt;
using namespace winrt::Windows::Foundation;
using namespace winrt::Windows::Foundation::Collections;
using namespace winrt::Windows::Security::Cryptography;
using namespace winrt::Windows::Security::Cryptography::Certificates;
using namespace winrt::Windows::UI::Xaml;
using namespace winrt::Windows::UI::Xaml::Controls;
using namespace winrt::Windows::UI::Xaml::Navigation;
namespace winrt::SDKTemplate::implementation
{
// Resolve both "Personal" certificate stores up front: the app-scoped
// store and the current user's store. The two move handlers below shuttle
// certificates between these stores.
Scenario1_MoveCertificate::Scenario1_MoveCertificate() :
    appStore{ CertificateStores::GetStoreByName(StandardCertificateStoreNames::Personal()) },
    userStore{ CertificateStores::GetUserStoreByName(StandardCertificateStoreNames::Personal()) }
{
    InitializeComponent();
}
fire_and_forget Scenario1_MoveCertificate::OnNavigatedTo(NavigationEventArgs const&)
{
    // Keep the page alive across the co_await below.
    auto lifetime = get_strong();
    // Enumerate all certificates and sort them into the user-store or
    // app-store list view, labelling each with subject + serial number.
    IVectorView<Certificate> certificates = co_await CertificateStores::FindAllAsync();
    for (Certificate certificate : certificates)
    {
        ListViewItem item;
        hstring serialNumberAsString = CryptographicBuffer::EncodeToHexString(CryptographicBuffer::CreateFromByteArray(certificate.SerialNumber()));
        item.Content(box_value(L"Subject: " + certificate.Subject() + L", Serial number:" + serialNumberAsString));
        // Stash the certificate on the item so the move handlers can
        // retrieve it later.
        item.Tag(certificate);
        if (certificate.IsPerUser())
        {
            UserCertificateListView().Items().Append(item);
        }
        else
        {
            AppCertificateListView().Items().Append(item);
        }
    }
}
static void MoveItemBetweenListViews(ListViewItem const& item, ListView const& source, ListView const& destination)
{
    // Detach the item from its current list (when present), then append
    // it to the target list, select it and scroll it into view.
    uint32_t position;
    bool present = source.Items().IndexOf(item, position);
    if (present)
    {
        source.Items().RemoveAt(position);
    }
    destination.Items().Append(item);
    destination.SelectedItem(item);
    destination.ScrollIntoView(item);
}
fire_and_forget Scenario1_MoveCertificate::MoveUserCertificateToAppCertificateStore(IInspectable const&, RoutedEventArgs const&)
{
    // Keep the page alive across the co_await below.
    auto lifetime = get_strong();
    auto item = UserCertificateListView().SelectedItem().as<ListViewItem>();
    if (item)
    {
        auto certificate = item.Tag().as<Certificate>();
        // Add to the app store first; deleting from the user store needs
        // the user's consent and may be refused.
        appStore.Add(certificate);
        if (co_await userStore.RequestDeleteAsync(certificate))
        {
            MoveItemBetweenListViews(item, UserCertificateListView(), AppCertificateListView());
        }
        else
        {
            // Deletion was refused or failed: roll back the copy added to
            // the app store so no duplicate remains.
            appStore.Delete(certificate);
        }
    }
}
fire_and_forget Scenario1_MoveCertificate::MoveAppCertificateToUserCertificateStore(IInspectable const&, RoutedEventArgs const&)
{
    // Keep the page alive across the co_await below.
    auto lifetime = get_strong();
    auto item = AppCertificateListView().SelectedItem().as<ListViewItem>();
    if (item)
    {
        auto certificate = item.Tag().as<Certificate>();
        // Adding to the user store prompts for consent; only delete the
        // app-store copy once the add has succeeded.
        if (co_await userStore.RequestAddAsync(certificate))
        {
            appStore.Delete(certificate);
            MoveItemBetweenListViews(item, AppCertificateListView(), UserCertificateListView());
        }
    }
}
}
| 1,633 |
922 |
<reponame>clementpoiret/sparseml
# neuralmagic: no copyright
# flake8: noqa
# fmt: off
# isort: skip_file
#!/usr/bin/env python
# coding=utf-8
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
import torch
import faiss
from tqdm import tqdm
from typing import List
from elasticsearch import Elasticsearch
from transformers import (DPRContextEncoder, DPRContextEncoderTokenizer,
DPRQuestionEncoder, DPRQuestionEncoderTokenizer)
from chunker import DocumentChunker
from dense_document import DenseDocument
class DenseIndex():
def __init__(self, documents, context_tokenizer, context_model, query_tokenizer, query_model, index_name='dense-index', dimension=768):
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.index_name = index_name
self.faiss_index = faiss.IndexFlatIP(self.dimension)
self.prep_index()
self.generate_index(documents)
    def prep_index(self):
        """(Re)create the Elasticsearch index, dropping any existing one."""
        # Connects to a local Elasticsearch instance with default settings.
        self.es = Elasticsearch()
        if self.es.indices.exists(self.index_name):
            logging.warning(f'Deleting old index for {self.index_name}.')
            self.es.indices.delete(self.index_name)
        self.es.indices.create(index=self.index_name)
    def generate_index(self, documents, max_passages: int = 5): #Works for passages because passages dataset will only have 1 chunk
        """Index each document sparsely (Elasticsearch) and its passages densely (FAISS).

        Args:
            documents: Documents whose ``body`` is indexed.
            max_passages: Cap on the number of passages embedded per document.
        """
        self.documents = documents
        self.doc_bodies = [doc.body for doc in self.documents]
        self.passage_to_doc = {} # maps passage_id -> doc_id
        self.passages = []
        doc_id = 0
        passage_id = 0
        for doc_counter, doc_body in tqdm(enumerate(self.doc_bodies),total=len(self.doc_bodies)):
            # Sparse side: whole document body stored under the 'document'
            # field of the ES index.
            self.es.create(self.index_name, id=doc_id, body={'document': doc_body})
            # Dense side: embed up to max_passages chunks per document.
            # NOTE(review): self.chunk_document is not defined in this class
            # as shown here — presumably supplied by DocumentChunker; confirm.
            passages = self.chunk_document(doc_body)
            for i in range(min(len(passages),max_passages)): #NEED to add a chunking strategy first P, Last P, Best P
                self.passages.append(passages[i])
                input_ids = self.context_tokenizer(passages[i], return_tensors='pt')['input_ids']
                self.faiss_index.add(self.context_model(input_ids).pooler_output.detach().numpy())
                self.passage_to_doc[passage_id] = doc_id
                passage_id += 1
            doc_id += 1
    def dense_search(self, query: str, k: int = 10):
        """Embed the query with the DPR question encoder and return the top-k passages by inner product."""
        input_ids = self.query_tokenizer(query, return_tensors='pt')['input_ids']
        vec_dists, vec_ids = self.faiss_index.search(self.query_model(input_ids).pooler_output.detach().numpy(), k=k)
        # FAISS returns (1, k) arrays for the single query; flatten them.
        vec_dists, vec_ids = list(vec_dists[0]), list(vec_ids[0])
        vec_dists= list(map(float, vec_dists))  # plain floats (JSON friendly)
        results = []
        for dist, passage_id in zip(vec_dists, vec_ids):
            # Resolve the passage back to its parent document.
            document_id = self.passage_to_doc[passage_id]
            result = {
                'document': self.documents[document_id],
                'document_id': document_id,
                'passage': self.passages[passage_id],
                'passage_id': int(passage_id),
                'faiss_dist': dist
            }
            results.append(result)
        return results
def sparse_search(self, query: str, k: int = 10):
body = {
'size': k,
'query': {
'match': {
'chunk': query
}
}
}
results = self.es.search(index=self.index_name, body=body)
hits = results['hits']['hits']
return hits
    def hybrid_search(self, query: str, k: int = 10, dense_weight: float = 0.5):
        """Merge sparse (Elasticsearch) and dense (FAISS) results per id.

        NOTE(review): ``dense_weight`` is currently unused — no weighted
        score fusion is performed; both scores are reported side by side.
        NOTE(review): sparse hit '_id's are *document* ids (es.create uses
        doc_id), yet they key the same dict as dense *passage* ids and are
        later resolved via passage_to_doc. The two id spaces only coincide
        when every document yields exactly one passage — confirm intended
        behavior for multi-passage documents.
        """
        results_index = {}
        for sparse_result in self.sparse_search(query):
            id, score = sparse_result['_id'], sparse_result['_score']
            id = int(id)
            results_index[id] = {'elastic_score': score}
        for dense_result in self.dense_search(query):
            id, score = dense_result['passage_id'], dense_result['faiss_dist']
            if id in results_index:
                results_index[id]['faiss_dist'] = score
            else:
                results_index[id] = {'faiss_dist': score, 'elastic_score': 0}
        results = []
        for passage_id, scores in results_index.items():
            document_id = self.passage_to_doc[passage_id]
            document = self.documents[document_id]
            doc_profile = document.to_dict()
            result = {
                'document': doc_profile,
                'document_id': document_id,
                'passage': self.passages[passage_id],
                'passage_id': int(passage_id),
                'scores': scores
            }
            results.append(result)
        return results
| 2,333 |
4,054 |
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeRepository;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import java.time.Duration;
import java.util.logging.Level;
/**
* This computes, for every application deployment
* - the current fraction of the application's global traffic it receives
* - the max fraction it can possibly receive, assuming traffic is evenly distributed over regions
* and max one region is down at any time. (We can let deployment.xml override these assumptions later).
*
* These two numbers are sent to a config server of each region where it is ultimately
* consumed by autoscaling.
*
* It depends on the traffic metrics collected by DeploymentMetricsMaintainer.
*
* @author bratseth
*/
public class TrafficShareUpdater extends ControllerMaintainer {
    private final ApplicationController applications;
    private final NodeRepository nodeRepository;
    public TrafficShareUpdater(Controller controller, Duration duration) {
        super(controller, duration);
        this.applications = controller.applications();
        this.nodeRepository = controller.serviceRegistry().configServer().nodeRepository();
    }
    @Override
    protected double maintain() {
        // Push the traffic share of every production deployment to its
        // zone's config server. Individual failures are counted and folded
        // into the returned success factor instead of aborting the run.
        Exception lastException = null;
        int attempts = 0;
        int failures = 0;
        for (var application : applications.asList()) {
            for (var instance : application.instances().values()) {
                for (var deployment : instance.deployments().values()) {
                    if ( ! deployment.zone().environment().isProduction()) continue;
                    if (shuttingDown()) return 1.0;
                    try {
                        attempts++;
                        updateTrafficFraction(instance, deployment);
                    }
                    catch (Exception e) {
                        // Some failures due to locked applications are expected and benign
                        failures++;
                        lastException = e;
                    }
                }
            }
        }
        double successFactor = asSuccessFactor(attempts, failures);
        if ( successFactor == 0 )
            log.log(Level.WARNING, "Could not update traffic share on any applications", lastException);
        return successFactor;
    }
    private void updateTrafficFraction(Instance instance, Deployment deployment) {
        // Current share: this zone's QPS relative to the instance's total
        // production QPS (0 when there is no traffic at all).
        double qpsInZone = deployment.metrics().queriesPerSecond();
        double totalQps = instance.deployments().values().stream()
                .filter(i -> i.zone().environment().isProduction())
                .mapToDouble(i -> i.metrics().queriesPerSecond()).sum();
        long prodRegions = instance.deployments().values().stream()
                .filter(i -> i.zone().environment().isProduction())
                .count();
        double currentReadShare = totalQps == 0 ? 0 : qpsInZone / totalQps;
        // Max share assumes even distribution with at most one region down:
        // with n >= 2 regions a zone may absorb 1/(n-1) of the traffic; a
        // single region always takes it all.
        double maxReadShare = prodRegions < 2 ? 1.0 : 1.0 / ( prodRegions - 1.0);
        if (currentReadShare > maxReadShare) // This can happen because the assumption of equal traffic
            maxReadShare = currentReadShare; // distribution can be incorrect
        nodeRepository.patchApplication(deployment.zone(), instance.id(), currentReadShare, maxReadShare);
    }
}
| 1,499 |
34,430 |
<filename>library/src/main/java/com/bumptech/glide/load/model/StreamEncoder.java
package com.bumptech.glide.load.model;
import android.util.Log;
import androidx.annotation.NonNull;
import com.bumptech.glide.load.Encoder;
import com.bumptech.glide.load.Options;
import com.bumptech.glide.load.engine.bitmap_recycle.ArrayPool;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
* An {@link com.bumptech.glide.load.Encoder} that can write an {@link java.io.InputStream} to disk.
*/
public class StreamEncoder implements Encoder<InputStream> {
  private static final String TAG = "StreamEncoder";
  // Pool providing reusable transfer buffers to avoid per-call allocation.
  private final ArrayPool byteArrayPool;
  public StreamEncoder(ArrayPool byteArrayPool) {
    this.byteArrayPool = byteArrayPool;
  }
  /**
   * Copies the stream's contents to {@code file}.
   *
   * @return {@code true} if the whole stream was written and closed
   *     successfully, {@code false} on any I/O error.
   */
  @Override
  public boolean encode(@NonNull InputStream data, @NonNull File file, @NonNull Options options) {
    byte[] buffer = byteArrayPool.get(ArrayPool.STANDARD_BUFFER_SIZE_BYTES, byte[].class);
    boolean success = false;
    OutputStream os = null;
    try {
      os = new FileOutputStream(file);
      int read;
      while ((read = data.read(buffer)) != -1) {
        os.write(buffer, 0, read);
      }
      // Close inside the try so a failed flush-on-close counts as an
      // encode failure; the second close() in the finally block is then a
      // harmless no-op per the Closeable contract.
      os.close();
      success = true;
    } catch (IOException e) {
      if (Log.isLoggable(TAG, Log.DEBUG)) {
        Log.d(TAG, "Failed to encode data onto the OutputStream", e);
      }
    } finally {
      if (os != null) {
        try {
          os.close();
        } catch (IOException e) {
          // Do nothing.
        }
      }
      // Always return the transfer buffer to the pool.
      byteArrayPool.put(buffer);
    }
    return success;
  }
}
| 644 |
3,670 |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implements special functions in TensorFlow."""
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import custom_gradient as tfp_custom_gradient
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import tensorshape_util
__all__ = [
'atan_difference',
'dawsn',
'erfcinv',
'erfcx',
'igammainv',
'igammacinv',
'round_exponential_bump_function',
'lambertw',
'lambertw_winitzki_approx',
'logerfc',
'logerfcx',
'log_gamma_correction',
'log_gamma_difference',
'lbeta',
'owens_t',
]
def atan_difference(x, y, name=None):
  """Difference of arctan(x) and arctan(y).

  Computes arctan(x) - arctan(y) avoiding catastrophic cancellation. This is
  by resorting to the identity:

  ```none
  arctan(x) - arctan(y) = arctan((x - y) / (1 + x * y)) +
                          pi * sign(x) * 1_{x * y < -1)
  ```

  where `1_A` is the indicator function on the set `A`.

  For a derivation of this fact, see [1].

  #### References
  [1] <NAME>, Sum of Arctangents
      https://sites.google.com/site/micdestefano/mathematics/trigonometry/sum-of-arctangents

  Args:
    x: Floating-point Tensor. Should be broadcastable with `y`.
    y: Floating-point Tensor. Should be broadcastable with `x`.
    name: Optional Python `str` naming the operation.

  Returns:
    z: Tensor of same shape and dtype as `x` and `y`.
  """
  with tf.name_scope(name or 'atan_difference'):
    dtype = dtype_util.common_dtype([x, y], tf.float32)
    x = tf.convert_to_tensor(x, dtype=dtype)
    y = tf.convert_to_tensor(y, dtype=dtype)
    difference = tf.math.atan((x - y) / (1 + x * y))
    # When x * y < -1 the identity is off by a full pi * sign(x) branch
    # correction (see reference [1] above).
    difference = difference + tf.where(
        x * y < - 1., np.pi * tf.math.sign(x), 0.)
    # On the boundary x * y == -1 the quotient above is infinite; the
    # limiting value is pi * sign(x) / 2.
    difference = tf.where(
        tf.math.equal(x * y, -1.), np.pi * tf.math.sign(x) / 2., difference)
    return difference
def _dawsn_naive(x):
  """Returns the Dawson Integral computed at x elementwise.

  Piecewise rational approximation: n1/d1 covers |x| < 3.25, n2/d2 covers
  3.25 <= |x| < 6.25, and n3/d3 covers |x| >= 6.25, with the leading
  asymptote 1 / (2 x) used directly beyond |x| > 1e9. The coefficient
  tables follow the Cephes math library's `dawsn` implementation.
  """
  dtype = dtype_util.common_dtype([x], tf.float32)
  numpy_dtype = dtype_util.as_numpy_dtype(dtype)
  x = tf.convert_to_tensor(x, dtype=dtype)
  n1 = [
      1.13681498971755972054E-11,
      8.49262267667473811108E-10,
      1.94434204175553054283E-8,
      9.53151741254484363489E-7,
      3.07828309874913200438E-6,
      3.52513368520288738649E-4,
      -8.50149846724410912031E-4,
      4.22618223005546594270E-2,
      -9.17480371773452345351E-2,
      9.99999999999999994612E-1]
  d1 = [
      2.40372073066762605484E-11,
      1.48864681368493396752E-9,
      5.21265281010541664570E-8,
      1.27258478273186970203E-6,
      2.32490249820789513991E-5,
      3.25524741826057911661E-4,
      3.48805814657162590916E-3,
      2.79448531198828973716E-2,
      1.58874241960120565368E-1,
      5.74918629489320327824E-1,
      1.00000000000000000539E0]
  n2 = [
      5.08955156417900903354E-1,
      -2.44754418142697847934E-1,
      9.41512335303534411857E-2,
      -2.18711255142039025206E-2,
      3.66207612329569181322E-3,
      -4.23209114460388756528E-4,
      3.59641304793896631888E-5,
      -2.14640351719968974225E-6,
      9.10010780076391431042E-8,
      -2.40274520828250956942E-9,
      3.59233385440928410398E-11]
  d2 = [
      1.00000000000000000000E0,
      -6.31839869873368190192E-1,
      2.36706788228248691528E-1,
      -5.31806367003223277662E-2,
      8.48041718586295374409E-3,
      -9.47996768486665330168E-4,
      7.81025592944552338085E-5,
      -4.55875153252442634831E-6,
      1.89100358111421846170E-7,
      -4.91324691331920606875E-9,
      7.18466403235734541950E-11]
  n3 = [
      -5.90592860534773254987E-1,
      6.29235242724368800674E-1,
      -1.72858975380388136411E-1,
      1.64837047825189632310E-2,
      -4.86827613020462700845E-4]
  d3 = [
      1.00000000000000000000E0,
      -2.69820057197544900361E0,
      1.73270799045947845857E0,
      -3.93708582281939493482E-1,
      3.44278924041233391079E-2,
      -9.73655226040941223894E-4]
  n1, d1, n2, d2, n3, d3 = [
      [numpy_dtype(c) for c in lst] for lst in (n1, d1, n2, d2, n3, d3)]
  abs_x = tf.math.abs(x)
  # Small |x|: rational approximation in x**2; dawsn is odd, hence sign(x).
  result_small = abs_x * tf.math.polyval(
      n1, tf.math.square(x)) / tf.math.polyval(d1, tf.math.square(x))
  result_small = tf.math.sign(x) * result_small
  # Medium/large |x|: asymptotic series in 1 / x**2 around 1 / (2 x).
  inv_xsq = tf.math.reciprocal(tf.math.square(x))
  result_medium = tf.math.reciprocal(abs_x) + inv_xsq * (
      tf.math.polyval(n2, inv_xsq) / (abs_x * tf.math.polyval(d2, inv_xsq)))
  result_medium = 0.5 * tf.math.sign(x) * result_medium
  # Beyond |x| > 1e9 the correction terms underflow; keep just 1 / (2 x).
  result_very_large = 0.5 * tf.math.sign(x) * tf.math.reciprocal(abs_x)
  result_large = tf.math.reciprocal(abs_x) + inv_xsq * (
      tf.math.polyval(n3, inv_xsq) / (abs_x * tf.math.polyval(d3, inv_xsq)))
  result_large = 0.5 * tf.math.sign(x) * result_large
  return tf.where(
      abs_x < 3.25,
      result_small,
      tf.where(
          abs_x < 6.25,
          result_medium,
          tf.where(
              abs_x > 1e9,
              result_very_large,
              result_large)))
def _dawsn_fwd(x):
  """Compute output, aux (collaborates with _dawsn_bwd)."""
  output = _dawsn_naive(x)
  return output, (x,)


def _dawsn_bwd(aux, g):
  """Reverse mode impl for dawsn."""
  x, = aux
  y = _dawsn_custom_gradient(x)
  # dawsn solves F'(x) = 1 - 2 x F(x), which yields the derivative below.
  return g * (1. - 2 * x * y)


def _dawsn_jvp(primals, tangents):
  """Computes JVP for dawsn (supports JAX custom derivative)."""
  x, = primals
  dx, = tangents
  y = _dawsn_custom_gradient(x)
  # Same analytic derivative as _dawsn_bwd, applied in forward mode.
  return y, dx * (1. - 2 * x * y)


@tfp_custom_gradient.custom_gradient(
    vjp_fwd=_dawsn_fwd,
    vjp_bwd=_dawsn_bwd,
    jvp_fn=_dawsn_jvp)
def _dawsn_custom_gradient(x):
  """dawsn with the analytic custom gradient attached (see _dawsn_bwd)."""
  return _dawsn_naive(x)
def dawsn(x, name=None):
  """Computes Dawson's integral element-wise.

  Dawson's integral is defined as `exp(-x**2) * int_0^x exp(t**2)`
  with the domain of definition all real numbers.

  This implementation is based on the Cephes math library. Gradients are
  computed analytically via the ODE F'(x) = 1 - 2 x F(x) (see
  `_dawsn_custom_gradient`).

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    dawsn: dawsn evaluated at `x`. A Tensor with the same shape and same
      dtype as `x`.
  """
  with tf.name_scope(name or 'dawsn'):
    return _dawsn_custom_gradient(x)
def erfcinv(z, name=None):
  """Computes the inverse of `tf.math.erfc` of `z` element-wise.

  NOTE: This is mathematically equivalent to computing `erfinv(1 - x)`
  however is more numerically stable.

  Args:
    z: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    erfcinv: erfcinv evaluated at `z`. A Tensor with the same shape and same
      dtype as `z`.
  """
  with tf.name_scope(name or 'erfcinv'):
    z = tf.convert_to_tensor(z)
    # erfc(x) = 2 * Phi(-x * sqrt(2)) implies
    # erfcinv(z) = -ndtri(z / 2) / sqrt(2), with ndtri the standard normal
    # quantile function.
    return -tf.math.ndtri(0.5 * z) * np.sqrt(0.5)
def _erfcx_naive(x):
  """Compute erfcx using a Chebyshev expansion."""
  # The implementation is based on
  # [1] <NAME> and <NAME>,
  #     Chebyshev approximation of (1 + 2 * x) * exp(x**2) * erfc(x)
  #     https://www.ams.org/journals/mcom/1981-36-153/S0025-5718-1981-0595058-X/
  dtype = dtype_util.common_dtype([x], tf.float32)
  numpy_dtype = dtype_util.as_numpy_dtype(dtype)
  x = tf.convert_to_tensor(x, dtype=dtype)
  x_abs = tf.math.abs(x)
  # Map |x| in [0, inf) onto y in [-1, 1), the Chebyshev domain.
  # TODO(b/180390310): The approximation quality can be made better by sweeping
  # the shift parameter '3.75'.
  y = (x_abs - 3.75) / (x_abs + 3.75)
  # The list of coefficients is taken from [1].
  coeff = [
      3e-21,
      9.7e-20,
      2.7e-20,
      -2.187e-18,
      -2.237e-18,
      5.0681e-17,
      7.4182e-17,
      -1.250795e-15,
      -1.864563e-15,
      3.33478119e-14,
      3.2525481e-14,
      -9.65469675e-13,
      1.94558685e-13,
      2.8687950109e-11,
      -6.3180883409e-11,
      -7.75440020883e-10,
      4.521959811218e-09,
      1.0764999465671e-08,
      -2.18864010492344e-07,
      7.74038306619849e-07,
      4.139027986073010e-06,
      -6.9169733025012064e-05,
      4.90775836525808632e-04,
      -2.413163540417608191e-03,
      9.074997670705265094e-03,
      -2.6658668435305752277e-02,
      5.9209939998191890498e-02,
      -8.4249133366517915584e-02,
      -4.590054580646477331e-03,
      1.177578934567401754080,
  ]
  # Clenshaw recurrence evaluating the Chebyshev series at y.
  result = -4e-21
  previous_result = 0.
  for i in range(len(coeff) - 1):
    result, previous_result = (
        2 * y * result - previous_result + coeff[i], result)
  result = y * result - previous_result + coeff[len(coeff) - 1]
  # The series approximates (1 + 2 x) * erfcx(x); divide the factor out.
  result = result / (1. + 2. * x_abs)
  # The approximation is only valid for positive x, so flip the integral
  # using erfcx(-x) = 2 exp(x**2) - erfcx(x).
  # TODO(b/180390310): Improve this approximation for negative values.
  result = tf.where(
      x < 0., 2. * tf.math.exp(tf.math.square(x)) - result, result)
  result = tf.where(tf.math.equal(x, np.inf), numpy_dtype(1.), result)
  return result
def _erfcx_fwd(x):
  """Compute output, aux (collaborates with _erfcx_bwd)."""
  # Auxiliary data is just the primal input, needed to rebuild the gradient.
  return _erfcx_naive(x), (x,)
def _erfcx_bwd(aux, g):
  """Reverse mode impl for erfcx."""
  (x,) = aux
  erfcx_x = _erfcx_custom_gradient(x)
  np_dtype = dtype_util.as_numpy_dtype(
      dtype_util.common_dtype([x], tf.float32))
  # d/dx erfcx(x) = 2 * x * erfcx(x) - 2 / sqrt(pi).
  grad_x = 2. * x * erfcx_x - np_dtype(2. / np.sqrt(np.pi))
  return [grad_x * g]
def _erfcx_jvp(primals, tangents):
  """Computes JVP for erfcx (supports JAX custom derivative)."""
  (x,) = primals
  (dx,) = tangents
  erfcx_x = _erfcx_custom_gradient(x)
  np_dtype = dtype_util.as_numpy_dtype(
      dtype_util.common_dtype([x], tf.float32))
  # d/dx erfcx(x) = 2 * x * erfcx(x) - 2 / sqrt(pi).
  grad_x = 2. * x * erfcx_x - np_dtype(2. / np.sqrt(np.pi))
  return erfcx_x, grad_x * dx
# Wrap the naive implementation so both the TF VJP and the JAX JVP use the
# analytic derivative defined above instead of differentiating the Chebyshev
# loop term by term.
@tfp_custom_gradient.custom_gradient(
    vjp_fwd=_erfcx_fwd,
    vjp_bwd=_erfcx_bwd,
    jvp_fn=_erfcx_jvp)
def _erfcx_custom_gradient(x):
  """Computes Erfcx(x) with correct custom gradient."""
  return _erfcx_naive(x)
def erfcx(x, name=None):
  """Computes the scaled complementary error function exp(x**2) * erfc(x).

  The exp(x**2) scaling keeps the result representable for large positive
  `x`, where `erfc(x)` alone underflows.

  # References
  [1] <NAME> and <NAME>,
      Chebyshev approximation of (1 + 2 * x) * exp(x**2) * erfc(x)
      https://www.ams.org/journals/mcom/1981-36-153/S0025-5718-1981-0595058-X/

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    erfcx: erfcx(x) evaluated at `x`. A Tensor with the same shape and same
      dtype as `x`.
  """
  # The scope previously read 'logerfc', mislabeling this op in graphs and
  # traces; it now matches the public function name.
  with tf.name_scope(name or 'erfcx'):
    dtype = dtype_util.common_dtype([x], tf.float32)
    x = tf.convert_to_tensor(x, dtype=dtype)
    return _erfcx_custom_gradient(x)
def logerfc(x, name=None):
  """Computes the logarithm of `tf.math.erfc` of `x` element-wise.

  NOTE: This is mathematically equivalent to computing `log(erfc(x))`
  however is more numerically stable.

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    logerfc: log(erfc(x)) evaluated at `x`. A Tensor with the same shape and
      same dtype as `x`.
  """
  with tf.name_scope(name or 'logerfc'):
    dtype = dtype_util.common_dtype([x], tf.float32)
    x = tf.convert_to_tensor(x, dtype=dtype)
    # Clamp each branch's argument into its valid half-line so the branch
    # that tf.where discards never produces NaN/Inf.
    clamped_pos = tf.where(x > 0., x, 1.)
    clamped_neg = tf.where(x < 0., x, -1.)
    # For x < 0, erfc(x) is near 2, so the direct log is stable. For x >= 0,
    # erfc underflows long before erfcx does, so go through
    # log(erfcx(x)) - x**2 instead.
    return tf.where(
        x < 0.,
        tf.math.log(tf.math.erfc(clamped_neg)),
        tf.math.log(erfcx(clamped_pos)) - tf.math.square(clamped_pos))
def logerfcx(x, name=None):
  """Computes the logarithm of `tfp.math.erfcx` of `x` element-wise.

  NOTE: This is mathematically equivalent to computing `log(erfcx(x))`
  however is more numerically stable.

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    logerfcx: log(erfcx(x)) evaluated at `x`. A Tensor with the same shape and
      same dtype as `x`.
  """
  # The scope previously read 'logerfc', colliding with the sibling function
  # above in graph/trace output; it now matches this function's name.
  with tf.name_scope(name or 'logerfcx'):
    dtype = dtype_util.common_dtype([x], tf.float32)
    x = tf.convert_to_tensor(x, dtype=dtype)
    # Clamp arguments so the unselected tf.where branch stays finite.
    safe_positive_x = tf.where(x > 0., x, 1.)
    safe_negative_x = tf.where(x < 0., x, -1.)
    return tf.where(
        x < 0.,
        # erfcx goes to infinity fast in the left tail, so use the identity
        # log(erfcx(x)) = log(erfc(x)) + x**2 there.
        tf.math.log(
            tf.math.erfc(safe_negative_x)) + tf.math.square(safe_negative_x),
        tf.math.log(erfcx(safe_positive_x)))
# Implementation of Inverse Incomplete Gamma based on
# <NAME> and <NAME>,
# Computation of the Incomplete Gamma Function Ratios and their Inverse
# https://dl.acm.org/doi/10.1145/22721.23109
def _didonato_eq_twenty_three(log_b, v, a):
  """Compute Equation 23 from Didonato's paper."""
  power_term = tf.math.xlogy(a - 1., v)
  log_correction = tf.math.log1p((1. - a) / (1. + v))
  return -log_b + power_term - log_correction
def _didonato_eq_thirty_two(p, q):
  """Compute Equation 32 from Didonato's paper."""
  dtype = dtype_util.common_dtype([p, q], tf.float32)
  numpy_dtype = dtype_util.as_numpy_dtype(dtype)
  # Rational minimax coefficients from [1], cast to the working dtype.
  numerator_coeffs = [numpy_dtype(c) for c in (
      0.213623493715853, 4.28342155967104, 11.6616720288968,
      3.31125922108741)]
  denominator_coeffs = [numpy_dtype(c) for c in (
      0.36117081018842e-1, 1.27364489782223, 6.40691597760039,
      6.61053765625462, 1.)]
  # Work with the smaller of the two tails for accuracy.
  t = tf.where(
      p < 0.5,
      tf.math.sqrt(-2 * tf.math.log(p)),
      tf.math.sqrt(-2. * tf.math.log(q)))
  rational = (tf.math.polyval(numerator_coeffs, t)
              / tf.math.polyval(denominator_coeffs, t))
  approx = t - rational
  # Mirror the sign for the lower tail.
  return tf.where(p < 0.5, -approx, approx)
def _didonato_eq_thirty_four(a, x):
  """Compute Equation 34 from Didonato's paper."""
  # This function computes `S_n` in equation thirty four.
  dtype = dtype_util.common_dtype([a, x], tf.float32)

  # TODO(b/178793508): Change this tolerance to be dtype dependent.
  tolerance = 1e-4

  def _taylor_series(should_stop, index, partial, series_sum):
    # Next term of the series: the running product prod_k x / (a + k).
    partial = partial * x / (a + index)
    # Freeze already-converged entries; accumulate the rest.
    series_sum = tf.where(should_stop, series_sum, series_sum + partial)
    # TODO(b/178793508): Change the number of iterations to be dtype dependent.
    # Stop once the latest term drops below tolerance, or after 100 terms.
    should_stop = (partial < tolerance) | (index > 100)
    return should_stop, index + 1, partial, series_sum

  # Loop state is (stop flags, term index, current term, running sum); both
  # the first term and the running sum start at 1.
  _, _, _, series_sum = tf.while_loop(
      cond=lambda stop, *_: tf.reduce_any(~stop),
      body=_taylor_series,
      loop_vars=(
          tf.zeros_like(a + x, dtype=tf.bool),
          tf.cast(1., dtype=dtype),
          tf.ones_like(a + x, dtype=dtype),
          tf.ones_like(a + x, dtype=dtype)))
  return series_sum
def _didonato_eq_twenty_five(a, y):
  """Compute Equation 25 from Didonato's paper."""
  # Powers of c1 = (a - 1) * log(y) and of a, reused across the c_i terms.
  c1 = tf.math.xlogy(a - 1., y)
  c1_sq = tf.math.square(c1)
  c1_cub = c1_sq * c1
  c1_fourth = tf.math.square(c1_sq)
  a_sq = tf.math.square(a)
  a_cub = a_sq * a
  # Correction coefficients c2..c5 from [1], eq 25.
  c2 = (a - 1.) * (1. + c1)
  c3 = (a - 1.) * ((3. * a - 5.) / 2. + c1 * (a - 2. - c1 / 2.))
  c4 = (a - 1.) * (
      (c1_cub / 3.) - (3. * a - 5.) * c1_sq / 2. +
      (a_sq - 6. * a + 7.) * c1 + (11. * a_sq - 46. * a + 47.) / 6.)
  c5 = ((a - 1.) * (-c1_fourth / 4. +
                    (11. * a - 17.) * c1_cub / 6 +
                    (-3. * a_sq + 13. * a - 13.) * c1_sq +
                    (2. * a_cub - 25. * a_sq + 72. * a - 61.) * c1 / 2. +
                    (25. * a_cub - 195. * a_sq + 477 * a - 379) / 12.))
  # Nested evaluation of the correction series in inverse powers of y.
  correction = (((c5 / y + c4) / y + c3 / y) + c2) / y
  return y + c1 + correction
def _inverse_igamma_initial_approx(a, p, q, use_p_for_logq=True):
  """Compute an initial guess for `igammainv(a, p)`.

  Compute an initial estimate of `igammainv(a, p)`. This will be further
  refined by Newton-Halley iterations.

  Args:
    a: A positive `float` `Tensor`. Must be broadcastable with `p`.
    p: A `float` `Tensor` whose entries lie in `[0, 1]`.
      Must be broadcastable with `a`. This is `1 - q`.
    q: A `float` `Tensor` whose entries lie in `[0, 1]`.
      Must be broadcastable with `a`. This is `1 - p`.
    use_p_for_logq: `bool` describing whether to compute
      `log(q)` by using `log(1 - p)` or `log(q)`.
      Default value: `True`.

  Returns:
    igamma_approx: Approximation to `igammainv(a, p)`.
  """
  dtype = dtype_util.common_dtype([a, p, q], tf.float32)
  numpy_dtype = dtype_util.as_numpy_dtype(dtype)
  a = tf.convert_to_tensor(a, dtype=dtype)
  p = tf.convert_to_tensor(p, dtype=dtype)
  q = tf.convert_to_tensor(q, dtype=dtype)

  lgamma_a = tf.math.lgamma(a)

  # This ensures that computing log(1 - p) avoids roundoff errors. This is
  # needed since igammacinv and igammainv both use this codepath, with the
  # roles of p and q swapped.
  if use_p_for_logq:
    log_q = tf.math.log1p(-p)
  else:
    log_q = tf.math.log(q)

  # b = q * Gamma(a) in log space; the case analysis below branches on it.
  log_b = log_q + lgamma_a

  result = _didonato_eq_twenty_five(a, -log_b)

  # The code below is for when a < 1.
  v = -log_b - (1. - a) * tf.math.log(-log_b)
  v_sq = tf.math.square(v)

  # This is Equation 24.
  result = tf.where(
      log_b > np.log(0.01),
      -log_b - (1. - a) * tf.math.log(v) - tf.math.log(
          (v_sq + 2. * (3. - a) * v + (2. - a) * (3 - a)) /
          (v_sq + (5. - a) * v + 2.)),
      result)

  result = tf.where(
      log_b >= np.log(0.15),
      _didonato_eq_twenty_three(log_b, v, a),
      result)

  # Small-a regime: two-step fixed point t -> t * exp(t).
  t = tf.math.exp(-np.euler_gamma - tf.math.exp(log_b))
  u = t * tf.math.exp(t)
  result = tf.where(
      (a < 0.3) & (log_b >= np.log(0.35)),
      t * tf.math.exp(u),
      result)

  # These are hand tuned constants to compute (p * Gamma(a + 1)) ** (1 / a)
  # TODO(b/178793508): Change these bounds / computation to be dtype dependent.
  # This is Equation 21.
  u = tf.where((tf.math.exp(log_b) * q > 1e-8) & (q > 1e-5),
               tf.math.pow(p * tf.math.exp(lgamma_a) * a,
                           tf.math.reciprocal(a)),
               # When (1 - p) * Gamma(a) or (1 - p) is small,
               # we can taylor expand Gamma(a + 1) ** 1 / a to get
               # exp(-euler_gamma) for the zeroth order term.
               # Also p ** 1 / a = exp(log(p) / a) = exp(log(1 - q) / a)
               # ~= exp(-q / a) resulting in the following expression.
               tf.math.exp((-q / a) - np.euler_gamma))

  result = tf.where(
      (log_b > np.log(0.6)) | ((log_b >= np.log(0.45)) & (a >= 0.3)),
      u / (1. - (u / (a + 1.))),
      result)

  # The code below is for when a >= 1 (the final tf.where selects it only
  # where `a < 1.` is False).
  sqrt_a = tf.math.sqrt(a)
  s = _didonato_eq_thirty_two(p, q)
  s_sq = tf.math.square(s)
  s_cub = s_sq * s
  s_fourth = tf.math.square(s_sq)
  s_fifth = s_fourth * s

  # This is the Cornish-Fisher 6 term expansion for x (by viewing igammainv as
  # the quantile function for the Gamma distribution). This is equation (31).
  w = a + s * sqrt_a + (s_sq - 1.) / 3.
  w = w + (s_cub - 7. * s) / (36. * sqrt_a)
  w = w - (3. * s_fourth + 7. * s_sq - 16.) / (810 * a)
  w = w + (9. * s_fifth + 256. * s_cub - 433. * s) / (38880 * a * sqrt_a)

  # The code below is for when a > 1. and p > 0.5.
  d = tf.math.maximum(numpy_dtype(2.), a * (a - 1.))
  result_a_large_p_large = tf.where(
      log_b <= -d * np.log(10.),
      _didonato_eq_twenty_five(a, -log_b),
      _didonato_eq_twenty_three(
          log_b, _didonato_eq_twenty_three(log_b, w, a), a))
  result_a_large_p_large = tf.where(w < 3. * a, w, result_a_large_p_large)
  # TODO(b/178793508): Change these bounds / computation to be dtype dependent.
  result_a_large_p_large = tf.where(
      (a >= 500.) & (tf.math.abs(1. - w / a) < 1e-6),
      w, result_a_large_p_large)

  # The code below is for when a > 1. and p <= 0.5.
  z = w
  v = tf.math.log(p) + tf.math.lgamma(a + 1.)

  # The code below follows Equation 35 which involves multiple evaluations of
  # F_i.
  modified_z = tf.math.exp((v + w) / a)
  for _ in range(2):
    s = tf.math.log1p(
        modified_z / (a + 1.) * (
            1. + modified_z / (a + 2.)))
    modified_z = tf.math.exp(
        (v + modified_z - s) / a)

  s = tf.math.log1p(
      modified_z / (a + 1.) * (1. + modified_z / (a + 2.) * (
          1. + modified_z / (a + 3.))))
  modified_z = tf.math.exp((v + modified_z - s) / a)
  z = tf.where(w <= 0.15 * (a + 1.), modified_z, z)

  ls = tf.math.log(_didonato_eq_thirty_four(a, z))
  medium_z = tf.math.exp((v + z - ls) / a)
  result_a_large_p_small = tf.where(
      (z <= 0.01 * (a + 1.)) | (z > 0.7 * (a + 1.)),
      z,
      medium_z * (
          1. - (
              a * tf.math.log(medium_z) - medium_z - v + ls) / (a - medium_z)))

  result_a_large = tf.where(
      p <= 0.5, result_a_large_p_small, result_a_large_p_large)
  result = tf.where(a < 1., result, result_a_large)

  # For a == 1 the Gamma CDF is 1 - exp(-x), so the inverse is exactly
  # -log(q); this also avoids the log(1 - p) roundoff mentioned above since
  # igammacinv and igammainv both use this codepath, switching p and q.
  result = tf.where(tf.math.equal(a, 1.), -log_q, result)

  return result
def _shared_igammainv_computation(a, p, is_igammainv=True):
  """Shared computation for the igammainv/igammacinv.

  Computes an initial approximation to the inverse of the (complementary)
  regularized incomplete gamma function and polishes it with a few
  Newton/Halley steps.
  """
  dtype = dtype_util.common_dtype([a, p], tf.float32)
  numpy_dtype = dtype_util.as_numpy_dtype(dtype)

  # Normalize so that internally `p` is always the lower-tail probability and
  # `q` the upper-tail one, regardless of which inverse was requested.
  if is_igammainv:
    q = 1. - p
  else:
    q = p
    p = 1. - q

  x = _inverse_igamma_initial_approx(a, p, q, use_p_for_logq=is_igammainv)
  # Run 3 steps of Newton-Halley method.
  for _ in range(3):
    # Gamma(a, 1) density at x; the derivative of igamma(a, .) at x.
    factorial = tf.math.exp(a * tf.math.log(x) - x - tf.math.lgamma(a))

    # f / f', using whichever tail is better conditioned for the target.
    f_over_der = tf.where(
        ((p <= 0.9) & is_igammainv) | ((q > 0.9) & (not is_igammainv)),
        (tf.math.igamma(a, x) - p) * x / factorial,
        -(tf.math.igammac(a, x) - q) * x / factorial)
    # f'' / f' for the density above.
    second_der_over_der = -1. + (a - 1.) / x
    modified_x = tf.where(
        tf.math.is_inf(second_der_over_der),
        # Use Newton's method if the second derivative is not available.
        x - f_over_der,
        # Use Halley's method otherwise. Halley's method is:
        # x_{n+1} = x_n - f(x_n) / f'(x_n) * (
        #    1 - f(x_n) / f'(x_n) * 0.5 f''(x_n) / f'(x_n))
        x - f_over_der / (1. - 0.5 * f_over_der * second_der_over_der))
    # If the density underflowed, keep the current iterate rather than
    # dividing by zero.
    x = tf.where(tf.math.equal(factorial, 0.), x, modified_x)

  # Out-of-domain arguments and the exact endpoints.
  x = tf.where((a < 0.) | (p < 0.) | (p > 1.), numpy_dtype(np.nan), x)
  x = tf.where(tf.math.equal(p, 0.), numpy_dtype(0.), x)
  x = tf.where(tf.math.equal(p, 1.), numpy_dtype(np.inf), x)
  return x
def _igammainv_fwd(a, p):
  """Compute output, aux (collaborates with _igammainv_bwd)."""
  # Auxiliary data is just the primal inputs, saved for the backward pass.
  return _shared_igammainv_computation(a, p, is_igammainv=True), (a, p)
def _igammainv_partials(a, x):
  """Compute partial derivatives of `igammainv(a, x)`."""
  # Partials of igamma itself. The raw IgammaGradA kernel has no gradient of
  # its own in TF, so `stop_gradient` is a no-op there; it exists to keep
  # JAX's `custom_jvp` machinery from recursing. It would be nice to raise on
  # differentiation in JAX mode, but `custom_jvp` cannot express that yet.
  # See https://github.com/google/jax/issues/5913 for details.
  # TODO(https://github.com/google/jax/issues/5913): remove stop_gradients.
  igamma_partial_a = tf.raw_ops.IgammaGradA(
      a=tf.stop_gradient(a), x=tf.stop_gradient(x))
  # d/dx igamma(a, x) is the Gamma(a, 1) density x**(a-1) exp(-x) / Gamma(a).
  igamma_partial_x = tf.math.exp(
      -x + tf.math.xlogy(a - 1., x) - tf.math.lgamma(a))

  # Implicit-function theorem applied to p = igamma(a, igammainv(a, p)).
  partial_a = -igamma_partial_a / igamma_partial_x
  partial_x = tf.math.reciprocal(igamma_partial_x)
  return partial_a, partial_x
def _igammainv_bwd(aux, g):
  """Reverse mode impl for igammainv."""
  a, p = aux
  x = _igammainv_custom_gradient(a, p)
  # igammainv inverts igamma in x; its partials follow by implicit
  # differentiation of p = igamma(a, x).
  partial_a, partial_p = _igammainv_partials(a, x)
  return _fix_gradient_for_broadcasting(a, p, partial_a * g, partial_p * g)
def _igammainv_jvp(primals, tangents):
  """Computes JVP for igammainv (supports JAX custom derivative)."""
  a, p = primals
  da, dp = tangents
  # TODO(https://github.com/google/jax/issues/3768): eliminate broadcast_to?
  broadcast_shape = prefer_static.broadcast_shape(
      prefer_static.shape(da), prefer_static.shape(dp))
  da = tf.broadcast_to(da, broadcast_shape)
  dp = tf.broadcast_to(dp, broadcast_shape)

  x = _igammainv_custom_gradient(a, p)
  partial_a, partial_p = _igammainv_partials(a, x)
  return x, partial_a * da + partial_p * dp
# Attach the implicit-function-theorem gradients to the shared Newton-Halley
# solver, for both TF (VJP) and JAX (JVP).
@tfp_custom_gradient.custom_gradient(
    vjp_fwd=_igammainv_fwd,
    vjp_bwd=_igammainv_bwd,
    jvp_fn=_igammainv_jvp)
def _igammainv_custom_gradient(a, p):
  """Computes the inverse of igamma(a, .) at p, with custom gradients."""
  return _shared_igammainv_computation(a, p, is_igammainv=True)
def igammainv(a, p, name=None):
  """Computes the inverse to `tf.math.igamma` with respect to `p`.

  This function is defined as the solution `x` to the equation
  `p = tf.math.igamma(a, x)`.

  # References
  [1] <NAME> and <NAME>,
      Computation of the Incomplete Gamma Function Ratios and their Inverse
      https://dl.acm.org/doi/10.1145/22721.23109

  Args:
    a: A positive `float` `Tensor`. Must be broadcastable with `p`.
    p: A `float` `Tensor` whose entries lie in `[0, 1]`.
      Must be broadcastable with `a`.
    name: Optional Python `str` naming the operation.

  Returns:
    igammainv: igammainv(a, p). Has same type as `a`.
  """
  with tf.name_scope(name or 'igammainv'):
    common = dtype_util.common_dtype([a, p], tf.float32)
    a, p = (tf.convert_to_tensor(t, dtype=common) for t in (a, p))
    return _igammainv_custom_gradient(a, p)
def _igammacinv_fwd(a, p):
  """Compute output, aux (collaborates with _igammacinv_bwd)."""
  # Auxiliary data is just the primal inputs, saved for the backward pass.
  return _shared_igammainv_computation(a, p, is_igammainv=False), (a, p)
def _igammacinv_bwd(aux, g):
  """Reverse mode impl for igammacinv."""
  a, p = aux
  x = _igammacinv_custom_gradient(a, p)
  partial_a, partial_p = _igammainv_partials(a, x)
  # igammac = 1 - igamma, so the partial w.r.t. p picks up a sign flip.
  return _fix_gradient_for_broadcasting(a, p, partial_a * g, -partial_p * g)
def _igammacinv_jvp(primals, tangents):
  """Computes JVP for igammacinv (supports JAX custom derivative)."""
  a, p = primals
  da, dp = tangents
  # TODO(https://github.com/google/jax/issues/3768): eliminate broadcast_to?
  broadcast_shape = prefer_static.broadcast_shape(
      prefer_static.shape(da), prefer_static.shape(dp))
  da = tf.broadcast_to(da, broadcast_shape)
  dp = tf.broadcast_to(dp, broadcast_shape)

  x = _igammacinv_custom_gradient(a, p)
  partial_a, partial_p = _igammainv_partials(a, x)
  # igammac = 1 - igamma, hence the negated p-partial.
  return x, partial_a * da + (-partial_p) * dp
# Attach the implicit-function-theorem gradients (with the igammac sign flip)
# to the shared solver, for both TF (VJP) and JAX (JVP).
@tfp_custom_gradient.custom_gradient(
    vjp_fwd=_igammacinv_fwd,
    vjp_bwd=_igammacinv_bwd,
    jvp_fn=_igammacinv_jvp)
def _igammacinv_custom_gradient(a, p):
  """Computes the inverse of igammac(a, .) at p, with custom gradients."""
  return _shared_igammainv_computation(a, p, is_igammainv=False)
def igammacinv(a, p, name=None):
  """Computes the inverse to `tf.math.igammac` with respect to `p`.

  This function is defined as the solution `x` to the equation
  `p = tf.math.igammac(a, x)`.

  # References
  [1] <NAME> and <NAME>,
      Computation of the Incomplete Gamma Function Ratios and their Inverse
      https://dl.acm.org/doi/10.1145/22721.23109

  Args:
    a: A positive `float` `Tensor`. Must be broadcastable with `p`.
    p: A `float` `Tensor` whose entries lie in `[0, 1]`.
      Must be broadcastable with `a`.
    name: Optional Python `str` naming the operation.

  Returns:
    igammacinv: igammacinv(a, p). Has same type as `a`.
  """
  with tf.name_scope(name or 'igammacinv'):
    common = dtype_util.common_dtype([a, p], tf.float32)
    a, p = (tf.convert_to_tensor(t, dtype=common) for t in (a, p))
    return _igammacinv_custom_gradient(a, p)
def round_exponential_bump_function(x, name=None):
  r"""Function supported on [-1, 1], smooth on the real line, with a round top.

  Define

  ```
  f(x) := exp(-1 / (1 - x**2)) * exp(1), for x in (-1, 1)
  f(x) := 0, for |x| >= 1.
  ```

  One can show that f(x)...

  * is C^\infty on the real line.
  * is supported on [-1, 1].
  * is equal to 1 at x = 0.
  * is strictly increasing on (-1, 0).
  * is strictly decreasing on (0, 1).
  * has gradient = 0 at 0.

  See [Bump Function](https://en.wikipedia.org/wiki/Bump_function)

  Args:
    x: Floating-point Tensor.
    name: Optional Python `str` naming the operation.

  Returns:
    y: Tensor of same shape and dtype as `x`.
  """
  with tf.name_scope(name or 'round_exponential_bump_function'):
    x = tf.convert_to_tensor(x, name='x')
    # Positive exactly on the open support (-1, 1).
    support_term = 1 - tf.math.square(x)
    # reciprocal_no_nan keeps |x| == 1 finite; those entries are discarded
    # by the tf.where below anyway.
    bump = tf.math.exp(1. - tf.math.reciprocal_no_nan(support_term))
    return tf.where(support_term > 0., bump, 0.)
def lambertw_winitzki_approx(z, name=None):
  """Computes Winitzki approximation to Lambert W function at z >= -1/exp(1).

  The approximation for z >= -1/exp(1) will be used as a starting point in the
  iterative algorithm to compute W(z). See _lambertw_principal_branch() below.

  See
  https://www.researchgate.net/post/Is_there_approximation_to_the_LambertWx_function
  and in particular (38) in
  https://pdfs.semanticscholar.org/e934/24f33e2742016ef18c36a80788400d2f17b4.pdf

  Args:
    z: value for which W(z) should be computed. Expected z >= -1/exp(1). If not
     then function will fail due to log(<0).
    name: optionally pass name for output.

  Returns:
    lambertw_winitzki_approx: Approximation for W(z) for z >= -1/exp(1).
  """
  with tf.name_scope(name or 'lambertw_winitzki_approx'):
    z = tf.convert_to_tensor(z)
    # Eq (38) of the reference above; also eq (10) in
    # https://hal.archives-ouvertes.fr/hal-01586546/document
    log1p_z = tf.math.log1p(z)
    nested_log1p = tf.math.log1p(log1p_z)
    return log1p_z * (1. - nested_log1p / (2. + log1p_z))
def _fritsch_iteration(unused_should_stop, z, w, tol):
  """Root finding iteration for W(z) using Fritsch iteration."""
  # See Section 2.3 in https://arxiv.org/pdf/1209.0735.pdf
  # Approximate W(z) by viewing iterative algorithm as multiplicative factor
  #
  #  W(n+1) = W(n) * (1 + error)
  #
  # where error can be expressed as a function of z and W(n). See paper for
  # details.
  z = tf.convert_to_tensor(z)
  w = tf.convert_to_tensor(w)
  # zn = log(z / w) - w = log(z) - (w + log(w)); zero exactly at the root.
  zn = tf.math.log(tf.abs(z)) - tf.math.log(tf.abs(w)) - w
  wp1 = w + 1.0
  q = 2. * wp1 * (wp1 + 2. / 3. * zn)
  q_minus_2zn = q - 2. * zn
  error = zn / wp1 * (1. + zn / q_minus_2zn)
  # Check absolute tolerance (not relative). Here the iteration error is
  # for relative tolerance, as W(n+1) = W(n) * (1 + error). Use
  # W(n+1) - W(n) = W(n) * error to get absolute tolerance.
  converged = abs(error * w) <= tol
  should_stop_next = tf.reduce_all(converged)
  # NOTE(review): the return order is (stop, w_next, z, tol) while the
  # parameter order is (stop, z, w, tol) -- if this were used directly as a
  # tf.while_loop body with matching loop_vars, z and w would swap on every
  # iteration. The Lambert W loop below uses _halley_iteration instead;
  # confirm the intended ordering before wiring this body into a loop.
  return should_stop_next, w * (1. + error), z, tol
def _halley_iteration(unused_should_stop, w, z, tol, iteration_count):
  """Halley's method on root finding of w for the equation w * exp(w) = z."""
  w = tf.convert_to_tensor(w)
  z = tf.convert_to_tensor(z)
  # f(w) = w - z * exp(-w) has the same roots as w * exp(w) - z and is better
  # behaved for the update below.
  f = w - z * tf.math.exp(-w)
  # Halley step: delta = f / (f' - f * f'' / (2 * f')), specialized to this f.
  delta = f / (w + 1. - 0.5 * (w + 2.) * f / (w + 1.))
  w_next = w - delta
  # Relative-tolerance convergence test against the updated iterate.
  converged = tf.math.abs(delta) <= tol * tf.math.abs(w_next)
  # We bound the number of iterations to be at most a 100.

  # When x is close to the branch point, the derivatives tend to very large
  # values, which causes the iteration to be slow. For x <= 0., 100 iterations
  # seems to be enough to guarantee a relative error of at most 1e-6.

  # The Winitzki approximation has a relative error of at most
  # 0.01. When x >= 0., the first through third derivatives are bounded such
  # that coupled with the initial approximation, we are in the realm of cubic
  # convergence.
  should_stop_next = tf.reduce_all(converged) | (iteration_count >= 100)
  return should_stop_next, w_next, z, tol, iteration_count + 1
def _lambertw_principal_branch(z, name=None):
  """Computes Lambert W of `z` element-wise at the principal (k = 0) branch.

  The Lambert W function is the inverse of `z = y * tf.exp(y)` and is a
  many-valued function. Here `y = W_0(z)`, where `W_0` is the Lambert W function
  evaluated at the 0-th branch (aka principal branch).

  Args:
    z: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).
      Default value: `None` (i.e., 'lambertw_principal_branch').

  Returns:
    lambertw_principal_branch: A Tensor with same shape and same dtype as `z`.
  """
  with tf.name_scope(name or 'lambertw_principal_branch'):
    z = tf.convert_to_tensor(z)
    np_finfo = np.finfo(dtype_util.as_numpy_dtype(z.dtype))
    # Twice the dtype resolution: tight in practice, yet loose enough for the
    # Halley iteration to terminate quickly.
    tolerance = tf.convert_to_tensor(2. * np_finfo.resolution, dtype=z.dtype)
    # Start while loop with the initial value at the approximate Lambert W
    # solution, instead of 'z' (for z > -1 / exp(1)). Using 'z' has bad
    # convergence properties especially for large z (z > 5).
    z0 = tf.where(z > -np.exp(-1.), lambertw_winitzki_approx(z), z)
    # Loop vars follow _halley_iteration's signature
    # (stop, w, z, tol, iteration_count); index [1] extracts the converged w.
    z0 = tf.while_loop(cond=lambda stop, *_: ~stop,
                       body=_halley_iteration,
                       loop_vars=(False, z0, z, tolerance, 0))[1]
    return tf.cast(z0, dtype=z.dtype)
def _lambert_fwd(z):
  """Compute output, aux (collaborates with _lambert_bwd)."""
  # Auxiliary data is just the primal input, needed to rebuild the gradient.
  return _lambertw_principal_branch(z), (z,)
def _lambert_bwd(aux, g):
  """Reverse mode impl for lambert."""
  (z,) = aux
  wz = _lambert_custom_gradient(z)
  # dW/dz = W(z) / (z * (1 + W(z))) away from z = 0. That formula is 0/0 at
  # z = 0, but its continuous extension (l'Hospital) equals 1 there, so pin
  # that value explicitly with tf.where.
  grad = tf.where(
      tf.equal(z, 0.), tf.ones([], wz.dtype), wz / (z * (1. + wz)))
  return g * grad
def _lambert_jvp(primals, tangents):
  """Computes JVP for lambert (supports JAX custom derivative)."""
  (z,) = primals
  (dz,) = tangents
  wz = _lambert_custom_gradient(z)
  # dW/dz = W(z) / (z * (1 + W(z))) away from z = 0. That formula is 0/0 at
  # z = 0, but its continuous extension (l'Hospital) equals 1 there, so pin
  # that value explicitly with tf.where.
  pz = tf.where(tf.equal(z, 0.), tf.ones([], wz.dtype), wz / (z * (1. + wz)))
  return wz, pz * dz
# Attach the analytic derivative above to the principal-branch solver, for
# both TF (VJP) and JAX (JVP).
@tfp_custom_gradient.custom_gradient(
    vjp_fwd=_lambert_fwd,
    vjp_bwd=_lambert_bwd,
    jvp_fn=_lambert_jvp)
def _lambert_custom_gradient(z):
  """Principal-branch Lambert W with custom gradients attached."""
  return _lambertw_principal_branch(z)
def lambertw(z, name=None):
  """Computes Lambert W of `z` element-wise.

  The Lambert W function is the inverse of `z = u * exp(u)`, i. e., it is the
  function that satisfies `u = W(z) * exp(W(z))`. The solution cannot be
  expressed as a composition of elementary functions and is thus part of the
  *special* functions in mathematics. See
  https://en.wikipedia.org/wiki/Lambert_W_function.

  In general it is a complex-valued function with multiple branches. The `k=0`
  branch is known as the *principal branch* of the Lambert W function and is
  implemented here. See also `scipy.special.lambertw`.

  This code returns only the real part of the image of the Lambert W function.

  # References

  <NAME>., <NAME>., <NAME>. et al. On the LambertW function.
  Adv Comput Math 5, 329-359 (1996) doi:10.1007/BF02124750

  Args:
    z: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    lambertw: The Lambert W function evaluated at `z`. A Tensor with same shape
      and same dtype as `z`.
  """
  with tf.name_scope(name or 'lambertw'):
    # All numerical work happens in the custom-gradient-wrapped solver.
    return _lambert_custom_gradient(tf.convert_to_tensor(z))
def log_gamma_correction(x, name=None):
  """Returns the error of the Stirling approximation to lgamma(x) for x >= 8.

  This is useful for accurately evaluating ratios between Gamma functions, as
  happens when trying to compute Beta functions.

  Specifically,
  ```
  lgamma(x) approx (x - 0.5) * log(x) - x + 0.5 log (2 pi)
            + log_gamma_correction(x)
  ```
  for x >= 8.

  This is the function called Delta in [1], eq (30). We implement it with
  the rational minimax approximation given in [1], eq (32).

  References:

  [1] <NAME> Morris, "Significant Digit Computation of the Incomplete Beta
      Function Ratios", 1988. Technical report NSWC TR 88-365, Naval Surface
      Warfare Center (K33), Dahlgren, VA 22448-5000. Section IV, Auxiliary
      Functions. https://apps.dtic.mil/dtic/tr/fulltext/u2/a210118.pdf

  Args:
    x: Floating-point Tensor at which to evaluate the log gamma correction
      elementwise. The approximation is accurate when x >= 8.
    name: Optional Python `str` naming the operation.

  Returns:
    lgamma_corr: Tensor of elementwise log gamma corrections.
  """
  with tf.name_scope(name or 'log_gamma_correction'):
    dtype = dtype_util.common_dtype([x], tf.float32)
    x = tf.convert_to_tensor(x, dtype=dtype)

    # Minimax rational approximation coefficients from [1], eq (32).
    minimax_coeff = tf.constant([
        0.833333333333333e-01,
        -0.277777777760991e-02,
        0.793650666825390e-03,
        -0.595202931351870e-03,
        0.837308034031215e-03,
        -0.165322962780713e-02,
    ], dtype=dtype)

    # Horner evaluation of the polynomial in 1/x**2, then one final 1/x.
    recip = 1 / x
    recip_sq = recip * recip
    horner_sum = minimax_coeff[5]
    for idx in reversed(range(5)):
      horner_sum = horner_sum * recip_sq + minimax_coeff[idx]
    return horner_sum * recip
def _fix_gradient_for_broadcasting(a, b, grad_a, grad_b):
  """Reduces broadcast dimensions for a custom gradient."""
  # Fast path: statically identical shapes need no reduction at all.
  if (tensorshape_util.is_fully_defined(a.shape) and
      tensorshape_util.is_fully_defined(b.shape) and
      a.shape == b.shape):
    return [grad_a, grad_b]

  shape_a = tf.shape(a)
  shape_b = tf.shape(b)
  # Axes along which each input was broadcast in the forward pass; gradients
  # must be summed over those axes and reshaped back.
  reduce_a, reduce_b = tf.raw_ops.BroadcastGradientArgs(
      s0=shape_a, s1=shape_b)
  grad_a = tf.reshape(tf.reduce_sum(grad_a, axis=reduce_a), shape_a)
  grad_b = tf.reshape(tf.reduce_sum(grad_b, axis=reduce_b), shape_b)
  return [grad_a, grad_b]
def _log_gamma_difference_big_y(x, y):
  """Returns lgamma(y) - lgamma(x + y), accurately if 0 <= x <= y and y >= 8.

  This is more accurate than subtracting lgammas directly because lgamma grows
  as `x log(x) - x + o(x)`, and thus subtracting the value of lgamma for two
  close, large arguments incurs catastrophic cancellation.

  The method is to partition lgamma into the Stirling approximation and the
  correction `log_gamma_correction`, symbolically cancel the former, and compute
  and subtract the latter.

  Args:
    x: Floating-point Tensor. `x` should be non-negative, and elementwise no
      more than `y`.
    y: Floating-point Tensor. `y` should be elementwise no less than 8.

  Returns:
    lgamma_diff: Floating-point Tensor, the difference lgamma(y) - lgamma(x+y),
      computed elementwise.
  """
  # Difference of the Stirling terms of lgamma(y) and lgamma(x + y), with the
  # large leading pieces cancelled symbolically rather than numerically.
  half_shifted_sum = x + y - 0.5
  cancelled_stirling = (-1 * half_shifted_sum * tf.math.log1p(x / y)
                        - x * tf.math.log(y) + x)
  # Residual accuracy comes from the minimax correction terms.
  correction = log_gamma_correction(y) - log_gamma_correction(x + y)
  return correction + cancelled_stirling
def _log_gamma_difference_naive_gradient(x, y):
  """lgamma(y) - lgamma(x + y), with autodiff (not custom) gradients."""
  # Direct subtraction is accurate enough for y < 8; the Stirling-based form
  # takes over where cancellation would otherwise dominate.
  stable = _log_gamma_difference_big_y(x, y)
  direct = tf.math.lgamma(y) - tf.math.lgamma(x + y)
  return tf.where(y >= 8, stable, direct)
def _log_gamma_difference_fwd(x, y):
  """Compute output, aux (collaborates with _log_gamma_difference_bwd)."""
  primal = _log_gamma_difference_naive_gradient(x, y)
  return primal, (x, y)
def _log_gamma_difference_bwd(aux, g):
  """Reverse mode impl for log-gamma-diff."""
  x, y = aux
  # The gradient is computed naively as a difference of digammas because
  # (i) digamma grows slower than gamma, so cancellation shows up much later,
  # and (ii) doing better is work. This also matches the gradient of the
  # naive lgamma-difference forward pass.
  #
  # Note: this assumes x and y share a shape; `log_gamma_difference` arranges
  # that by pre-broadcasting before the call.
  partial_x = -tf.math.digamma(x + y)
  partial_y = tf.math.digamma(y) + partial_x
  return _fix_gradient_for_broadcasting(x, y, partial_x * g, partial_y * g)
def _log_gamma_difference_jvp(primals, tangents):
  """Computes JVP for log-gamma-difference (supports JAX custom derivative)."""
  x, y = primals
  dx, dy = tangents
  # TODO(https://github.com/google/jax/issues/3768): eliminate broadcast_to?
  broadcast_shape = prefer_static.broadcast_shape(
      prefer_static.shape(dx), prefer_static.shape(dy))
  dx = tf.broadcast_to(dx, broadcast_shape)
  dy = tf.broadcast_to(dy, broadcast_shape)
  # See the rationale in _log_gamma_difference_bwd for the naive digammas.
  partial_x = -tf.math.digamma(x + y)
  partial_y = tf.math.digamma(y) + partial_x
  return (_log_gamma_difference_naive_gradient(x, y),
          partial_x * dx + partial_y * dy)
# Attach the digamma-based derivatives to the forward computation, for both
# TF (VJP) and JAX (JVP).
@tfp_custom_gradient.custom_gradient(
    vjp_fwd=_log_gamma_difference_fwd,
    vjp_bwd=_log_gamma_difference_bwd,
    jvp_fn=_log_gamma_difference_jvp)
def _log_gamma_difference_custom_gradient(x, y):
  """lgamma(y) - lgamma(x + y), with custom gradients attached."""
  return _log_gamma_difference_naive_gradient(x, y)
def log_gamma_difference(x, y, name=None):
  """Returns lgamma(y) - lgamma(x + y), accurately.

  This is more accurate than subtracting lgammas directly because lgamma grows
  as `x log(x) - x + o(x)`, and thus subtracting the value of lgamma for two
  close, large arguments incurs catastrophic cancellation.

  When `y >= 8`, the method is to partition lgamma into the Stirling
  approximation and the correction `log_gamma_correction`, symbolically cancel
  the former, and compute and subtract the latter.

  Args:
    x: Floating-point Tensor. `x` should be non-negative, and elementwise no
      more than `y`.
    y: Floating-point Tensor. `y` should be positive.
    name: Optional Python `str` naming the operation.

  Returns:
    lgamma_diff: Floating-point Tensor, the difference lgamma(y) - lgamma(x+y),
      computed elementwise.
  """
  with tf.name_scope(name or 'log_gamma_difference'):
    common = dtype_util.common_dtype([x, y], tf.float32)
    x, y = (tf.convert_to_tensor(t, dtype=common) for t in (x, y))
    return _log_gamma_difference_custom_gradient(x, y)
def _lbeta_naive_gradient(x, y):
  """Computes log(Beta(x, y)) with autodiff gradients only."""
  # Flip args if needed so y >= x. Beta is mathematically symmetric but our
  # method for computing it is not.
  x, y = tf.minimum(x, y), tf.maximum(x, y)

  log2pi = tf.constant(np.log(2 * np.pi), dtype=x.dtype)
  # Two large arguments case: y >= x >= 8.
  # Stirling-based form with the dominant terms cancelled analytically;
  # log_gamma_correction supplies the residual accuracy.
  log_beta_two_large = (0.5 * log2pi
                        - 0.5 * tf.math.log(y)
                        + log_gamma_correction(x)
                        + log_gamma_correction(y)
                        - log_gamma_correction(x + y)
                        + (x - 0.5) * tf.math.log(x / (x + y))
                        - y * tf.math.log1p(x / y))

  # One large argument case: x < 8, y >= 8.
  # Only the lgamma difference in y needs the cancellation-safe path.
  log_beta_one_large = tf.math.lgamma(x) + _log_gamma_difference_big_y(x, y)

  # Small arguments case: x <= y < 8.
  # Direct lgamma subtraction is accurate enough here.
  log_beta_small = tf.math.lgamma(x) + tf.math.lgamma(y) - tf.math.lgamma(x + y)

  # Reference [1] has two more arms, for cases where x or y falls into the
  # interval (2, 8). In these cases, reference [1] recommends iteratively
  # reducing the arguments using the identity
  #   B(x, y) = B(x - 1, y) * (x - 1) / (x + y - 1)
  # so they fall in the interval [1, 2]. We choose not to do that here to avoid
  # a TensorFlow while loop, and hope that subtracting lgammas will be accurate
  # enough for the user's purposes.
  return tf.where(x >= 8,
                  log_beta_two_large,
                  tf.where(y >= 8,
                           log_beta_one_large,
                           log_beta_small))
def _lbeta_fwd(x, y):
  """Compute output, aux (collaborates with _lbeta_bwd)."""
  primal = _lbeta_naive_gradient(x, y)
  return primal, (x, y)
def _lbeta_bwd(aux, g):
  """Reverse mode impl for lbeta."""
  x, y = aux
  # d/dx lbeta(x, y) = digamma(x) - digamma(x + y), and symmetrically for y.
  digamma_sum = tf.math.digamma(x + y)
  partial_x = tf.math.digamma(x) - digamma_sum
  partial_y = tf.math.digamma(y) - digamma_sum
  return _fix_gradient_for_broadcasting(x, y, partial_x * g, partial_y * g)
def _lbeta_jvp(primals, tangents):
  """Computes JVP for log-beta (supports JAX custom derivative)."""
  x, y = primals
  dx, dy = tangents
  # TODO(https://github.com/google/jax/issues/3768): eliminate broadcast_to?
  broadcast_shape = prefer_static.broadcast_shape(
      prefer_static.shape(dx), prefer_static.shape(dy))
  dx = tf.broadcast_to(dx, broadcast_shape)
  dy = tf.broadcast_to(dy, broadcast_shape)
  # d/dx lbeta(x, y) = digamma(x) - digamma(x + y), and symmetrically for y.
  digamma_sum = tf.math.digamma(x + y)
  partial_x = tf.math.digamma(x) - digamma_sum
  partial_y = tf.math.digamma(y) - digamma_sum
  return _lbeta_naive_gradient(x, y), partial_x * dx + partial_y * dy
# Attach the hand-written VJP/JVP rules to the numerically-careful forward
# computation, so autodiff does not differentiate through its tf.where arms.
@tfp_custom_gradient.custom_gradient(
    vjp_fwd=_lbeta_fwd,
    vjp_bwd=_lbeta_bwd,
    jvp_fn=_lbeta_jvp)
def _lbeta_custom_gradient(x, y):
  """Computes log(Beta(x, y)) with correct custom gradient."""
  return _lbeta_naive_gradient(x, y)
@tf.function(autograph=False)
def lbeta(x, y, name=None):
  """Returns log(Beta(x, y)).

  Semantically this equals `lgamma(x) + lgamma(y) - lgamma(x + y)`, but this
  implementation is more accurate for arguments of 8 or more.  The naive
  formula loses precision to catastrophic cancellation between the lgamma
  terms; here lgamma is decomposed into its Stirling approximation plus an
  explicit `log_gamma_correction`, and the large Stirling terms are cancelled
  analytically.

  Gradients are those of the naive forward computation, because (i) digamma
  grows much more slowly than lgamma, so cancellation is far less severe, and
  (ii) this is simpler and faster than a bespoke accurate gradient.

  References:
    [1] DiDonato and Morris, "Significant Digit Computation of the Incomplete
      Beta Function Ratios", 1988. Technical report NSWC TR 88-365, Naval
      Surface Warfare Center (K33), Dahlgren, VA 22448-5000. Section IV,
      Auxiliary Functions.
      https://apps.dtic.mil/dtic/tr/fulltext/u2/a210118.pdf

  Args:
    x: Floating-point Tensor.
    y: Floating-point Tensor.
    name: Optional Python `str` naming the operation.

  Returns:
    lbeta: Tensor of elementwise log beta(x, y).
  """
  with tf.name_scope(name or 'tfp_lbeta'):
    common_dtype = dtype_util.common_dtype([x, y], tf.float32)
    x = tf.convert_to_tensor(x, dtype=common_dtype)
    y = tf.convert_to_tensor(y, dtype=common_dtype)
    return _lbeta_custom_gradient(x, y)
# The Owen's T implementation below is based on
# [1] <NAME>., <NAME>., Fast and Accurate Calculation of Owen's T-Function
# Journal of Statistical Software http://www.jstatsoft.org/v05/i05/paper
def _owens_t_method1(h, a, m):
  """OwensT Method T1 using series expansions.

  Evaluates a particular series expansion of OwensT, truncated after `m`
  terms (per-element: each element stops once its index passes `m`).
  """
  dtype = dtype_util.common_dtype([h, a], tf.float32)
  numpy_dtype = dtype_util.as_numpy_dtype(dtype)
  neg_half_h_sq = -0.5 * tf.math.square(h)
  a_sq = tf.math.square(a)

  def step(done, i, ai, di, gi, acc):
    # One series term; elements that are done keep their accumulated sum.
    next_ai = a_sq * ai
    next_di = gi - di
    next_gi = neg_half_h_sq / i * gi
    next_acc = tf.where(done, acc, acc + next_di * next_ai / (2. * i - 1.))
    return i >= m, i + 1., next_ai, next_di, next_gi, next_acc

  ai0 = a / numpy_dtype(2 * np.pi)
  di0 = tf.math.expm1(neg_half_h_sq)
  gi0 = neg_half_h_sq * tf.math.exp(neg_half_h_sq)
  acc0 = tf.math.atan(a) / numpy_dtype(2 * np.pi) + ai0 * di0
  # Broadcast h against a via constant-tensor multiplication rather than
  # static/dynamic shape logic, which is robust to partially-static shapes.
  done0 = tf.cast(tf.zeros_like(h) * tf.zeros_like(a), dtype=tf.bool)
  (_, _, _, _, _, series_sum) = tf.while_loop(
      cond=lambda done, *_: tf.reduce_any(~done),
      body=step,
      loop_vars=(done0,
                 tf.cast(2., dtype=dtype),
                 ai0,
                 di0,
                 gi0,
                 acc0))
  return series_sum
def _owens_t_method2(h, a, m):
  """OwensT Method T2 using Power series.

  Approximates the (1 + x^2)^-1 factor of the OwensT integrand by a power
  series and integrates it term by term, accumulating `m` terms.
  """
  dtype = dtype_util.common_dtype([h, a], tf.float32)
  numpy_dtype = dtype_util.as_numpy_dtype(dtype)
  h_sq = tf.math.square(h)
  neg_a_sq = -tf.math.square(a)
  max_index = 2 * m + 1.
  y = tf.math.reciprocal(h_sq)

  def step(done, i, summand, term, acc):
    # Recurrence for the next power-series term; finished elements hold.
    next_summand = y * (term - i * summand)
    next_term = neg_a_sq * term
    next_acc = tf.where(done, acc, acc + next_summand)
    return done | (i >= max_index) & ~done | (i >= max_index), i + 2., next_summand, next_term, next_acc

  summand0 = -0.5 * tf.math.erf(a * h) / h
  term0 = a * tf.math.exp(
      -0.5 * tf.math.square(a * h)) / numpy_dtype(np.sqrt(2 * np.pi))
  # Broadcast h against a via constant-tensor multiplication (robust to
  # partially-static shapes).
  done0 = tf.cast(tf.zeros_like(h) * tf.zeros_like(a), dtype=tf.bool)
  (_, _, _, _, series_sum) = tf.while_loop(
      cond=lambda done, *_: tf.reduce_any(~done),
      body=step,
      loop_vars=(done0,
                 tf.cast(1., dtype=dtype),
                 summand0,
                 term0,
                 summand0))
  return (series_sum * tf.math.exp(-0.5 * h_sq) /
          numpy_dtype(np.sqrt(2 * np.pi)))
def _owens_t_method3(h, a):
  """OwensT Method T3, using Chebyshev series."""
  # Method T3, which is evaluation approximating the (1 + x^2)^-1 term in the
  # denominator of the OwensT integrand via chebyshev series, and integrating
  # this term by term to get a series expansion.
  # The 31 fixed coefficients below come from that Chebyshev expansion; the
  # loop over them is fully unrolled at trace time.
  coefficients = np.array([
      0.99999999999999999999999729978162447266851932041876728736094298092,
      -0.9999999999999999999946705637967839181062653325188532341679987487,
      0.99999999999999999824849349313270659391127814689133077036298754586,
      -0.9999999999999997703859616213643405880166422891953033591551179153,
      0.99999999999998394883415238173334565554173013941245103172035286759,
      -0.9999999999993063616095509371081203145247992197457263066869044528,
      0.99999999997973363404094644295992298705901604112382452758559037676,
      -0.9999999995749584120690466801190516397534123780375655213594441702,
      0.99999999332262341933753249439201609471582390767861031080974566177,
      -0.9999999188923242461073033481053037468263536806742737922476636768,
      0.99999921951434836744028537835494208830551296800829326291600811289,
      -0.9999939351372067128309979219133169714722271997418573865750972505,
      0.99996135597690552745362392866517133091672395614263398912807169603,
      -0.9997955636651394602640678896963029382098775775864121129307978458,
      0.99909278962961710015348625142385059005136666194734431542322608252,
      -0.9965938374119182021193086204326146003381573358628885806714509388,
      0.98910017138386127038463510314625339359073956513420458166238478926,
      -0.9700785580406933145213319822037627715121601685824945133478464073,
      0.92911438683263187495758525500033707204091967947532160289872782771,
      -0.8542058695956156057286980736842905011429254735181323743367879525,
      0.73796526033030091233118357742803709382964420335559408722681794195,
      -0.5852346988283739457012859900378515414416468058761587864517163279,
      0.41599777614567630616566166358186846050387420534301419658012217494,
      -0.2588210875241943574388730510317252236407805082485246378222935376,
      0.13755358251638926485046469515002655850557890194106175657270903465,
      -0.0607952766325955730493900985022020434830339794955745989150270485,
      0.02163376832998715280598364838403905142754886795307972945570602292,
      -0.0059340569345518672987699581418120390055001422042884348392721826,
      0.00117434148183329465104745761827392105533338601068118659634858706,
      -1.4891556133503689340734532606898813301663424844055299815106940E-4,
      9.07235432079435758771092950798881466945428151426884488484154734E-6])
  a_squared = tf.math.square(a)
  h_squared = tf.math.square(h)
  y = tf.math.reciprocal(h_squared)
  # vi / zi follow the recurrences of the integrated series; zi is seeded
  # with 0.5 * erf(a h / sqrt(2)) / h.
  vi = a * tf.math.exp(-0.5 * tf.math.square(a * h)) / np.sqrt(2 * np.pi)
  zi = 0.5 * tf.math.erf(a * h / np.sqrt(2.)) / h
  result = 0.
  for i in range(31):
    result = result + zi * coefficients[i]
    zi = y * ((2 * i + 1.) * zi - vi)
    vi = a_squared * vi
  return result * tf.math.exp(-0.5 * h_squared) / np.sqrt(2 * np.pi)
def _owens_t_method4(h, a, m):
  """OwensT Method T4, which is a reordered evaluation of method T2."""
  dtype = dtype_util.common_dtype([h, a], tf.float32)
  h_sq = tf.math.square(h)
  neg_a_sq = -tf.math.square(a)
  max_index = 2 * m + 1.

  def step(done, i, term, coeff, acc):
    # Reordered T2 recurrence; finished elements keep their sum.
    next_coeff = (1. - h_sq * coeff) / i
    next_term = neg_a_sq * term
    next_acc = tf.where(done, acc, acc + next_coeff * next_term)
    return i >= max_index, i + 2., next_term, next_coeff, next_acc

  term0 = a * tf.math.exp(
      -0.5 * h_sq * (1 - neg_a_sq)) / (2 * np.pi)
  # Broadcast h against a via constant-tensor multiplication (robust to
  # partially-static shapes).
  done0 = tf.cast(tf.zeros_like(h) * tf.zeros_like(a), dtype=tf.bool)
  coeff0 = tf.ones_like(h) * tf.ones_like(a)
  (_, _, _, _, series_sum) = tf.while_loop(
      cond=lambda done, *_: tf.reduce_any(~done),
      body=step,
      loop_vars=(done0,
                 tf.cast(3., dtype=dtype),
                 term0,
                 coeff0,
                 term0))
  return series_sum
def _owens_t_method5(h, a):
  """OwensT Method T5 which uses Gaussian Quadrature."""
  # Method T5, which is a gaussian quadrature approximation of the integral.
  # These are shifted and squared.
  # 13 fixed quadrature nodes/weights; the integrand is evaluated at each
  # node and combined via logsumexp for numerical stability.
  quadrature_points = np.array([
      0.35082039676451715489E-02, 0.31279042338030753740E-01,
      0.85266826283219451090E-01, 0.16245071730812277011E+00,
      0.25851196049125434828E+00, 0.36807553840697533536E+00,
      0.48501092905604697475E+00, 0.60277514152618576821E+00,
      0.71477884217753226516E+00, 0.81475510988760098605E+00,
      0.89711029755948965867E+00, 0.95723808085944261843E+00,
      0.99178832974629703586E+00])
  quadrature_weights = np.array([
      0.18831438115323502887E-01, 0.18567086243977649478E-01,
      0.18042093461223385584E-01, 0.17263829606398753364E-01,
      0.16243219975989856730E-01, 0.14994592034116704829E-01,
      0.13535474469662088392E-01, 0.11886351605820165233E-01,
      0.10070377242777431897E-01, 0.81130545742299586629E-02,
      0.60419009528470238773E-02, 0.38862217010742057883E-02,
      0.16793031084546090448E-02])
  # Broadcast a new trailing axis over the quadrature nodes.
  r = tf.math.square(a[..., tf.newaxis]) * quadrature_points
  log_integrand = -0.5 * tf.math.square(
      h[..., tf.newaxis]) * (1. + r) - tf.math.log1p(r)
  return tf.math.exp(tf.math.log(a) + tf.math.reduce_logsumexp(
      log_integrand + np.log(quadrature_weights), axis=-1))
def _owens_t_method6(h, a):
  """OwensT Method T6, a special case for `a` near 1."""
  # When a = 1, T(h, 1) = 0.5 * ndtr(h) * (1 - ndtr(h)); for a close to 1,
  # a correction term proportional to r = atan2(1 - a, 1 + a) is subtracted.
  r = tf.math.atan2(1. - a, 1. + a)
  ndtr_h = 0.5 * tf.math.erfc(h / np.sqrt(2.))
  base = 0.5 * ndtr_h * (1 - ndtr_h)
  correction = r * tf.math.exp(
      -(1. - a) * tf.math.square(h) / (2 * r)) / (2 * np.pi)
  return tf.where(tf.math.equal(r, 0.), base, base - correction)
def _owens_t_regions(h, a):
  """Returns a list of Tensors describing the region of computation.

  Each returned boolean Tensor marks the elements of (h, a) falling in one
  of the 18 (h, a) regions from Patefield & Tandy; the caller maps each
  region to an evaluation method and truncation order.
  """
  # We assume h >= 0, 0 <= a <= 1
  # Regions 1-7 that use T1.
  regions = []
  is_in_region1 = (h <= 0.06) & (a <= 0.025)
  is_in_region1 = is_in_region1 | (h <= 0.02) & (a <= 0.09)
  regions.append(is_in_region1)
  is_in_region2 = (h <= 0.02) & (a >= 0.09)
  is_in_region2 = (is_in_region2 |
                   (h >= 0.02) & (h <= 0.06) & (a >= 0.025) & (a <= 0.36))
  is_in_region2 = is_in_region2 | (h >= 0.06) & (h <= 0.09) & (a <= 0.09)
  regions.append(is_in_region2)
  is_in_region3 = (h >= 0.02) & (h <= 0.06) & (a >= 0.36)
  is_in_region3 = (is_in_region3 |
                   (h >= 0.06) & (h <= 0.09) & (a >= 0.09) & (a <= 0.5))
  is_in_region3 = (is_in_region3 |
                   (h >= 0.09) & (h <= 0.26) & (a >= 0.025) & (a <= 0.15))
  regions.append(is_in_region3)
  is_in_region4 = (h >= 0.06) & (h <= 0.125) & (a >= 0.9)
  regions.append(is_in_region4)
  is_in_region5 = (h >= 0.06) & (h <= 0.26) & (a >= 0.5) & (a <= 0.9)
  is_in_region5 = (is_in_region5 |
                   (h >= 0.09) & (h <= 0.26) & (a >= 0.15) & (a <= 0.5))
  is_in_region5 = (is_in_region5 |
                   (h >= 0.26) & (h <= 0.6) & (a >= 0.025) & (a <= 0.36))
  regions.append(is_in_region5)
  is_in_region6 = (h >= 0.26) & (h <= 0.6) & (a >= 0.36) & (a <= 0.9)
  is_in_region6 = is_in_region6 | (h >= 0.125) & (h <= 0.4) & (a >= 0.9)
  regions.append(is_in_region6)
  is_in_region7 = (h >= 0.6) & (h <= 1.7) & (a >= 0.15) & (a <= 0.36)
  regions.append(is_in_region7)
  is_in_region8 = (h >= 0.6) & (h <= 1.7) & (a >= 0.36) & (a <= 0.9)
  is_in_region8 = (is_in_region8 |
                   (h >= 0.4) & (h <= 1.6) & (a >= 0.9) & (a <= 0.99999))
  regions.append(is_in_region8)
  # Regions 9-11 use T2.
  is_in_region9 = (h >= 4.8) & (a <= 0.09)
  regions.append(is_in_region9)
  is_in_region10 = (h >= 4.8) & (a >= 0.09) & (a <= 0.36)
  regions.append(is_in_region10)
  is_in_region11 = (h >= 4.8) & (a >= 0.36) & (a <= 0.5)
  regions.append(is_in_region11)
  # Region 12 uses T3; explicitly excludes overlap with region 11.
  is_in_region12 = (h >= 3.4) & (a >= 0.9)
  is_in_region12 = is_in_region12 | (h >= 3.36) & (a >= 0.36) & (a <= 0.9)
  is_in_region12 = is_in_region12 & ~is_in_region11
  regions.append(is_in_region12)
  # Regions 13-16 use T4.
  is_in_region13 = (h >= 0.09) & (h <= 2.4) & (a <= 0.025)
  regions.append(is_in_region13)
  is_in_region14 = (h >= 0.6) & (h <= 1.7) & (a >= 0.025) & (a <= 0.09)
  regions.append(is_in_region14)
  is_in_region15 = (h >= 0.6) & (h <= 2.4) & (a >= 0.025) & (a <= 0.15)
  is_in_region15 = is_in_region15 & ~is_in_region14
  regions.append(is_in_region15)
  is_in_region16 = (h >= 1.7) & (h <= 2.4) & (a >= 0.15) & (a <= 0.36)
  is_in_region16 = is_in_region16 | (h >= 2.4) & (h <= 4.8) & (a <= 0.36)
  regions.append(is_in_region16)
  # Region 17 uses T5.
  is_in_region17 = (h >= 1.6) & (h <= 3.4) & (a >= 0.9) & (a <= 0.99999)
  is_in_region17 = (is_in_region17 |
                    (h >= 1.7) & (h <= 3.4) & (a >= 0.36) & (a <= 0.9))
  regions.append(is_in_region17)
  # Near the line a = 1.
  is_in_region18 = (h >= 0.4) & (h <= 2.33) & (a >= 0.99999)
  regions.append(is_in_region18)
  return regions
def _owens_t_naive_gradient(h, a):
  """Computes OwensT(h, a) with autodiff gradients only.

  Dispatches each element to one of six evaluation methods based on which
  (h, a) region it falls in, then applies special-case fixups (h == 0,
  a in {0, 1}, |a| > 1, sign of a, NaN propagation).
  """
  dtype = dtype_util.common_dtype([h, a], tf.float32)
  numpy_dtype = dtype_util.as_numpy_dtype(dtype)
  # OwensT(-h, a) = OwensT(h, a)
  h = tf.math.abs(h)
  abs_a = tf.math.abs(a)
  # Remap arguments such that 0 <= a <= 1.
  modified_a = tf.where(
      abs_a <= 1.,
      abs_a,
      tf.math.reciprocal(abs_a))
  modified_h = tf.where(abs_a <= 1., h, abs_a * h)
  # For regions 1 - 8, we use method1 with different orders.
  regions = _owens_t_regions(modified_h, modified_a)
  # Short-circuit if we are not in the first 8 regions.
  order = numpy_dtype(1.)
  order = tf.where(regions[0], numpy_dtype(2.), order)
  order = tf.where(regions[1], numpy_dtype(3.), order)
  order = tf.where(regions[2], numpy_dtype(4.), order)
  order = tf.where(regions[3], numpy_dtype(5.), order)
  order = tf.where(regions[4], numpy_dtype(7.), order)
  order = tf.where(regions[5], numpy_dtype(10.), order)
  order = tf.where(regions[6], numpy_dtype(12.), order)
  order = tf.where(regions[7], numpy_dtype(18.), order)
  result = _owens_t_method1(modified_h, modified_a, order)
  # For regions 9, 10 and 11 we use method2 with different orders.
  order = numpy_dtype(1.)
  order = tf.where(regions[8], numpy_dtype(10.), order)
  order = tf.where(regions[9], numpy_dtype(20.), order)
  order = tf.where(regions[10], numpy_dtype(30.), order)
  result = tf.where(
      regions[8] | regions[9] | regions[10],
      _owens_t_method2(modified_h, modified_a, order),
      result)
  # For region 12 we use method3.
  result = tf.where(
      regions[11], _owens_t_method3(modified_h, modified_a), result)
  # For regions 13, 14, 15 and 16 we use method4 with different orders.
  order = numpy_dtype(1.)
  order = tf.where(regions[12], numpy_dtype(4.), order)
  order = tf.where(regions[13], numpy_dtype(7.), order)
  order = tf.where(regions[14], numpy_dtype(8.), order)
  order = tf.where(regions[15], numpy_dtype(20.), order)
  result = tf.where(
      regions[12] | regions[13] | regions[14] | regions[15],
      _owens_t_method4(modified_h, modified_a, order),
      result)
  # For region 17 we use method5.
  result = tf.where(
      regions[16], _owens_t_method5(modified_h, modified_a), result)
  # For region 18, we use method6.
  result = tf.where(
      regions[17], _owens_t_method6(modified_h, modified_a), result)
  # When h = 0, T(0, a) = arctan(a) / (2 pi).
  result = tf.where(
      tf.math.equal(modified_h, 0.),
      tf.math.atan(modified_a) / (2 * np.pi), result)
  # When a = 1, OwensT(h, 1) = ndtr(h) * (1 - ndtr(h))
  result = tf.where(
      tf.math.equal(modified_a, 1.),
      (0.125 * tf.math.erfc(-modified_h / np.sqrt(2.)) *
       tf.math.erfc(modified_h / np.sqrt(2.))), result)
  # When a = 0, we should return 0.
  result = tf.where(tf.math.equal(modified_a, 0.), numpy_dtype(0.), result)
  normh = tf.math.erfc(h / np.sqrt(2.))
  normah = tf.math.erfc(abs_a * h / np.sqrt(2.))
  # Compensate for when |a| > 1.
  result = tf.where(
      abs_a > 1.,
      tf.where(
          abs_a * h <= 0.67,
          0.25 - 0.25 * tf.math.erf(
              h / np.sqrt(2.)) * tf.math.erf(abs_a * h / np.sqrt(2.)) - result,
          0.25 * (normh + normah - normh * normah) - result),
      result)
  # OwensT(h, -a) = -OwensT(h, a).
  result = tf.math.sign(a) * result
  # Propagate NaNs from either input.
  result = tf.where(tf.math.is_nan(a) | tf.math.is_nan(h),
                    numpy_dtype(np.nan),
                    result)
  return result
def _owens_t_fwd(h, a):
  """Forward pass for custom-gradient Owen's T (pairs with `_owens_t_bwd`)."""
  primal_out = _owens_t_naive_gradient(h, a)
  residuals = (h, a)
  return primal_out, residuals
def _owens_t_bwd(aux, g):
  """Backward pass for Owen's T using its closed-form partial derivatives."""
  h, a = aux
  partial_h = (-tf.math.exp(-0.5 * tf.math.square(h)) *
               tf.math.erf(a * h / np.sqrt(2)) / (2 * np.sqrt(2 * np.pi)))
  partial_a = (tf.math.exp(-0.5 * (tf.math.square(a) + 1) * tf.math.square(h)) /
               (2 * np.pi * (tf.math.square(a) + 1.)))
  return _fix_gradient_for_broadcasting(h, a, partial_h * g, partial_a * g)
def _owens_t_jvp(primals, tangents):
  """Computes JVP for Owen's T function (supports JAX custom derivative)."""
  h, a = primals
  dh, da = tangents
  # TODO(https://github.com/google/jax/issues/3768): eliminate broadcast_to?
  bc_shp = prefer_static.broadcast_shape(prefer_static.shape(dh),
                                         prefer_static.shape(da))
  dh = tf.broadcast_to(dh, bc_shp)
  da = tf.broadcast_to(da, bc_shp)
  # Closed-form partials of OwensT with respect to h and a.
  ph = (-tf.math.exp(-0.5 * tf.math.square(h)) *
        tf.math.erf(a * h / np.sqrt(2)) / (2 * np.sqrt(2 * np.pi)))
  pa = (tf.math.exp(-0.5 * (tf.math.square(a) + 1.)* tf.math.square(h)) /
        (2 * np.pi * (tf.math.square(a) + 1.)))
  return _owens_t_naive_gradient(h, a), ph * dh + pa * da
# Attach the hand-written VJP/JVP rules to the region-dispatched forward
# computation, so autodiff does not differentiate through its tf.where arms.
@tfp_custom_gradient.custom_gradient(
    vjp_fwd=_owens_t_fwd,
    vjp_bwd=_owens_t_bwd,
    jvp_fn=_owens_t_jvp)
def _owens_t_custom_gradient(h, a):
  """Computes OwensT(h, a) with correct custom gradient."""
  return _owens_t_naive_gradient(h, a)
def owens_t(h, a, name=None):
  # pylint: disable=line-too-long
  """Computes Owen's T function of `h` and `a` element-wise.

  Owen's T function is defined as the combined probability of the event `X > h`
  and `0 < Y < a * X`, where `X` and `Y` are independent standard normal
  random variables.

  In integral form this is defined as `1 / (2 * pi)` times the integral of
  `exp(-0.5 * h ** 2 * (1 + x ** 2)) / (1 + x ** 2)` from `0` to `a`.
  `h` and `a` can be any real number.

  The Owen's T implementation below is based on
  ([Patefield and Tandy, 2000][1]).

  The Owen's T function has several notable properties which
  we list here for convenience. ([Owen, 1980][2], page 414)

  - P2.1 `T( h, 0)   =  0`
  - P2.2 `T( 0, a)   =  arctan(a) / (2 pi)`
  - P2.3 `T( h, 1)   =  Phi(h) (1 - Phi(h)) / 2`
  - P2.4 `T( h, inf) =  (1 - Phi(|h|)) / 2`
  - P2.5 `T(-h, a)   =  T(h, a)`
  - P2.6 `T( h,-a)   = -T(h, a)`
  - P2.7 `T( h, a) + T(a h, 1 / a) = Phi(h)/2 + Phi(ah)/2 - Phi(h) Phi(ah) - [a<0]/2`
  - P2.8 `T( h, a) = arctan(a)/(2 pi) - 1/(2 pi) int_0^h int_0^{ax}` exp(-(x**2 + y**2)/2) dy dx`
  - P2.9 `T( h, a) = arctan(a)/(2 pi) - int_0**h phi(x) Phi(a x) dx + Phi(h)/2 - 1/4`

  `[a<0]` uses Iverson bracket notation, i.e., `[a<0] = {1 if a<0 and 0 otherwise`.

  Let us also define P2.10 as:
  - P2.10 `T(inf, a) = 0`
  - Proof

    Note that result #10,010.6 ([Owen, 1980][2], pg 403) states that:
    `int_0^inf phi(x) Phi(a+bx) dx = Phi(a/rho)/2 + T(a/rho,b) where rho = sqrt(1+b**2).`
    Using `a=0`, this result is:
    `int_0^inf phi(x) Phi(bx) dx = 1/4 + T(0,b) = 1/4 + arctan(b) / (2 pi)`
    Combining this with P2.9 implies

    ```none
    T(inf, a)
     = arctan(a)/(2 pi) - [ 1/4 + arctan(a) / (2 pi)]  + Phi(inf)/2 - 1/4
     = -1/4 + 1/2 -1/4 = 0.
    ```
    QED

  Args:
    h: A `float` `Tensor` defined as in `P({X > h, 0 < Y < a X})`. Must be
      broadcastable with `a`.
    a: A `float` `Tensor` defined as in `P({X > h, 0 < Y < a X})`. Must be
      broadcastable with `h`.
    name: A name for the operation (optional).

  Returns:
    owens_t: A `Tensor` with the same type as `h` and `a`.

  #### References

  [1]: Patefield, Mike, and <NAME>. "Fast and accurate calculation
       of Owen’s T function." Journal of Statistical Software 5.5 (2000): 1-25.
       http://www.jstatsoft.org/v05/i05/paper
  [2]: <NAME>. "A table of normal integrals: A table."
       Communications in Statistics-Simulation and Computation 9.4 (1980):
       389-419.
  """
  # pylint: enable=line-too-long
  with tf.name_scope(name or 'owens_t'):
    dtype = dtype_util.common_dtype([h, a], tf.float32)
    h = tf.convert_to_tensor(h, dtype=dtype, name='h')
    a = tf.convert_to_tensor(a, dtype=dtype, name='a')
    return _owens_t_custom_gradient(h, a)
| 29,873 |
394 |
<reponame>fanghaocong/GitlHEVCAnalyzer
#include "refreshscreencommand.h"
#include "model/modellocator.h"
#include <QPixmap>
/// Construct the command; simply forwards the Qt parent to the base command.
RefreshScreenCommand::RefreshScreenCommand(QObject *parent) :
    GitlAbstractCommand(parent)
{
}
/// Redraw the current frame of the active sequence and publish the result.
/// Returns false when no sequence is loaded; rcInputArg is unused.
bool RefreshScreenCommand::execute( GitlCommandParameter& rcInputArg, GitlCommandParameter& rcOutputArg )
{
    /// Grab the active sequence; nothing to refresh without one.
    ModelLocator* pcModel = ModelLocator::getInstance();
    ComSequence* pcSequence = pcModel->getSequenceManager().getCurrentSequence();
    if( pcSequence == NULL )
        return false;

    /// Clamp the frame-buffer's frame count into the sequence's valid POC range.
    const int iMaxPOC = pcSequence->getTotalFrames() - 1;
    int iPOC = pcModel->getFrameBuffer().getFrameCount();
    iPOC = VALUE_CLIP(0, iMaxPOC, iPOC);

    /// Fetch the frame pixmap and let the draw engine paint on top of it.
    QPixmap* pcPixmap = pcModel->getFrameBuffer().getFrame(iPOC);
    pcPixmap = pcModel->getDrawEngine().drawFrame(pcSequence, iPOC, pcPixmap); ///< Draw Frame Buffer

    /// Publish the rendered pixmap and frame bookkeeping to the caller.
    rcOutputArg.setParameter("picture", QVariant::fromValue((void*)(pcPixmap)));
    rcOutputArg.setParameter("current_frame_poc", iPOC);
    rcOutputArg.setParameter("total_frame_num", pcSequence->getTotalFrames());
    return true;
}
| 456 |
585 |
<filename>caffe2/python/layers/add_bias.py
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package add_bias
# Module caffe2.python.layers.add_bias
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import math
class AddBias(ModelLayer):
    """Layer that adds a learned per-dimension bias vector to its input."""

    def __init__(self, model, input_record, bias_init=None,
                 bias_optim=None, name='add_bias'):
        super(AddBias, self).__init__(model, name, input_record)

        # The input must be a fixed-dimension scalar field.
        assert isinstance(input_record, schema.Scalar), "Incorrect input type"
        assert len(input_record.field_type().shape) > 0, (
            "AddBias expects limited dimensions of the input tensor")

        dims = input_record.field_type().shape[0]
        assert dims > 0, (
            "AddBias expects input dimensions > 0, got {}".format(dims))

        # Default bias init: uniform in [-1/sqrt(dims), 1/sqrt(dims)].
        if not bias_init:
            init_scale = math.sqrt(1.0 / dims)
            bias_init = ('UniformFill', {'min': -init_scale, 'max': init_scale})

        self.b = self.create_param(
            param_name='b',
            shape=[dims, ],
            initializer=bias_init,
            optimizer=bias_optim,
        )

        # Output has the same base type and dimensionality as the input.
        self.output_schema = schema.Scalar(
            (input_record.field_type().base, (dims, )),
            self.get_next_blob_reference('output')
        )

    def add_ops(self, net):
        # Broadcast-add the bias blob onto the input blob(s).
        net.Add(self.input_record.field_blobs() + [self.b],
                self.output_schema.field_blobs(), broadcast=1)
| 851 |
460 |
<reponame>MarianMacik/thorntail
package org.wildfly.swarm.msc.test;
import org.jboss.msc.service.ServiceActivator;
import org.jboss.msc.service.ServiceActivatorContext;
import org.jboss.msc.service.ServiceName;
import org.jboss.msc.service.ServiceRegistryException;
import org.jboss.msc.service.ServiceTarget;
import org.jboss.msc.service.ValueService;
import org.jboss.msc.value.ImmediateValue;
/**
 * Test {@code ServiceActivator} that installs a constant-valued MSC service
 * under the name {@code swarm.test.cheese}; its value is the string
 * {@code "cheddar"}.
 *
 * @author <NAME>
 */
public class MyServiceActivator implements ServiceActivator {
    @Override
    public void activate(ServiceActivatorContext context) throws ServiceRegistryException {
        // Register a ValueService wrapping the immediate value "cheddar".
        ServiceTarget target = context.getServiceTarget();
        target.addService(ServiceName.of("swarm", "test", "cheese"), new ValueService<>(new ImmediateValue<>("cheddar")))
                .install();
    }
}
| 280 |
559 |
<filename>embed/examples/fm-radio.c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <luaradio.h>
/* LuaRadio flow graph template.  %f is the station frequency in Hz; the
 * RTL-SDR source is tuned 250 kHz below it and the TunerBlock shifts by
 * -250e3 to recenter, then decimates/demodulates to mono audio. */
const char *script_template =
    "local frequency = %f\n"
    "return radio.CompositeBlock():connect("
    "    radio.RtlSdrSource(frequency - 250e3, 1102500),"
    "    radio.TunerBlock(-250e3, 200e3, 5),"
    "    radio.WBFMMonoDemodulator(),"
    "    radio.DownsamplerBlock(5),"
    "    radio.PulseAudioSink(1)"
    ")";
/* Entry point: build the FM receiver flow graph for the station frequency
 * given in argv[1] and run it to completion.  Returns -1 on any failure.
 *
 * Fix: the luaradio context was leaked on the load/start/wait error paths;
 * it is now freed before every early return that follows luaradio_new(). */
int main(int argc, char *argv[]) {
    luaradio_t *radio;
    char script[512];

    if (argc < 2) {
        fprintf(stderr, "Usage: %s <FM station frequency>\n", argv[0]);
        return -1;
    }

    /* Substitute station frequency in script template */
    snprintf(script, sizeof(script), script_template, atof(argv[1]));

    /* Create context */
    if ((radio = luaradio_new()) == NULL) {
        perror("Allocating memory");
        return -1;
    }

    /* Load flow graph */
    if (luaradio_load(radio, script) < 0) {
        fprintf(stderr, "Error loading flow graph: %s\n", luaradio_strerror(radio));
        luaradio_free(radio);
        return -1;
    }

    /* Start flow graph */
    if (luaradio_start(radio) < 0) {
        fprintf(stderr, "Error starting flow graph: %s\n", luaradio_strerror(radio));
        luaradio_free(radio);
        return -1;
    }

    /* Wait until completion */
    if (luaradio_wait(radio) < 0) {
        fprintf(stderr, "Error waiting for flow graph: %s\n", luaradio_strerror(radio));
        luaradio_free(radio);
        return -1;
    }

    /* Free context */
    luaradio_free(radio);

    return 0;
}
| 684 |
543 |
<reponame>gbaasch/Gnu-RL<gh_stars>100-1000
#!/usr/bin/env python3
import argparse
import torch
from torch.autograd import Function, Variable
import torch.nn.functional as F
from torch import nn
from torch.nn.parameter import Parameter
import numpy as np
import os
from mpc import mpc, util
from mpc.mpc import GradMethods
import mpc.util as eutil
from mpc.env_dx import pendulum, cartpole
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('bmh')
def main():
    # Run a receding-horizon LQR controller on the chosen environment,
    # render each step to a PNG, and stitch the frames into an mp4.
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', type=str, default='pendulum')
    args = parser.parse_args()

    n_batch = 1
    if args.env == 'pendulum':
        T = 20
        dx = pendulum.PendulumDx()
        xinit = torch.zeros(n_batch, dx.n_state)
        # State encodes the angle as (cos th, sin th) plus angular velocity.
        th = 1.0
        xinit[:,0] = np.cos(th)
        xinit[:,1] = np.sin(th)
        xinit[:,2] = -0.5
    elif args.env == 'cartpole':
        T = 20
        dx = cartpole.CartpoleDx()
        xinit = torch.zeros(n_batch, dx.n_state)
        th = 0.5
        xinit[:,2] = np.cos(th)
        xinit[:,3] = np.sin(th)
    else:
        assert False

    q, p = dx.get_true_obj()

    u = None
    ep_length = 100
    for t in range(ep_length):
        x, u = solve_lqr(
            dx, xinit, q, p, T, dx.linesearch_decay, dx.max_linesearch_iter, u)
        fig, ax = dx.get_frame(x[0])
        fig.savefig('{:03d}.png'.format(t))
        plt.close(fig)
        # Warm-start the next solve by shifting the control plan one step
        # (last control duplicated), and roll the state forward one step.
        u = torch.cat((u[1:-1], u[-2:]), 0).contiguous()
        xinit = x[1]

    # Encode the saved frames into an mp4 and delete the PNGs.
    vid_file = 'ctrl_{}.mp4'.format(args.env)
    if os.path.exists(vid_file):
        os.remove(vid_file)
    cmd = ('/usr/bin/ffmpeg -loglevel quiet '
            '-r 32 -f image2 -i %03d.png -vcodec '
            'libx264 -crf 25 -pix_fmt yuv420p {}').format(
        vid_file
    )
    os.system(cmd)
    for t in range(ep_length):
        os.remove('{:03d}.png'.format(t))
def solve_lqr(dx, xinit, q, p, T,
              linesearch_decay, max_linesearch_iter, u_init=None):
    """Solve one box-constrained LQR/MPC problem for dynamics `dx`.

    Args:
        dx: Dynamics model exposing `n_state`, `n_ctrl` and `lower`/`upper`
            control bounds.
        xinit: Initial state tensor of shape [n_batch, n_state].
        q: Diagonal of the quadratic cost over the state-control vector.
        p: Linear cost term over the state-control vector.
        T: Planning horizon (number of time steps).
        linesearch_decay: Line-search step decay for the LQR solver.
        max_linesearch_iter: Maximum line-search iterations per LQR step.
        u_init: Optional warm-start control trajectory; when provided, far
            fewer LQR iterations are used.

    Returns:
        (x, u): Optimized state and control trajectories.
    """
    # Fix: removed the unused local `n_sc = dx.n_state + dx.n_ctrl`.
    n_batch = 1
    # Expand the time-invariant cost to per-timestep, per-batch tensors.
    Q = torch.diag(q).unsqueeze(0).unsqueeze(0).repeat(
        T, n_batch, 1, 1
    )
    p = p.unsqueeze(0).repeat(T, n_batch, 1)

    # A warm start needs far fewer solver iterations than a cold start.
    lqr_iter = 100 if u_init is None else 10
    x_lqr, u_lqr, objs_lqr = mpc.MPC(
        dx.n_state, dx.n_ctrl, T, xinit,
        u_lower=dx.lower, u_upper=dx.upper, u_init=u_init,
        lqr_iter=lqr_iter,
        verbose=1,
        exit_unconverged=False,
        detach_unconverged=False,
        linesearch_decay=linesearch_decay,
        max_linesearch_iter=max_linesearch_iter,
        grad_method=GradMethods.AUTO_DIFF,
        eps=1e-4,
        # slew_rate_penalty=self.slew_rate_penalty,
        # prev_ctrl=prev_ctrl,
    )(Q, p, dx)
    return x_lqr, u_lqr
# Entry point when run as a script.
if __name__ == '__main__':
    main()
| 1,426 |
769 |
#include "gl/mesh.h"
#include "gl/shaderProgram.h"
#include "gl/renderState.h"
#include "gl/hardware.h"
#include "gl/glError.h"
#include "platform.h"
#include "log.h"
namespace Tangram {
// Default-construct an empty mesh: triangles, static-draw usage, no GL
// buffer objects allocated and nothing uploaded or compiled yet.
MeshBase::MeshBase() {
    m_drawMode = GL_TRIANGLES;
    m_hint = GL_STATIC_DRAW;
    m_glVertexBuffer = 0;
    m_glIndexBuffer = 0;
    m_nVertices = 0;
    m_nIndices = 0;

    m_dirtyOffset = 0;
    m_dirtySize = 0;

    m_dirty = false;
    m_isUploaded = false;
    m_isCompiled = false;
}
// Construct with a vertex layout, primitive draw mode and GL usage hint.
MeshBase::MeshBase(std::shared_ptr<VertexLayout> _vertexLayout, GLenum _drawMode, GLenum _hint)
    : MeshBase()
{
    m_vertexLayout = _vertexLayout;
    m_hint = _hint;

    setDrawMode(_drawMode);
}
MeshBase::~MeshBase() {

    // GL buffer deletion is queued on the RenderState rather than issued
    // directly — presumably so it happens with a valid GL context/thread;
    // confirm against RenderState::queueBufferDeletion.
    if (m_rs) {
        if (m_glVertexBuffer || m_glIndexBuffer) {
            GLuint buffers[] = { m_glVertexBuffer, m_glIndexBuffer };
            m_rs->queueBufferDeletion(2, buffers);
        }
        m_vaos.dispose(*m_rs);
    }

    // Client-side staging copies (only present before upload()).
    if (m_glVertexData) {
        delete[] m_glVertexData;
    }

    if (m_glIndexData) {
        delete[] m_glIndexData;
    }
}
// Replace the vertex layout describing this mesh's attribute format.
void MeshBase::setVertexLayout(std::shared_ptr<VertexLayout> _vertexLayout) {
    m_vertexLayout = _vertexLayout;
}
// Set the GL primitive mode used to draw this mesh.  Any value that is not
// a recognized primitive falls back to GL_TRIANGLES with a warning.
void MeshBase::setDrawMode(GLenum _drawMode) {
    switch (_drawMode) {
        case GL_POINTS:
        case GL_LINE_STRIP:
        case GL_LINE_LOOP:
        case GL_LINES:
        case GL_TRIANGLE_STRIP:
        case GL_TRIANGLE_FAN:
        case GL_TRIANGLES:
            m_drawMode = _drawMode;
            return;
        default:
            LOGW("Invalid draw mode for mesh! Defaulting to GL_TRIANGLES");
            m_drawMode = GL_TRIANGLES;
    }
}
// Re-upload (part of) the vertex data of an already-uploaded dynamic mesh.
// When _data is null, the mesh's own staging buffer is used; requires a
// non-GL_STATIC_DRAW usage hint.
void MeshBase::subDataUpload(RenderState& rs, GLbyte* _data) {
    if (!m_dirty && _data == nullptr) { return; }

    if (m_hint == GL_STATIC_DRAW) {
        LOGW("Wrong usage hint provided to the Vbo");
        assert(false);
    }

    GLbyte* data = _data ? _data : m_glVertexData;

    rs.vertexBuffer(m_glVertexBuffer);

    long vertexBytes = m_nVertices * m_vertexLayout->getStride();

    // invalidate/orphan the data store on the driver
    GL::bufferData(GL_ARRAY_BUFFER, vertexBytes, NULL, m_hint);

    if (Hardware::supportsMapBuffer) {
        GLvoid* dataStore = GL::mapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);

        // write memory client side
        std::memcpy(dataStore, data, vertexBytes);

        GL::unmapBuffer(GL_ARRAY_BUFFER);
    } else {
        // if this buffer is still used by gpu on current frame this call will not wait
        // for the frame to finish using the vbo but "directly" send command to upload the data
        GL::bufferData(GL_ARRAY_BUFFER, vertexBytes, data, m_hint);
    }

    m_dirty = false;
}
// First-time upload of the staged vertex (and optional index) data to GL
// buffer objects; frees the client-side staging copies afterwards.
void MeshBase::upload(RenderState& rs) {

    // Generate vertex buffer, if needed
    if (m_glVertexBuffer == 0) {
        GL::genBuffers(1, &m_glVertexBuffer);
    }

    // Buffer vertex data
    int vertexBytes = m_nVertices * m_vertexLayout->getStride();

    rs.vertexBuffer(m_glVertexBuffer);
    GL::bufferData(GL_ARRAY_BUFFER, vertexBytes, m_glVertexData, m_hint);

    delete[] m_glVertexData;
    m_glVertexData = nullptr;

    if (m_glIndexData) {

        if (m_glIndexBuffer == 0) {
            GL::genBuffers(1, &m_glIndexBuffer);
        }

        // Buffer element index data
        rs.indexBuffer(m_glIndexBuffer);

        GL::bufferData(GL_ELEMENT_ARRAY_BUFFER, m_nIndices * sizeof(GLushort), m_glIndexData, m_hint);

        delete[] m_glIndexData;
        m_glIndexData = nullptr;
    }

    // Remember the RenderState so the destructor can queue buffer deletion.
    m_rs = &rs;

    m_isUploaded = true;
}
// Draw the mesh with the given shader.  Lazily uploads (or re-uploads dirty)
// data, optionally using VAOs when supported, and issues one draw call per
// vertex-offset batch.  Returns false if there is nothing drawable or the
// shader cannot be used.
bool MeshBase::draw(RenderState& rs, ShaderProgram& _shader, bool _useVao) {

    bool useVao = _useVao && Hardware::supportsVAOs;

    if (!m_isCompiled) { return false; }
    if (m_nVertices == 0) { return false; }

    // Enable shader program
    if (!_shader.use(rs)) {
        return false;
    }

    // Ensure that geometry is buffered into GPU
    if (!m_isUploaded) {
        upload(rs);
    } else if (m_dirty) {
        subDataUpload(rs);
    }

    if (useVao) {
        if (!m_vaos.isInitialized()) {
            // Capture vao state
            m_vaos.initialize(rs, _shader, m_vertexOffsets, *m_vertexLayout, m_glVertexBuffer, m_glIndexBuffer);
        }
    } else {
        // Bind buffers for drawing
        rs.vertexBuffer(m_glVertexBuffer);

        if (m_nIndices > 0) {
            rs.indexBuffer(m_glIndexBuffer);
        }
    }

    size_t indiceOffset = 0;
    size_t vertexOffset = 0;

    // Each entry of m_vertexOffsets is one batch: (index count, vertex count).
    for (size_t i = 0; i < m_vertexOffsets.size(); ++i) {
        auto& o = m_vertexOffsets[i];
        uint32_t nIndices = o.first;
        uint32_t nVertices = o.second;

        if (!useVao) {
            // Enable vertex attribs via vertex layout object
            size_t byteOffset = vertexOffset * m_vertexLayout->getStride();
            m_vertexLayout->enable(rs, _shader, byteOffset);
        } else {
            // Bind the corresponding vao relative to the current offset
            m_vaos.bind(i);
        }

        // Draw as elements or arrays
        if (nIndices > 0) {
            GL::drawElements(m_drawMode, nIndices, GL_UNSIGNED_SHORT,
                             (void*)(indiceOffset * sizeof(GLushort)));
        } else if (nVertices > 0) {
            GL::drawArrays(m_drawMode, 0, nVertices);
        }

        vertexOffset += nVertices;
        indiceOffset += nIndices;
    }

    if (useVao) {
        m_vaos.unbind();
    }

    return true;
}
// Total GPU buffer footprint of this mesh: vertex bytes plus index bytes.
size_t MeshBase::bufferSize() const {
    const size_t vertexBytes = m_nVertices * m_vertexLayout->getStride();
    const size_t indexBytes = m_nIndices * sizeof(GLushort);
    return vertexBytes + indexBytes;
}
// Add indices by collecting them into batches to draw as much as
// possible in one draw call. The indices must be shifted by the
// number of vertices that are present in the current batch.
// Appends the shifted indices into m_glIndexData starting at _offset and
// updates m_vertexOffsets batch bookkeeping.
// Returns the index-buffer write position after appending, i.e. _offset
// plus the number of indices consumed from _indices.
size_t MeshBase::compileIndices(const std::vector<std::pair<uint32_t, uint32_t>>& _offsets,
                                const std::vector<uint16_t>& _indices, size_t _offset) {
    GLushort* dst = m_glIndexData + _offset;
    size_t curVertices = 0;
    size_t src = 0;

    if (m_vertexOffsets.empty()) {
        m_vertexOffsets.emplace_back(0, 0);
    } else {
        // Continue filling the last open batch
        curVertices = m_vertexOffsets.back().second;
    }

    for (auto& p : _offsets) {
        size_t nIndices = p.first;
        size_t nVertices = p.second;

        // Start a new batch when the shifted indices would exceed the
        // maximum value representable in a GLushort index
        if (curVertices + nVertices > MAX_INDEX_VALUE) {
            m_vertexOffsets.emplace_back(0, 0);
            curVertices = 0;
        }

        // Copy this mesh's indices, shifted by the vertices already batched
        for (size_t i = 0; i < nIndices; i++, dst++) {
            *dst = _indices[src++] + curVertices;
        }

        auto& offset = m_vertexOffsets.back();
        offset.first += nIndices;
        offset.second += nVertices;

        curVertices += nVertices;
    }
    return _offset + src;
}
// Marks a byte range of the vertex data as needing re-upload. Successive
// calls grow a single dirty window covering all reported ranges.
void MeshBase::setDirty(GLintptr _byteOffset, GLsizei _byteSize) {
    if (m_dirty) {
        // Merge: extend the existing window to cover the new range
        size_t newEnd = std::max(m_dirtyOffset + m_dirtySize, _byteOffset + _byteSize);
        m_dirtyOffset = std::min(m_dirtyOffset, _byteOffset);
        m_dirtySize = newEnd - m_dirtyOffset;
    } else {
        m_dirty = true;
        m_dirtyOffset = _byteOffset;
        m_dirtySize = _byteSize;
    }
}
}
| 3,319 |
427 |
/** Handling for borrow expression nodes
* @file
*
* This source file is part of the Cone Programming Language C compiler
* See Copyright Notice in conec.h
*/
#include "../ir.h"
#include <assert.h>
// Inject a typed, borrowed node on some node (expected to be an lval)
// 'type' is the expected value type of the reference (or unknownType),
// 'perm' the requested permission for the borrow.
void borrowMutRef(INode **nodep, INode* type, INode *perm) {
    INode *node = *nodep;
    // Rather than borrow from a deref, just return the ptr node we are de-reffing
    if (node->tag == DerefTag) {
        StarNode *derefnode = (StarNode *)node;
        *nodep = derefnode->vtexp;
        return;
    }
    // Borrowing only makes sense on an lval; report the error (and continue,
    // so later passes still see a structurally valid tree)
    if (iexpIsLvalError(node) == 0) {
        errorMsgNode(node, ErrorInvType, "Auto-borrowing can only be done on an lval");
    }
    // Build the reference type when the value type is known; otherwise leave it unknown
    RefNode *reftype = type != unknownType? newRefNodeFull(RefTag, node, borrowRef, perm, type) : (RefNode*)unknownType;
    // NOTE(review): the last argument here is 'node' (the borrowed expression)
    // rather than a type, unlike the reftype call above -- presumably it fills
    // newRefNodeFull's vtexp slot; confirm against newRefNodeFull's signature.
    RefNode *borrownode = newRefNodeFull(BorrowTag, node, borrowRef, perm, node);
    borrownode->vtype = (INode*)reftype;
    *nodep = (INode*)borrownode;
}
// Auto-inject a borrow node in front of '*from' so that it produces the
// array-reference (slice) type expected by 'totypedcl'. Borrowing an array
// into an array reference is the only coercion currently supported here.
void borrowAuto(INode **from, INode *totypedcl) {
    RefNode *targetref = (RefNode *)totypedcl;

    // Build a const borrowed array-reference type over the source expression
    RefNode *slicetype = newRefNodeFull(ArrayRefTag, *from, borrowRef, newPermUseNode(constPerm), targetref->vtexp);

    // Wrap the source expression in an array-borrow node carrying that type
    RefNode *autonode = newRefNode(ArrayBorrowTag);
    autonode->vtexp = *from;
    autonode->vtype = (INode *)slicetype;
    *from = (INode *)autonode;
}
// Can we safely auto-borrow to match expected type?
// Note: totype has already done GetTypeDcl
// Returns 1 only for the supported case: borrowing an array lval into a
// const, borrowed array reference whose element type matches.
int borrowAutoMatches(INode *from, RefNode *totype) {
    // We can only borrow from an lval
    if (!iexpIsLval(from))
        return 0;

    INode *fromtype = iexpGetTypeDcl(from);

    // Only array -> borrowed array reference (slice) is handled
    if (totype->tag != ArrayRefTag || fromtype->tag != ArrayTag)
        return 0;
    if (!itypeIsSame(((RefNode*)totype)->vtexp, arrayElemType(fromtype)))
        return 0;
    if (itypeGetTypeDcl(totype->perm) != (INode*)constPerm)
        return 0;
    return itypeGetTypeDcl(totype->region) == borrowRef;
}
// Serialize borrow node
// Emits the borrow expression as "&(<type>-><expr>)" through the inode printer.
void borrowPrint(RefNode *node) {
    inodeFprint("&(");
    inodePrintNode(node->vtype);
    inodeFprint("->");
    inodePrintNode(node->vtexp);
    inodeFprint(")");
}
// Analyze borrow node
// Type-checks a borrow expression: checks the borrowed-from expression,
// auto-derefs when borrowing through a reference, determines the borrowed
// lval's permission and scope, infers the resulting reference's value type,
// and verifies the requested permission. On success node->vtype is set to
// the constructed reference type.
void borrowTypeCheck(TypeCheckState *pstate, RefNode **nodep) {
    RefNode *node = *nodep;
    // Bail out if the source expression fails type check or is not an lval
    if (iexpTypeCheckAny(pstate, &node->vtexp) == 0 || iexpIsLvalError(node->vtexp) == 0)
        return;

    // Auto-deref the exp, if we are borrowing a reference to a reference's field or indexed value
    INode *exptype = iexpGetTypeDcl(node->vtexp);
    if ((node->flags & FlagSuffix) && (exptype->tag == RefTag || exptype->tag == PtrTag || exptype->tag == ArrayRefTag)) {
        StarNode *deref = newStarNode(DerefTag);
        deref->vtexp = node->vtexp;
        if (exptype->tag == ArrayRefTag)
            deref->vtype = (INode*)newArrayDerefNodeFrom((RefNode*)exptype);
        else
            deref->vtype = ((RefNode*)exptype)->vtexp; // assumes StarNode has field in same place
        node->vtexp = (INode*)deref;
    }

    // Setup lval, perm and scope info as if we were borrowing from a global constant literal.
    // If not, extract this info from expression nodes
    uint16_t scope = 0; // global
    INode *lval = node->vtexp;
    INode *lvalperm = (INode*)immPerm;
    scope = 0; // Global
    if (lval->tag != StringLitTag) {
        // lval is the variable or variable sub-structure we want to get a reference to
        // From it, obtain variable we are borrowing from and actual/calculated permission
        INode *lvalvar = iexpGetLvalInfo(lval, &lvalperm, &scope);
        if (lvalvar == NULL) {
            node->vtype = (INode*)newRefNodeFull(RefTag, (INode*)node, node->region, node->perm, (INode*)unknownType); // To avoid a crash later
            return;
        }
        // Set lifetime of reference to borrowed variable's lifetime
        if (lvalvar->tag == VarDclTag)
            scope = ((VarDclNode*)lvalvar)->scope;
    }
    INode *lvaltype = ((IExpNode*)lval)->vtype;

    // The reference's value type is currently unknown
    // Let's infer this value type from the lval we are borrowing from
    uint16_t tag;
    INode *refvtype;
    if (node->tag == BorrowTag) {
        tag = RefTag;
        refvtype = lvaltype;
    }
    else {
        tag = ArrayRefTag; // Borrowing to create an array reference
        if (lvaltype->tag == ArrayTag) {
            // Slice of an array: element type becomes the ref's value type
            refvtype = arrayElemType(lvaltype);
        }
        else if (lvaltype->tag == ArrayDerefTag) {
            refvtype = ((RefNode*)lvaltype)->vtexp;
        }
        else
            refvtype = lvaltype; // a one-element slice!
    }

    // Ensure requested/inferred permission matches lval's permission
    INode *refperm = node->perm;
    if (refperm == unknownType)
        refperm = newPermUseNode(itypeIsConcrete(refvtype) ? constPerm : opaqPerm);
    if (!permMatches(refperm, lvalperm))
        errorMsgNode((INode *)node, ErrorBadPerm, "Borrowed reference cannot obtain this permission");

    RefNode *reftype = newRefNodeFull(tag, (INode*)node, borrowRef, refperm, refvtype);
    reftype->scope = scope;
    node->vtype = (INode *)reftype;
}
// Perform data flow analysis on a borrow node.
// Currently a no-op: deactivating the source variable for a borrowed
// reference is not yet implemented. The previous body declared unused
// locals (node, reftype) that only produced compiler warnings; they have
// been removed and the parameters explicitly voided until the analysis
// is written.
void borrowFlow(FlowState *fstate, RefNode **nodep) {
    (void)fstate;  // unused until flow analysis is implemented
    (void)nodep;   // unused until flow analysis is implemented
    // Borrowed reference: Deactivate source variable if necessary
}
| 2,237 |
319 |
package me.aartikov.alligator.helpers;
import android.app.Activity;
import android.content.Intent;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.fragment.app.DialogFragment;
import androidx.fragment.app.Fragment;
import me.aartikov.alligator.ActivityResult;
import me.aartikov.alligator.Screen;
import me.aartikov.alligator.ScreenResult;
import me.aartikov.alligator.destinations.ActivityDestination;
import me.aartikov.alligator.destinations.DialogFragmentDestination;
import me.aartikov.alligator.destinations.FragmentDestination;
import me.aartikov.alligator.exceptions.InvalidScreenResultException;
import me.aartikov.alligator.exceptions.NavigationException;
import me.aartikov.alligator.exceptions.ScreenRegistrationException;
import me.aartikov.alligator.listeners.ScreenResultListener;
import me.aartikov.alligator.navigationfactories.NavigationFactory;
/**
 * Helps to return a screen result from activities and fragments.
 */
public class ScreenResultHelper {
	public static final String KEY_REQUEST_CODE = "me.aartikov.alligator.KEY_REQUEST_CODE";
	public static final String KEY_RESULT_CODE = "me.aartikov.alligator.KEY_RESULT_CODE";

	private final NavigationFactory mNavigationFactory;

	public ScreenResultHelper(NavigationFactory navigationFactory) {
		mNavigationFactory = navigationFactory;
	}

	/**
	 * Converts {@code screenResult} to an {@link ActivityResult} and sets it on the activity.
	 *
	 * @throws NavigationException if the activity's screen is not registered or can't return this result
	 */
	public void setActivityResult(@NonNull Activity activity, @NonNull ScreenResult screenResult) throws NavigationException {
		ActivityDestination activityDestination = getAndValidateActivityDestination(activity, screenResult);
		ActivityResult activityResult = activityDestination.createActivityResult(screenResult);
		activity.setResult(activityResult.getResultCode(), activityResult.getIntent());
	}

	/**
	 * Packs {@code screenResult} into {@code intent} (request code, result code and result extras)
	 * so it can be delivered through an intent rather than {@code Activity.setResult}.
	 *
	 * @throws NavigationException if the activity's screen is not registered or can't return this result
	 */
	public void setResultToIntent(@NonNull Intent intent, @NonNull Activity activity, @NonNull ScreenResult screenResult) throws NavigationException {
		ActivityDestination activityDestination = getAndValidateActivityDestination(activity, screenResult);
		ActivityResult activityResult = activityDestination.createActivityResult(screenResult);
		intent.putExtra(KEY_REQUEST_CODE, activityDestination.getRequestCode());
		intent.putExtra(KEY_RESULT_CODE, activityResult.getResultCode());
		Intent resultIntent = activityResult.getIntent();
		if (resultIntent != null) {
			intent.putExtras(resultIntent);
		}
	}

	/**
	 * Resolves the {@link ActivityDestination} for {@code activity} and checks that it declares
	 * a result class compatible with {@code screenResult}.
	 */
	@NonNull
	private ActivityDestination getAndValidateActivityDestination(@NonNull Activity activity, @NonNull ScreenResult screenResult) throws NavigationException {
		Class<? extends Screen> screenClass = mNavigationFactory.getScreenClass(activity);
		if (screenClass == null) {
			throw new ScreenRegistrationException("Failed to get a screen class for " + activity.getClass().getSimpleName());
		}
		ActivityDestination activityDestination = (ActivityDestination) mNavigationFactory.getDestination(screenClass);
		if (activityDestination == null) {
			throw new ScreenRegistrationException("Failed to get a destination for " + screenClass.getSimpleName());
		}
		if (activityDestination.getScreenResultClass() == null) {
			throw new InvalidScreenResultException("Screen " + screenClass.getSimpleName() + " can't return a result.");
		}
		Class<? extends ScreenResult> supportedScreenResultClass = activityDestination.getScreenResultClass();
		if (!supportedScreenResultClass.isAssignableFrom(screenResult.getClass())) {
			throw new InvalidScreenResultException("Screen " + screenClass.getSimpleName() + " can't return a result of class " + screenResult.getClass().getCanonicalName() +
					". It returns a result of class " + supportedScreenResultClass.getCanonicalName());
		}
		return activityDestination;
	}

	/**
	 * Validates {@code screenResult} against a fragment screen's destination and, when appropriate,
	 * notifies the listener.
	 *
	 * @throws NavigationException if the screen is not registered or can't return this result
	 */
	public void callScreenResultListener(@NonNull Fragment fragment, @Nullable ScreenResult screenResult, @NonNull ScreenResultListener screenResultListener) throws NavigationException {
		Class<? extends Screen> screenClass = mNavigationFactory.getScreenClass(fragment);
		if (screenClass == null) {
			throw new ScreenRegistrationException("Failed to get a screen class for " + fragment.getClass().getSimpleName());
		}
		FragmentDestination fragmentDestination = (FragmentDestination) mNavigationFactory.getDestination(screenClass);
		if (fragmentDestination == null) {
			throw new ScreenRegistrationException("Failed to get a destination for " + screenClass.getSimpleName());
		}
		if (shouldNotifyListener(screenClass, screenResult, fragmentDestination.getScreenResultClass())) {
			screenResultListener.onScreenResult(screenClass, screenResult);
		}
	}

	/**
	 * Validates {@code screenResult} against a dialog-fragment screen's destination and, when
	 * appropriate, notifies the listener.
	 *
	 * @throws NavigationException if the screen is not registered or can't return this result
	 */
	public void callScreenResultListener(@NonNull DialogFragment dialogFragment, @Nullable ScreenResult screenResult, @NonNull ScreenResultListener screenResultListener) throws NavigationException {
		Class<? extends Screen> screenClass = mNavigationFactory.getScreenClass(dialogFragment);
		if (screenClass == null) {
			throw new ScreenRegistrationException("Failed to get a screen class for " + dialogFragment.getClass().getSimpleName());
		}
		DialogFragmentDestination dialogFragmentDestination = (DialogFragmentDestination) mNavigationFactory.getDestination(screenClass);
		if (dialogFragmentDestination == null) {
			throw new ScreenRegistrationException("Failed to get a destination for " + screenClass.getSimpleName());
		}
		if (shouldNotifyListener(screenClass, screenResult, dialogFragmentDestination.getScreenResultClass())) {
			screenResultListener.onScreenResult(screenClass, screenResult);
		}
	}

	/**
	 * Shared validation for the two callScreenResultListener overloads (previously duplicated).
	 *
	 * @return true if the listener should be notified; false when the screen declares no result
	 * class and no result was supplied (nothing to report)
	 * @throws InvalidScreenResultException if the screen can't return a result or the result type
	 * doesn't match the declared one
	 */
	private boolean shouldNotifyListener(@NonNull Class<? extends Screen> screenClass,
	                                     @Nullable ScreenResult screenResult,
	                                     @Nullable Class<? extends ScreenResult> supportedScreenResultClass) throws NavigationException {
		if (supportedScreenResultClass == null) {
			if (screenResult == null) {
				return false;
			}
			throw new InvalidScreenResultException("Screen " + screenClass.getSimpleName() + " can't return a result.");
		}
		if (screenResult != null && !supportedScreenResultClass.isAssignableFrom(screenResult.getClass())) {
			throw new InvalidScreenResultException("Screen " + screenClass.getSimpleName() + " can't return a result of class " + screenResult.getClass().getCanonicalName() +
					". It returns a result of class " + supportedScreenResultClass.getCanonicalName());
		}
		return true;
	}
}
| 1,976 |
579 |
<reponame>agnesnatasya/Be-Tree
#ifdef ERPC_INFINIBAND
#include "ib_transport.h"
namespace erpc {
// Packets that are the first packet in their MsgBuffer use one DMA, and may
// be inlined. Packets that are not the first packet use two DMAs, and are never
// inlined for simplicity.
// Fills in the pre-chained send work requests for num_pkts packets and posts
// them to the QP in a single ibv_post_send call. Fatal on post failure.
void IBTransport::tx_burst(const tx_burst_item_t* tx_burst_arr,
                           size_t num_pkts) {
  for (size_t i = 0; i < num_pkts; i++) {
    const tx_burst_item_t& item = tx_burst_arr[i];
    const MsgBuffer* msg_buffer = item.msg_buffer_;

    // Verify constant fields of work request
    struct ibv_send_wr& wr = send_wr[i];
    struct ibv_sge* sgl = send_sgl[i];

    assert(wr.next == &send_wr[i + 1]);  // +1 is valid
    assert(wr.wr.ud.remote_qkey == kQKey);
    assert(wr.opcode == IBV_WR_SEND_WITH_IMM);
    assert(wr.sg_list == sgl);

    // Set signaling + poll SEND CQ if needed. The wr is non-inline by default.
    wr.send_flags = get_signaled_flag() ? IBV_SEND_SIGNALED : 0;

    if (item.pkt_idx_ == 0) {
      // This is the first packet, so we need only 1 SGE. This can be CR/RFR.
      const pkthdr_t* pkthdr = msg_buffer->get_pkthdr_0();
      sgl[0].addr = reinterpret_cast<uint64_t>(pkthdr);
      sgl[0].length = msg_buffer->get_pkt_size<kMaxDataPerPkt>(0);
      sgl[0].lkey = msg_buffer->buffer_.lkey_;

      // Only single-SGE work requests are inlined
      wr.send_flags |= (sgl[0].length <= kMaxInline) ? IBV_SEND_INLINE : 0;
      wr.num_sge = 1;
    } else {
      // This is not the first packet, so we need 2 SGEs. This involves a
      // a division, which is OK because it is a large message.
      const pkthdr_t* pkthdr = msg_buffer->get_pkthdr_n(item.pkt_idx_);
      sgl[0].addr = reinterpret_cast<uint64_t>(pkthdr);
      sgl[0].length = static_cast<uint32_t>(sizeof(pkthdr_t));
      sgl[0].lkey = msg_buffer->buffer_.lkey_;

      size_t offset = item.pkt_idx_ * kMaxDataPerPkt;
      sgl[1].addr = reinterpret_cast<uint64_t>(&msg_buffer->buf_[offset]);
      sgl[1].length =
          (std::min)(kMaxDataPerPkt, msg_buffer->data_size_ - offset);
      sgl[1].lkey = msg_buffer->buffer_.lkey_;
      wr.num_sge = 2;
    }

    const auto* ib_rinfo =
        reinterpret_cast<ib_routing_info_t*>(item.routing_info_);
    wr.wr.ud.ah = ib_rinfo->ah;
    wr.wr.ud.remote_qpn = ib_rinfo->qpn;
    // In testing builds, deliberately drop the packet by targeting QPN 0
    if (kTesting && item.drop_) wr.wr.ud.remote_qpn = 0;
  }

  send_wr[num_pkts - 1].next = nullptr;  // Breaker of chains, first of her name

  struct ibv_send_wr* bad_wr;
  int ret = ibv_post_send(qp, &send_wr[0], &bad_wr);
  if (unlikely(ret != 0)) {
    fprintf(stderr, "eRPC: Fatal error. ibv_post_send failed. ret = %d\n", ret);
    assert(ret == 0);
    exit(-1);
  }

  send_wr[num_pkts - 1].next = &send_wr[num_pkts];  // Restore chain; safe
}
// Flushes the transmit pipeline: drains the one outstanding signaled WQE,
// then posts a dummy signaled + inline WQE to self (with an invalid QPN so
// it is dropped) and polls it, guaranteeing all prior SENDs have left the
// NIC. Resets the selective-signaling counter.
void IBTransport::tx_flush() {
  if (unlikely(nb_tx == 0)) return;

  // If we are here, we have sent a packet. The selective signaling logic
  // guarantees that there is *exactly one* *signaled* SEND work request.
  poll_cq_one_helper(send_cq);  // Poll the one existing signaled WQE

  // Use send_wr[0] to post the second signaled flush WQE
  struct ibv_send_wr& wr = send_wr[0];
  struct ibv_sge* sgl = send_sgl[0];

  assert(wr.next == &send_wr[1]);  // +1 is valid
  assert(wr.wr.ud.remote_qkey == kQKey);
  assert(wr.opcode == IBV_WR_SEND_WITH_IMM);
  assert(wr.sg_list == send_sgl[0]);

  // We could use a header-only SEND, but the optimized inline-copy function in
  // the modded driver expects WQEs with exactly one SGE.
  char flush_inline_buf[1];
  sgl[0].addr = reinterpret_cast<uint64_t>(flush_inline_buf);
  sgl[0].length = 1;

  wr.next = nullptr;  // Break the chain
  wr.send_flags = IBV_SEND_SIGNALED | IBV_SEND_INLINE;
  wr.num_sge = 1;
  wr.wr.ud.remote_qpn = 0;  // Invalid QPN, which will cause the drop
  wr.wr.ud.ah = self_ah;    // Send to self

  struct ibv_send_wr* bad_wr;
  int ret = ibv_post_send(qp, &send_wr[0], &bad_wr);
  if (unlikely(ret != 0)) {
    // Print the diagnostic *before* assert(), so debug builds report the
    // failure too (previously the assert fired first and suppressed the
    // message); matches the error-handling pattern used in tx_burst().
    fprintf(stderr, "eRPC Error. tx_flush post_send() failed. ret = %d\n", ret);
    assert(ret == 0);
    exit(-1);
  }

  wr.next = &send_wr[1];  // Restore the chain

  poll_cq_one_helper(send_cq);  // Poll the signaled WQE posted above

  nb_tx = 0;  // Reset signaling logic

  testing_.tx_flush_count_++;
}
size_t IBTransport::rx_burst() {
int ret = ibv_poll_cq(recv_cq, kPostlist, recv_wc);
assert(ret >= 0);
return static_cast<size_t>(ret);
}
// Posts RECVs to the QP. Requests accumulate until at least kRecvSlack are
// pending; only then is a real post issued, either as one special "fast RECV"
// work request understood by the modded driver, or as a normal chained
// ibv_post_recv over the circular recv_wr array. Fatal on post failure.
void IBTransport::post_recvs(size_t num_recvs) {
  assert(!fast_recv_used);     // Not supported yet
  assert(num_recvs <= kRQDepth);  // num_recvs can be 0
  assert(recvs_to_post < kRecvSlack);

  recvs_to_post += num_recvs;
  // Batch until enough RECVs have accumulated
  if (recvs_to_post < kRecvSlack) return;

  if (use_fast_recv) {
    // Construct a special RECV wr that the modded driver understands. Encode
    // the number of required RECVs in its num_sge field.
    struct ibv_recv_wr special_wr;
    special_wr.wr_id = kMagicWrIDForFastRecv;
    special_wr.num_sge = recvs_to_post;

    struct ibv_recv_wr* bad_wr = &special_wr;
    int ret = ibv_post_recv(qp, nullptr, &bad_wr);
    if (unlikely(ret != 0)) {
      fprintf(stderr, "eRPC IBTransport: Post RECV (fast) error %d\n", ret);
      exit(-1);
    }

    // Reset slack counter
    recvs_to_post = 0;
    return;
  }

  // The recvs posted are @first_wr through @last_wr, inclusive
  struct ibv_recv_wr *first_wr, *last_wr, *temp_wr, *bad_wr;
  int ret;

  size_t first_wr_i = recv_head;
  size_t last_wr_i = first_wr_i + (recvs_to_post - 1);
  if (last_wr_i >= kRQDepth) last_wr_i -= kRQDepth;  // wrap around the ring

  first_wr = &recv_wr[first_wr_i];
  last_wr = &recv_wr[last_wr_i];
  temp_wr = last_wr->next;  // remember the link we are about to cut

  last_wr->next = nullptr;  // Breaker of chains, queen of the First Men

  ret = ibv_post_recv(qp, first_wr, &bad_wr);
  if (unlikely(ret != 0)) {
    fprintf(stderr, "eRPC IBTransport: Post RECV (normal) error %d\n", ret);
    exit(-1);
  }

  last_wr->next = temp_wr;  // Restore circularity

  // Update RECV head: go to the last wr posted and take 1 more step
  recv_head = last_wr_i;
  recv_head = (recv_head + 1) % kRQDepth;

  // Reset slack counter
  recvs_to_post = 0;
}
} // namespace erpc
#endif
| 2,683 |
335 |
<filename>B/Biopsy_verb.json
{
"word": "Biopsy",
"definitions": [
"Conduct a biopsy on (tissue removed from a living body)"
],
"parts-of-speech": "Verb"
}
| 81 |
679 |
<reponame>IzaakBirch/togglz
package org.togglz.core.util;
import org.junit.jupiter.api.Test;
import org.togglz.core.Feature;
import org.togglz.core.annotation.EnabledByDefault;
import org.togglz.core.annotation.FeatureGroup;
import org.togglz.core.annotation.Label;
import java.lang.annotation.*;
import java.util.List;
import java.util.Set;
import java.util.function.Predicate;
import static java.util.stream.Collectors.toList;
import static org.junit.jupiter.api.Assertions.*;
public class FeatureAnnotationsTest {

    // Class-level group annotation used to verify that annotations placed on
    // the enum type itself are also reported for its constants.
    @FeatureGroup
    @Label("Class Level Group Label")
    @Target(ElementType.TYPE)
    @Retention(RetentionPolicy.RUNTIME)
    private @interface ClassLevelGroup {
    }

    @ClassLevelGroup
    private enum MyFeature implements Feature {
        @Label("Some feature with a label")
        FEATURE_WITH_LABEL,

        // no label annotation
        FEATURE_WITHOUT_LABEL,

        @EnabledByDefault
        FEATURE_ENABLED_BY_DEFAULT
    }

    private enum MyFeature2 implements Feature {
        FEATURE_WITH_NO_ANNOTATIONS
    }

    @Test
    void testGetLabel() {
        // An explicit @Label wins; otherwise the enum constant name is used.
        assertEquals("Some feature with a label", FeatureAnnotations.getLabel(MyFeature.FEATURE_WITH_LABEL));
        assertEquals("FEATURE_WITHOUT_LABEL", FeatureAnnotations.getLabel(MyFeature.FEATURE_WITHOUT_LABEL));
    }

    @Test
    void testIsEnabledByDefault() {
        assertFalse(FeatureAnnotations.isEnabledByDefault(MyFeature.FEATURE_WITH_LABEL));
        assertFalse(FeatureAnnotations.isEnabledByDefault(MyFeature.FEATURE_WITHOUT_LABEL));
        assertTrue(FeatureAnnotations.isEnabledByDefault(MyFeature.FEATURE_ENABLED_BY_DEFAULT));
    }

    @Test
    void getAnnotationsWillReturnBothFieldAndClassLevelAnnotations() {
        Set<Annotation> annotations = FeatureAnnotations.getAnnotations(MyFeature.FEATURE_ENABLED_BY_DEFAULT);

        assertNotNull(annotations);
        assertEquals(2, annotations.size());

        // verify both EnabledByDefault and ClassLevelGroup are there
        assertTrue(annotations.stream().anyMatch(hasType(EnabledByDefault.class)));
        assertTrue(annotations.stream().anyMatch(hasType(ClassLevelGroup.class)));
    }

    @Test
    void getAnnotationsWillReturnEmptySetWhenThereAreNoAnnotations() {
        Set<Annotation> annotations = FeatureAnnotations.getAnnotations(MyFeature2.FEATURE_WITH_NO_ANNOTATIONS);

        assertNotNull(annotations);
        assertTrue(annotations.isEmpty());
    }

    // Predicate matching annotations of exactly the given annotation type.
    private Predicate<Annotation> hasType(final Class<? extends Annotation> annotationType) {
        return annotation -> annotationType.equals(annotation.annotationType());
    }

    @Test
    void getAnnotationWillReturnFieldLevelAnnotation() {
        assertNotNull(FeatureAnnotations.getAnnotation(MyFeature.FEATURE_ENABLED_BY_DEFAULT, EnabledByDefault.class));
    }

    @Test
    void getAnnotationWillReturnClassLevelAnnotation() {
        assertNotNull(FeatureAnnotations.getAnnotation(MyFeature.FEATURE_ENABLED_BY_DEFAULT, ClassLevelGroup.class));
    }

    @Test
    void getAnnotationWillReturnNullWhenAnnotationDoesNotExist() {
        assertNull(FeatureAnnotations.getAnnotation(MyFeature.FEATURE_ENABLED_BY_DEFAULT, Label.class));
    }
}
| 1,278 |
387 |
/*
* Copyright (C) 2014, United States Government, as represented by the
* Administrator of the National Aeronautics and Space Administration.
* All rights reserved.
*
* The Java Pathfinder core (jpf-core) platform is licensed under the
* Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package java.lang;
import java.io.BufferedInputStream;
import java.io.InputStream;
import java.io.PrintStream;
import java.nio.channels.Channel;
import java.util.Map;
import java.util.Properties;
import sun.misc.JavaLangAccess;
import sun.misc.SharedSecrets;
import sun.nio.ch.Interruptible;
import sun.reflect.ConstantPool;
import sun.reflect.annotation.AnnotationType;
/**
 * JPF model class for java.lang.System. Most operations delegate to native
 * peers; standard streams and system properties are initialized from values
 * supplied by the VM in the static initializer.
 */
public class System {
  static Properties properties;

  // Model stdin: behaves like an empty/closed stream (read() reports EOF)
  public static InputStream in = new InputStream() {
    public int available() { return 0; }
    public int read() { return -1; }
    public int read(byte[] b, int off, int len) { return 0; }
    public int read(byte[] b) { return 0; }
  };

  public static PrintStream out;
  public static PrintStream err;

  static {
    out = createSystemOut();
    err = createSystemErr();

    // Populate system properties from the flat array supplied by the native
    // peer: even indices are keys, odd indices are the corresponding values
    properties = new Properties();

    String[] kv = getKeyValuePairs();
    for (int i=0; i<kv.length; i+=2){
      String key = kv[i];
      String val = kv[i+1];

      if (key != null && val != null) {
        properties.put(kv[i], kv[i+1]);
      }
    }

    // this is the Java 6 sun.misc.SharedSecrets backdoor mechanism which I
    // would have prefered not to learn about. It's a mess WRT Java 1.5 / 6 compatibility
    // <2do> - most if this isn't supported yet
    SharedSecrets.setJavaLangAccess( createJavaLangAccess());

    // <2do> this is an approximation that isn't particularly safe since we don't
    // initialize sun.misc.VM
    //sun.misc.VM.booted();
  }

  // Builds the JavaLangAccess backdoor object; most operations are not yet
  // supported by this model and throw UnsupportedOperationException
  static JavaLangAccess createJavaLangAccess () {
    return new JavaLangAccess(){
      @Override
      public ConstantPool getConstantPool(Class<?> cls) {
        throw new UnsupportedOperationException("JavaLangAccess.getConstantPool() not supported yet");
        //return cls.getConstantPool();
      }
      @Override
      public void setAnnotationType(Class<?> cls, AnnotationType type) {
        throw new UnsupportedOperationException("JavaLangAccess.setAnnotationType() not supported yet");
        //cls.setAnnotationType(type);
      }
      @Override
      public AnnotationType getAnnotationType(Class<?> cls) {
        throw new UnsupportedOperationException("JavaLangAccess.getAnnotationType() not supported yet");
        //return cls.getAnnotationType();
      }
      @Override
      public <E extends Enum<E>> E[] getEnumConstantsShared(Class<E> cls) {
        return cls.getEnumConstantsShared();
      }
      @Override
      public void blockedOn(Thread t, Interruptible b) {
        throw new UnsupportedOperationException("JavaLangAccess.blockedOn() not supported yet");
        //t.blockedOn(b);
      }
      @Override
      public void registerShutdownHook(int slot, Runnable r) {
        throw new UnsupportedOperationException("JavaLangAccess.registerShutdownHook() not supported yet");
      }
      @Override
      public int getStackTraceDepth(Throwable t) {
        return t.getStackTraceDepth();
      }
      @Override
      public StackTraceElement getStackTraceElement(Throwable t, int i) {
        StackTraceElement[] st = t.getStackTrace();
        return st[i];
      }
    };
  }

  // Native peers supply the property pairs and the stdout/stderr streams
  static private native String[] getKeyValuePairs();
  static private native PrintStream createSystemOut();
  static private native PrintStream createSystemErr();

  //--- standard streams
  public static void setIn (InputStream newIn) {
    in = newIn;
  }

  public static void setOut (PrintStream newOut){
    out = newOut;
  }

  public static void setErr (PrintStream newErr) {
    err = newErr;
  }

  public static Channel inheritedChannel() {
    throw new UnsupportedOperationException("inheritedChannel() not yet supported");
  }

  //--- misc
  public static native void exit (int rc);

  public static native void arraycopy (Object src, int srcPos,
                                       Object dst, int dstPos, int len);

  public static native void gc();

  public static native void runFinalization();

  public static native void runFinalizersOnExit(boolean cond);

  static native Class<?> getCallerClass();

  public static native int identityHashCode (Object o);

  //--- time management
  public static native long currentTimeMillis();
  public static native long nanoTime();

  //--- environment
  public static native String getenv (String key);

  public static Map<String,String> getenv() {
    throw new UnsupportedOperationException("getenv() not yet supported");
  }

  //--- security manager
  static SecurityManager securityManager;

  public static void setSecurityManager (SecurityManager newManager) {
    securityManager = newManager;
  }

  public static SecurityManager getSecurityManager() {
    return securityManager;
  }

  //--- system properties
  public static Properties getProperties() {
    return properties;
  }

  public static void setProperties(Properties newProps){
    properties = newProps;
  }

  public static String getProperty (String key) {
    return properties.getProperty(key);
  }

  // Returns the property value, or 'def' when the key is not set
  public static String getProperty (String key, String def){
    String v = properties.getProperty(key);
    if (v == null){
      return def;
    } else {
      return v;
    }
  }

  // Sets the property and returns its previous value (or null)
  public static String setProperty (String key, String value){
    String oldVal = properties.getProperty(key);
    properties.put(key,value);
    return oldVal;
  }

  // Removes the property and returns its previous value (or null)
  public static String clearProperty (String key) {
    String oldVal = properties.getProperty(key);
    properties.remove(key);
    return oldVal;
  }

  //--- native libs
  public static void load (String pathName) {
    // nothing, we don't have native libs
    // (maybe we could on-demand load peers?)
  }

  public static void loadLibrary (String libName){
    // nothing yet
  }

  public static String mapLibraryName (String libName){
    // just a placeholder (Unix flavor)
    return "lib" + libName + ".so";
  }
}
| 2,136 |
378 |
<gh_stars>100-1000
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tomee.jul.formatter;
import org.apache.juli.AsyncFileHandler;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.logging.ConsoleHandler;
import java.util.logging.Filter;
import java.util.logging.Formatter;
import java.util.logging.Level;
import java.util.logging.LogManager;
import java.util.logging.LogRecord;
/**
 * A JULI AsyncFileHandler whose publishing is delegated to a
 * java.util.logging ConsoleHandler, so records are written asynchronously to
 * the console instead of a file. Level, filter, formatter and encoding are
 * read from LogManager properties keyed by this class' name, mirroring
 * ConsoleHandler's own configuration mechanism.
 */
public class AsyncConsoleHandler extends AsyncFileHandler {
    private final ConsoleHandler delegate = new ConsoleHandler() {{
        setFormatter(new SingleLineFormatter()); // console -> dev. File uses plain old format
    }};

    @Override
    protected void publishInternal(final LogRecord record) {
        // Invoked from the async logging machinery; hand off to the console
        delegate.publish(record);
    }

    public AsyncConsoleHandler() {
        // Configure the delegate from properties keyed by this class' name
        // (e.g. "...AsyncConsoleHandler.level"); configuration failures are
        // silently ignored, matching ConsoleHandler's own behavior
        LogManager manager = LogManager.getLogManager();
        String cname = getClass().getName();

        final String lvl = manager.getProperty(cname + ".level");
        if (lvl != null) {
            delegate.setLevel(Level.parse(lvl));
        }
        final String filter = manager.getProperty(cname + ".filter");
        if (filter != null) {
            try {
                delegate.setFilter(Filter.class.cast(ClassLoader.getSystemClassLoader().loadClass(filter).newInstance()));
            } catch (final InstantiationException | IllegalAccessException | ClassNotFoundException e) {
                // no-op like delegate
            }
        }
        final String formatter = manager.getProperty(cname + ".formatter");
        if (formatter != null) {
            try {
                delegate.setFormatter(Formatter.class.cast(ClassLoader.getSystemClassLoader().loadClass(formatter).newInstance()));
            } catch (final InstantiationException | IllegalAccessException | ClassNotFoundException e) {
                // no-op like delegate
            }
        }
        try {
            delegate.setEncoding(manager.getProperty(cname +".encoding"));
        } catch (final Exception ex) {
            // no-op
        }
    }

    @Override
    public void close() {
        delegate.close();
    }

    @Override
    public void flush() {
        delegate.flush();
    }

    // The file-oriented open/close/writer hooks of AsyncFileHandler are
    // intentionally disabled: all output goes through the console delegate
    @Override
    protected void open() {
        // no-op
    }

    @Override
    protected void closeWriter() {
        // no-op
    }

    @Override
    protected void openWriter() {
        // no-op
    }

    // copy cause of classloading
    // Formats records as "LEVEL - message\n", appending the stack trace when
    // the record carries a Throwable
    private static class SingleLineFormatter extends Formatter {
        private static final String SEP = System.getProperty("line.separator", "\n");

        @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
        @Override
        public synchronized String format(final LogRecord record) {
            final boolean exception = record.getThrown() != null;
            final StringBuilder sbuf = new StringBuilder();
            sbuf.append(record.getLevel().getLocalizedName());
            sbuf.append(" - ");
            sbuf.append(this.formatMessage(record));
            sbuf.append(SEP);
            if (exception) {
                try {
                    final StringWriter sw = new StringWriter();
                    final PrintWriter pw = new PrintWriter(sw);
                    record.getThrown().printStackTrace(pw);
                    pw.close();
                    sbuf.append(sw.toString());
                } catch (final Exception ex) {
                    // no-op
                }
            }
            return sbuf.toString();
        }
    }
}
| 1,683 |
1,755 |
/*=========================================================================
Program: Visualization Toolkit
Module: vtkXdmfHeavyData.cxx
Copyright (c) <NAME>, <NAME>, <NAME>
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
#include "vtkXdmfHeavyData.h"
#include "vtkCellArray.h"
#include "vtkCellData.h"
#include "vtkCellTypes.h"
#include "vtkDataArrayRange.h"
#include "vtkDataObjectTypes.h"
#include "vtkDoubleArray.h"
#include "vtkExtractSelectedIds.h"
#include "vtkFloatArray.h"
#include "vtkIdTypeArray.h"
#include "vtkInformation.h"
#include "vtkMath.h"
#include "vtkMergePoints.h"
#include "vtkMultiBlockDataSet.h"
#include "vtkNew.h"
#include "vtkObjectFactory.h"
#include "vtkPointData.h"
#include "vtkPolyData.h"
#include "vtkRectilinearGrid.h"
#include "vtkSelection.h"
#include "vtkSelectionNode.h"
#include "vtkSmartPointer.h"
#include "vtkStructuredData.h"
#include "vtkStructuredGrid.h"
#include "vtkUniformGrid.h"
#include "vtkUnsignedCharArray.h"
#include "vtkUnstructuredGrid.h"
#include "vtkXdmfDataArray.h"
#include "vtkXdmfReader.h"
#include "vtkXdmfReaderInternal.h"
#include <algorithm>
#include <cassert>
#include <deque>
#include <numeric>
#include <type_traits>
#include <vector>
#include "vtk_libxml2.h"
#include VTKLIBXML2_HEADER(tree.h)
// Match the XDMF integer width to the vtkIdType width chosen at VTK
// configure time, so id values can be exchanged without narrowing.
#ifdef VTK_USE_64BIT_IDS
typedef XdmfInt64 vtkXdmfIdType;
#else
typedef XdmfInt32 vtkXdmfIdType;
#endif

// The Xdmf classes used throughout this file live in the xdmf2 namespace.
using namespace xdmf2;
// Divide each axis of the inclusive [min,max] extent by that axis's sampling
// stride (integer division), producing the strided extent.
static void vtkScaleExtents(int in_exts[6], int out_exts[6], int stride[3])
{
  for (int axis = 0; axis < 3; ++axis)
  {
    out_exts[2 * axis] = in_exts[2 * axis] / stride[axis];
    out_exts[2 * axis + 1] = in_exts[2 * axis + 1] / stride[axis];
  }
}
// Convert an inclusive [min,max] extent into the point count per axis.
static void vtkGetDims(int exts[6], int dims[3])
{
  for (int axis = 0; axis < 3; ++axis)
  {
    dims[axis] = exts[2 * axis + 1] - exts[2 * axis] + 1;
  }
}
//------------------------------------------------------------------------------
vtkXdmfHeavyData::vtkXdmfHeavyData(vtkXdmfDomain* domain, vtkAlgorithm* reader)
{
  // Domain and Reader are stored as plain pointers; this helper does not
  // take ownership of either (no reference counting here).
  this->Reader = reader;
  this->Piece = 0;
  this->NumberOfPieces = 0;
  this->GhostLevels = 0;
  // Extents start out invalid (max < min), so the Request*/Read* methods
  // fall back to the whole extent unless the caller sets an update extent.
  this->Extents[0] = this->Extents[2] = this->Extents[4] = 0;
  this->Extents[1] = this->Extents[3] = this->Extents[5] = -1;
  this->Domain = domain;
  // Stride of 1 on every axis means "read every sample".
  this->Stride[0] = this->Stride[1] = this->Stride[2] = 1;
}
//------------------------------------------------------------------------------
vtkXdmfHeavyData::~vtkXdmfHeavyData() = default;
//------------------------------------------------------------------------------
vtkDataObject* vtkXdmfHeavyData::ReadData()
{
  // Top-level entry point: read the domain's single grid directly, or
  // assemble every enabled top-level grid into a vtkMultiBlockDataSet.
  if (this->Domain->GetNumberOfGrids() == 1)
  {
    // There's just 1 grid. Now in serial, this is all good. In parallel, we
    // need to be careful:
    // 1. If the data is structured, we respect the update-extent and read
    //    accordingly.
    // 2. If the data is unstructured, we read only on the root node. The user
    //    can apply D3 or something to repartition the data.
    return this->ReadData(this->Domain->GetGrid(0));
  }

  // this code is similar to ReadComposite() however we cannot use the same code
  // since the API for getting the children differs on the domain and the grid.

  bool distribute_leaf_nodes = this->NumberOfPieces > 1;
  XdmfInt32 numChildren = this->Domain->GetNumberOfGrids();
  int number_of_leaf_nodes = 0;

  vtkMultiBlockDataSet* mb = vtkMultiBlockDataSet::New();
  mb->SetNumberOfBlocks(numChildren);

  for (XdmfInt32 cc = 0; cc < numChildren; cc++)
  {
    XdmfGrid* xmfChild = this->Domain->GetGrid(cc);
    mb->GetMetaData(cc)->Set(vtkCompositeDataSet::NAME(), xmfChild->GetName());
    bool child_is_leaf = (xmfChild->IsUniform() != 0);
    // Leaf grids are dealt out round-robin across pieces; non-leaf grids are
    // visited on every piece so the block structure matches on all ranks.
    if (!child_is_leaf || !distribute_leaf_nodes ||
      (number_of_leaf_nodes % this->NumberOfPieces) == this->Piece)
    {
      // it's possible that the data has way too many blocks, in which case the
      // reader didn't present the user with capabilities to select the actual
      // leaf node blocks as is the norm, instead only top-level grids were
      // shown. In that case we need to ensure that we skip grids the user
      // wanted us to skip explicitly.
      if (!this->Domain->GetGridSelection()->ArrayIsEnabled(xmfChild->GetName()))
      {
        continue;
      }
      vtkDataObject* childDO = this->ReadData(xmfChild);
      if (childDO)
      {
        mb->SetBlock(cc, childDO);
        childDO->Delete();
      }
    }
    number_of_leaf_nodes += child_is_leaf ? 1 : 0;
  }

  return mb;
}
//------------------------------------------------------------------------------
// Dispatch a single grid to the reader matching its kind: temporal
// collections, spatial collections/trees, or primitive ("Uniform") grids.
vtkDataObject* vtkXdmfHeavyData::ReadData(XdmfGrid* xmfGrid, int blockId)
{
  // sanity check-ensure that the xmfGrid is valid.
  if (xmfGrid == nullptr || xmfGrid->GetGridType() == XDMF_GRID_UNSET)
  {
    return nullptr;
  }

  const XdmfInt32 gridType = (xmfGrid->GetGridType() & XDMF_GRID_MASK);
  const bool isTemporalCollection = (gridType == XDMF_GRID_COLLECTION) &&
    (xmfGrid->GetCollectionType() == XDMF_GRID_COLLECTION_TEMPORAL);

  if (isTemporalCollection)
  {
    // grid is a temporal collection, pick the sub-grid with matching time and
    // process that.
    return this->ReadTemporalCollection(xmfGrid, blockId);
  }
  if (gridType == XDMF_GRID_COLLECTION || gridType == XDMF_GRID_TREE)
  {
    return this->ReadComposite(xmfGrid);
  }

  // grid is a primitive grid, so read the data.
  return this->ReadUniformData(xmfGrid, blockId);
}
//------------------------------------------------------------------------------
// Reads a spatial collection or tree grid into a vtkMultiBlockDataSet,
// preserving child order (one block per child; skipped children stay null).
vtkDataObject* vtkXdmfHeavyData::ReadComposite(XdmfGrid* xmfComposite)
{
  assert(((xmfComposite->GetGridType() & XDMF_GRID_COLLECTION &&
            xmfComposite->GetCollectionType() != XDMF_GRID_COLLECTION_TEMPORAL) ||
           (xmfComposite->GetGridType() & XDMF_GRID_TREE)) &&
    "Input must be a spatial collection or a tree");

  vtkMultiBlockDataSet* multiBlock = vtkMultiBlockDataSet::New();
  XdmfInt32 numChildren = xmfComposite->GetNumberOfChildren();
  multiBlock->SetNumberOfBlocks(numChildren);

  // Only spatial collections are distributed across pieces in parallel;
  // trees (and serial reads) load every child everywhere.
  bool distribute_leaf_nodes =
    (xmfComposite->GetGridType() & XDMF_GRID_COLLECTION && this->NumberOfPieces > 1);
  int number_of_leaf_nodes = 0;
  for (XdmfInt32 cc = 0; cc < numChildren; cc++)
  {
    XdmfGrid* xmfChild = xmfComposite->GetChild(cc);
    multiBlock->GetMetaData(cc)->Set(vtkCompositeDataSet::NAME(), xmfChild->GetName());
    bool child_is_leaf = (xmfChild->IsUniform() != 0);
    // Leaf grids are dealt out round-robin; non-leaf grids are read on every
    // piece so the composite structure stays identical across ranks.
    if (!child_is_leaf || !distribute_leaf_nodes ||
      (number_of_leaf_nodes % this->NumberOfPieces) == this->Piece)
    {
      vtkDataObject* childDO = this->ReadData(xmfChild, cc);
      if (childDO)
      {
        multiBlock->SetBlock(cc, childDO);
        childDO->Delete();
      }
    }
    number_of_leaf_nodes += child_is_leaf ? 1 : 0;
  }

  return multiBlock;
}
//------------------------------------------------------------------------------
// Reads the sub-grid(s) of a temporal collection that match the requested
// time (this->Time). A single match is returned directly; multiple matches
// are wrapped in a vtkMultiBlockDataSet; no match returns nullptr.
vtkDataObject* vtkXdmfHeavyData::ReadTemporalCollection(
  XdmfGrid* xmfTemporalCollection, int blockId)
{
  assert(xmfTemporalCollection->GetGridType() & XDMF_GRID_COLLECTION &&
    xmfTemporalCollection->GetCollectionType() == XDMF_GRID_COLLECTION_TEMPORAL &&
    "Input must be a temporal collection");

  // Find the children that are valid for the requested time (this->Time) and
  // read only those.

  // FIXME: I am tempted to remove support for supporting multiple matching
  // sub-grids for a time-step since that changes the composite data hierarchy
  // over time which makes it hard to use filters such as vtkExtractBlock etc.

  std::deque<XdmfGrid*> valid_children;
  for (XdmfInt32 cc = 0; cc < xmfTemporalCollection->GetNumberOfChildren(); cc++)
  {
    XdmfGrid* child = xmfTemporalCollection->GetChild(cc);
    if (child)
    {
      // ensure that we set correct epsilon for comparison
      // BUG #0013766.
      child->GetTime()->SetEpsilon(VTK_DBL_EPSILON);
      if (child->GetTime()->IsValid(this->Time, this->Time))
      {
        valid_children.push_back(child);
      }
    }
  }
  // if no child matched this timestep, handle the case where the user didn't
  // specify any <Time /> element for the temporal collection.
  for (XdmfInt32 cc = 0;
       valid_children.empty() && cc < xmfTemporalCollection->GetNumberOfChildren(); cc++)
  {
    XdmfGrid* child = xmfTemporalCollection->GetChild(cc);
    if (child && child->GetTime()->GetTimeType() == XDMF_TIME_UNSET)
    {
      valid_children.push_back(child);
    }
  }

  if (valid_children.empty())
  {
    return nullptr;
  }

  std::deque<vtkSmartPointer<vtkDataObject>> child_data_objects;
  std::deque<XdmfGrid*>::iterator iter;
  for (iter = valid_children.begin(); iter != valid_children.end(); ++iter)
  {
    vtkDataObject* childDO = this->ReadData(*iter, blockId);
    if (childDO)
    {
      child_data_objects.emplace_back(childDO);
      childDO->Delete();
    }
  }

  if (child_data_objects.size() == 1)
  {
    // Single match: hand the caller its own reference (the smart-pointer
    // deque releases the one it holds when it goes out of scope).
    vtkDataObject* dataObject = child_data_objects[0];
    dataObject->Register(nullptr);
    return dataObject;
  }
  else if (child_data_objects.size() > 1)
  {
    // Multiple matches: wrap them in a multiblock (see FIXME above).
    vtkMultiBlockDataSet* mb = vtkMultiBlockDataSet::New();
    mb->SetNumberOfBlocks(static_cast<unsigned int>(child_data_objects.size()));
    for (unsigned int cc = 0; cc < static_cast<unsigned int>(child_data_objects.size()); cc++)
    {
      mb->SetBlock(cc, child_data_objects[cc]);
    }
    return mb;
  }

  return nullptr;
}
//------------------------------------------------------------------------------
// Read a non-composite grid. Note here uniform has nothing to do with
// vtkUniformGrid but to what Xdmf's GridType="Uniform".
//
// The grid's geometry/topology heavy data is cached per blockId, keyed on
// the text content (heavy-data paths) of the topology and geometry DataItem
// elements. On a cache hit only the attribute arrays are re-read.
vtkDataObject* vtkXdmfHeavyData::ReadUniformData(XdmfGrid* xmfGrid, int blockId)
{
  assert(xmfGrid->IsUniform() && "Input must be a uniform xdmf grid.");

  int vtk_data_type = this->Domain->GetVTKDataType(xmfGrid);

  if (!this->Domain->GetGridSelection()->ArrayIsEnabled(xmfGrid->GetName()))
  {
    // simply create an empty data-object of the correct type and return it.
    return vtkDataObjectTypes::NewDataObject(vtk_data_type);
  }

  // Read heavy data for grid geometry/topology. This does not read any
  // data-arrays. They are read explicitly.
  XdmfTopology* topo = xmfGrid->GetTopology();
  XdmfGeometry* geom = xmfGrid->GetGeometry();
  xmlChar* filePtr;

  // Caching is only possible when both the topology and the geometry carry a
  // DataItem element with non-empty text content to serve as a cache key.
  bool caching = true;
  XdmfDOM* topoDom = topo->GetDOM();
  XdmfXmlNode topoNode = topo->GetElement();
  XdmfXmlNode topoNodeDataItem = topoDom->FindElement("DataItem", 0, topoNode);
  std::string topoFilename = "NULL";
  if (topoNodeDataItem && caching)
  {
    filePtr = topoNodeDataItem->children->content;
    if (filePtr != nullptr)
    {
      topoFilename = reinterpret_cast<char*>(filePtr);
    }
    else
    {
      // vtkErrorWithObjectMacro(this->Reader, << "Cannot find DataItem element in topology xml, no
      // caching possible");
      caching = false;
    }
  }
  else
  {
    caching = false;
  }

  XdmfDOM* geomDom = geom->GetDOM();
  XdmfXmlNode geomNode = geom->GetElement();
  XdmfXmlNode geomNodeDataItem = geomDom->FindElement("DataItem", 0, geomNode);
  std::string geomFilename = "NULL";
  if (geomNodeDataItem && caching)
  {
    filePtr = geomNodeDataItem->children->content;
    if (filePtr != nullptr)
    {
      geomFilename = reinterpret_cast<char*>(filePtr);
    }
    else
    {
      vtkErrorWithObjectMacro(
        this->Reader, << "Cannot find DataItem element in geometry xml, no caching possible");
      caching = false;
    }
  }
  else
  {
    caching = false;
  }

  // NOTE(review): this assumes this->Reader is always a vtkXdmfReader;
  // SafeDownCast would return nullptr otherwise -- confirm with callers.
  vtkXdmfReader::XdmfReaderCachedData& cache =
    vtkXdmfReader::SafeDownCast(this->Reader)->GetDataSetCache();
  vtkXdmfReader::XdmfDataSetTopoGeoPath& cachedData = cache[blockId];
  if (caching && (cachedData.topologyPath == topoFilename) &&
    (cachedData.geometryPath == geomFilename))
  {
    // Cache hit: clone the cached dataset's structure cheaply and re-read
    // only the attribute arrays for this request.
    vtkDataSet* ds = vtkDataSet::SafeDownCast(
      vtkDataObjectTypes::NewDataObject(cachedData.dataset->GetDataObjectType()));
    ds->ShallowCopy(cachedData.dataset);
    this->ReadAttributes(ds, xmfGrid);
    return ds;
  }

  // Cache miss: remember the new keys and drop the stale dataset.
  if (caching)
  {
    cachedData.topologyPath = topoFilename;
    cachedData.geometryPath = geomFilename;
    if (cache[blockId].dataset != nullptr)
    {
      cache[blockId].dataset->Delete();
      cache[blockId].dataset = nullptr;
    }
  }

  XdmfInt32 status = xmfGrid->Update();
  if (status == XDMF_FAIL)
  {
    return nullptr;
  }

  vtkDataObject* dataObject = nullptr;

  switch (vtk_data_type)
  {
    case VTK_UNIFORM_GRID:
      dataObject = this->RequestImageData(xmfGrid, true);
      break;

    case VTK_IMAGE_DATA:
      dataObject = this->RequestImageData(xmfGrid, false);
      break;

    case VTK_STRUCTURED_GRID:
      dataObject = this->RequestStructuredGrid(xmfGrid);
      break;

    case VTK_RECTILINEAR_GRID:
      dataObject = this->RequestRectilinearGrid(xmfGrid);
      break;

    case VTK_UNSTRUCTURED_GRID:
      dataObject = this->ReadUnstructuredGrid(xmfGrid);
      break;

    default:
      // un-handled case.
      return nullptr;
  }

  // The Request*/Read* helpers above return nullptr on failure; guard the
  // cache update so we never dereference a null pointer (previously
  // dataObject->Register() was called unconditionally when caching).
  if (caching && dataObject)
  {
    cache[blockId].dataset = vtkDataSet::SafeDownCast(dataObject);
    dataObject->Register(nullptr);
  }
  return dataObject;
}
//------------------------------------------------------------------------------
// Returns the fixed node count for the given VTK cell type, 0 for cell types
// with a variable node count (the count then follows the cell-type id in the
// mixed-topology connectivity stream -- see ReadUnstructuredGrid), and -1
// for unsupported types.
int vtkXdmfHeavyData::GetNumberOfPointsPerCell(int vtk_cell_type)
{
  switch (vtk_cell_type)
  {
    case VTK_POLY_VERTEX:
      return 0;
    case VTK_POLY_LINE:
      return 0;
    case VTK_POLYGON:
      return 0;
    case VTK_TRIANGLE:
      return 3;
    case VTK_QUAD:
      return 4;
    case VTK_TETRA:
      return 4;
    case VTK_PYRAMID:
      return 5;
    case VTK_WEDGE:
      return 6;
    case VTK_HEXAHEDRON:
      return 8;
    case VTK_QUADRATIC_EDGE:
      return 3;
    case VTK_QUADRATIC_TRIANGLE:
      return 6;
    case VTK_QUADRATIC_QUAD:
      return 8;
    case VTK_BIQUADRATIC_QUAD:
      return 9;
    case VTK_QUADRATIC_TETRA:
      return 10;
    case VTK_QUADRATIC_PYRAMID:
      return 13;
    case VTK_QUADRATIC_WEDGE:
      return 15;
    case VTK_BIQUADRATIC_QUADRATIC_WEDGE:
      return 18;
    case VTK_QUADRATIC_HEXAHEDRON:
      return 20;
    case VTK_BIQUADRATIC_QUADRATIC_HEXAHEDRON:
      return 24;
    case VTK_TRIQUADRATIC_HEXAHEDRON:
      // XDMF_HEX_27 / vtkTriQuadraticHexahedron is a 27-node cell
      // (8 corners + 12 edge + 6 face + 1 body-center). Returning 24 here
      // (as before) misaligned the mixed-topology connectivity parsing.
      return 27;
  }
  return -1;
}
//------------------------------------------------------------------------------
// Maps an XDMF topology type to the corresponding VTK cell type.
// XDMF_MIXED is flagged with the sentinel VTK_NUMBER_OF_CELL_TYPES (the
// caller then reads per-cell types from the connectivity stream);
// unrecognized types yield VTK_EMPTY_CELL.
int vtkXdmfHeavyData::GetVTKCellType(XdmfInt32 topologyType)
{
  switch (topologyType)
  {
    case XDMF_POLYVERTEX:
      return VTK_POLY_VERTEX;
    case XDMF_POLYLINE:
      return VTK_POLY_LINE;
    case XDMF_POLYGON:
      return VTK_POLYGON; // FIXME: should this not be treated as mixed?
    case XDMF_TRI:
      return VTK_TRIANGLE;
    case XDMF_QUAD:
      return VTK_QUAD;
    case XDMF_TET:
      return VTK_TETRA;
    case XDMF_PYRAMID:
      return VTK_PYRAMID;
    case XDMF_WEDGE:
      return VTK_WEDGE;
    case XDMF_HEX:
      return VTK_HEXAHEDRON;
    case XDMF_EDGE_3:
      return VTK_QUADRATIC_EDGE;
    case XDMF_TRI_6:
      return VTK_QUADRATIC_TRIANGLE;
    case XDMF_QUAD_8:
      return VTK_QUADRATIC_QUAD;
    case XDMF_QUAD_9:
      return VTK_BIQUADRATIC_QUAD;
    case XDMF_TET_10:
      return VTK_QUADRATIC_TETRA;
    case XDMF_PYRAMID_13:
      return VTK_QUADRATIC_PYRAMID;
    case XDMF_WEDGE_15:
      return VTK_QUADRATIC_WEDGE;
    case XDMF_WEDGE_18:
      return VTK_BIQUADRATIC_QUADRATIC_WEDGE;
    case XDMF_HEX_20:
      return VTK_QUADRATIC_HEXAHEDRON;
    case XDMF_HEX_24:
      return VTK_BIQUADRATIC_QUADRATIC_HEXAHEDRON;
    case XDMF_HEX_27:
      return VTK_TRIQUADRATIC_HEXAHEDRON;
    case XDMF_MIXED:
      return VTK_NUMBER_OF_CELL_TYPES;
  }
  // XdmfErrorMessage("Unknown Topology Type = "
  //   << xmfGrid->GetTopology()->GetTopologyType());
  return VTK_EMPTY_CELL;
}
//------------------------------------------------------------------------------
// Reads an Xdmf "Uniform" grid with unstructured topology into a
// vtkUnstructuredGrid (or, when the grid defines sets, a multiblock
// wrapping the grid and its sets -- see ReadSets). Returns an owning
// reference, or nullptr on failure.
vtkDataObject* vtkXdmfHeavyData::ReadUnstructuredGrid(XdmfGrid* xmfGrid)
{
  vtkSmartPointer<vtkUnstructuredGrid> ugData = vtkSmartPointer<vtkUnstructuredGrid>::New();

  // BUG #12527. For non-partitioned data, don't read unstructured grid on
  // process id > 0.
  if (this->Piece != 0 && this->Domain->GetNumberOfGrids() == 1 &&
    this->Domain->GetVTKDataType() == VTK_UNSTRUCTURED_GRID &&
    this->Domain->GetSetsSelection()->GetNumberOfArrays() == 0)
  {
    // Return an empty grid; the extra Register balances the smart pointer's
    // release so the caller receives an owning reference.
    ugData->Register(nullptr);
    return ugData;
  }

  XdmfTopology* xmfTopology = xmfGrid->GetTopology();
  XdmfArray* xmfConnectivity = xmfTopology->GetConnectivity();

  int vtk_cell_type = vtkXdmfHeavyData::GetVTKCellType(xmfTopology->GetTopologyType());

  if (vtk_cell_type == VTK_EMPTY_CELL)
  {
    // invalid topology.
    return nullptr;
  }

  if (vtk_cell_type != VTK_NUMBER_OF_CELL_TYPES)
  // i.e. topologyType != XDMF_MIXED
  {
    // all cells are of the same type.
    XdmfInt32 numPointsPerCell = xmfTopology->GetNodesPerElement();

    // FIXME: is this needed, shouldn't xmfTopology->GetNodesPerElement()
    // return the correct value always?
    if (xmfConnectivity->GetRank() == 2)
    {
      numPointsPerCell = xmfConnectivity->GetDimension(1);
    }

    /* Create Cell Type Array */
    XdmfInt64 conn_length = xmfConnectivity->GetNumberOfElements();
    std::vector<XdmfInt64> xmfConnections(conn_length);
    xmfConnectivity->GetValues(0, xmfConnections.data(), conn_length);

    vtkIdType numCells = xmfTopology->GetShapeDesc()->GetNumberOfElements();

    vtkNew<vtkIdTypeArray> conn;
    vtkNew<vtkIdTypeArray> offsets;

    offsets->SetNumberOfTuples(numCells + 1);

    { // Fill offsets: {0, 1 * cellSize, 2 * cellSize, ..., numCells * cellSize}
      // The generator starts at -cellSize so the first generated value is 0.
      vtkIdType offset = -numPointsPerCell;
      auto generator = [&]() -> vtkIdType { return offset += numPointsPerCell; };
      auto range = vtk::DataArrayValueRange<1>(offsets);
      std::generate(range.begin(), range.end(), generator);
    }

    conn->SetNumberOfTuples(numPointsPerCell * numCells);

    // Fill connections (just copy xmfConnections)
    { // Need to convert explicitly to silence warnings:
      auto range = vtk::DataArrayValueRange<1>(conn);
      std::transform(xmfConnections.cbegin(),
        xmfConnections.cbegin() + (numPointsPerCell * numCells), range.begin(),
        [](XdmfInt64 val) -> vtkIdType { return static_cast<vtkIdType>(val); });
    }

    // Construct and set the cell array
    vtkNew<vtkCellArray> cells;
    cells->SetData(offsets, conn);
    ugData->SetCells(vtk_cell_type, cells);
  }
  else
  {
    // We have cells with mixed types.
    // The mixed connectivity stream is laid out per cell as:
    //   cellType [numPoints, only for variable-size cells] ptId0 ptId1 ...
    XdmfInt64 conn_length = xmfGrid->GetTopology()->GetConnectivity()->GetNumberOfElements();
    std::vector<XdmfInt64> xmfConnections(static_cast<size_t>(conn_length));
    xmfConnectivity->GetValues(0, xmfConnections.data(), conn_length);

    vtkIdType numCells = xmfTopology->GetShapeDesc()->GetNumberOfElements();
    vtkNew<vtkUnsignedCharArray> cell_types;
    cell_types->SetNumberOfTuples(numCells);

    vtkNew<vtkIdTypeArray> offsets;
    offsets->SetNumberOfTuples(numCells + 1);

    vtkNew<vtkIdTypeArray> conn;
    // This may be an overestimate; will correct after filling.
    conn->SetNumberOfTuples(static_cast<vtkIdType>(conn_length));

    vtkIdType offset = 0;
    vtkIdType index = 0;
    vtkIdType connIndex = 0;
    for (vtkIdType cc = 0; cc < numCells; cc++)
    {
      int vtk_cell_typeI = this->GetVTKCellType(xmfConnections[index++]);
      XdmfInt32 numPointsPerCell = this->GetNumberOfPointsPerCell(vtk_cell_typeI);
      if (numPointsPerCell == -1)
      {
        // encountered an unknown cell.
        return nullptr;
      }

      if (numPointsPerCell == 0)
      {
        // cell type does not have a fixed number of points in which case the
        // next entry in xmfConnections tells us the number of points.
        numPointsPerCell = xmfConnections[index++];
      }

      cell_types->SetValue(cc, static_cast<unsigned char>(vtk_cell_typeI));
      offsets->SetValue(cc, offset);
      offset += numPointsPerCell;

      for (vtkIdType i = 0; i < numPointsPerCell; i++)
      {
        conn->SetValue(connIndex++, xmfConnections[index++]);
      }
    }
    offsets->SetValue(numCells, offset); // final offset value

    // Resize the Array to the Proper Size
    conn->Resize(connIndex);

    // Create and set the cell array:
    vtkNew<vtkCellArray> cells;
    cells->SetData(offsets, conn);
    ugData->SetCells(cell_types, cells);
  }

  // Read the geometry.
  vtkPoints* points = this->ReadPoints(xmfGrid->GetGeometry());
  if (!points)
  {
    // failed to read points.
    return nullptr;
  }
  ugData->SetPoints(points);
  points->Delete();

  this->ReadAttributes(ugData, xmfGrid);

  // Read ghost cell/point information.
  this->ReadGhostSets(ugData, xmfGrid);

  // If this grid has sets defined on it, then we need to read those as well
  vtkMultiBlockDataSet* sets = this->ReadSets(ugData, xmfGrid);
  if (sets)
  {
    return sets;
  }

  // Hand an owning reference to the caller (balances the smart pointer).
  ugData->Register(nullptr);
  return ugData;
}
// An extent is valid when max >= min on every axis (the reader initializes
// Extents to an invalid {0,-1,...} to mean "read everything").
inline bool vtkExtentsAreValid(int exts[6])
{
  for (int axis = 0; axis < 3; ++axis)
  {
    if (exts[2 * axis + 1] < exts[2 * axis])
    {
      return false;
    }
  }
  return true;
}
// Compare two 6-int extents for equality. Two null pointers compare equal;
// a null paired with a non-null does not.
inline bool vtkExtentsAreEqual(int* exts1, int* exts2)
{
  if (exts1 == exts2)
  {
    // Covers both-null (and, trivially, aliased arrays).
    return true;
  }
  if (!exts1 || !exts2)
  {
    return false;
  }
  return std::equal(exts1, exts1 + 6, exts2);
}
//------------------------------------------------------------------------------
// Reads a rectilinear grid: per-axis coordinate arrays are filled either
// from implicit origin/spacing geometry or from explicit coordinate
// vectors, honoring the update extent and stride. Returns an owning
// reference, or nullptr for unsupported geometry types.
vtkRectilinearGrid* vtkXdmfHeavyData::RequestRectilinearGrid(XdmfGrid* xmfGrid)
{
  vtkSmartPointer<vtkRectilinearGrid> rg = vtkSmartPointer<vtkRectilinearGrid>::New();
  int whole_extents[6];
  int update_extents[6];
  this->Domain->GetWholeExtent(xmfGrid, whole_extents);

  if (!vtkExtentsAreValid(this->Extents))
  {
    // if this->Extents are not valid, then simply read the whole image.
    memcpy(update_extents, whole_extents, sizeof(int) * 6);
  }
  else
  {
    memcpy(update_extents, this->Extents, sizeof(int) * 6);
  }

  // convert to stridden update extents.
  int scaled_extents[6];
  vtkScaleExtents(update_extents, scaled_extents, this->Stride);
  int scaled_dims[3];
  vtkGetDims(scaled_extents, scaled_dims);

  rg->SetExtent(scaled_extents);

  // Now read rectilinear geometry.
  XdmfGeometry* xmfGeometry = xmfGrid->GetGeometry();

  vtkSmartPointer<vtkDoubleArray> xarray = vtkSmartPointer<vtkDoubleArray>::New();
  xarray->SetNumberOfTuples(scaled_dims[0]);

  vtkSmartPointer<vtkDoubleArray> yarray = vtkSmartPointer<vtkDoubleArray>::New();
  yarray->SetNumberOfTuples(scaled_dims[1]);

  vtkSmartPointer<vtkDoubleArray> zarray = vtkSmartPointer<vtkDoubleArray>::New();
  zarray->SetNumberOfTuples(scaled_dims[2]);

  rg->SetXCoordinates(xarray);
  rg->SetYCoordinates(yarray);
  rg->SetZCoordinates(zarray);

  switch (xmfGeometry->GetGeometryType())
  {
    case XDMF_GEOMETRY_ORIGIN_DXDY:
    case XDMF_GEOMETRY_ORIGIN_DXDYDZ:
    {
      // Implicit geometry: coordinate = origin + index * spacing, with the
      // stride folded into the effective spacing.
      XdmfFloat64* origin = xmfGeometry->GetOrigin();
      XdmfFloat64* dxdydz = xmfGeometry->GetDxDyDz();
      for (int cc = scaled_extents[0]; cc <= scaled_extents[1]; cc++)
      {
        xarray->GetPointer(0)[cc - scaled_extents[0]] =
          origin[0] + (dxdydz[0] * cc * this->Stride[0]);
      }
      for (int cc = scaled_extents[2]; cc <= scaled_extents[3]; cc++)
      {
        yarray->GetPointer(0)[cc - scaled_extents[2]] =
          origin[1] + (dxdydz[1] * cc * this->Stride[1]);
      }
      for (int cc = scaled_extents[4]; cc <= scaled_extents[5]; cc++)
      {
        zarray->GetPointer(0)[cc - scaled_extents[4]] =
          origin[2] + (dxdydz[2] * cc * this->Stride[2]);
      }
    }
    break;

    case XDMF_GEOMETRY_VXVY:
    {
      // Note:
      // XDMF and VTK structured extents are reversed
      // Where I varies fastest, VTK's convention is IJK, but XDMF's is KJI
      // However, users naturally don't want VXVY to mean VZVY.
      // Let's accept VisIt's interpretation of this 2D case
      // (KJI is ZXY where Z=0).
      xarray->SetNumberOfTuples(scaled_dims[1]);
      yarray->SetNumberOfTuples(scaled_dims[2]);
      zarray->SetNumberOfTuples(scaled_dims[0]);
      rg->SetExtent(scaled_extents[2], scaled_extents[3], scaled_extents[4], scaled_extents[5],
        scaled_extents[0], scaled_extents[1]);
      xmfGeometry->GetVectorX()->GetValues(
        update_extents[2], xarray->GetPointer(0), scaled_dims[1], this->Stride[1]);
      xmfGeometry->GetVectorY()->GetValues(
        update_extents[4], yarray->GetPointer(0), scaled_dims[2], this->Stride[2]);
      zarray->FillComponent(0, 0);
    }
    break;

    case XDMF_GEOMETRY_VXVYVZ:
    {
      // Explicit per-axis coordinate vectors; GetValues applies the stride
      // while reading the requested sub-extent.
      xmfGeometry->GetVectorX()->GetValues(
        update_extents[0], xarray->GetPointer(0), scaled_dims[0], this->Stride[0]);
      xmfGeometry->GetVectorY()->GetValues(
        update_extents[2], yarray->GetPointer(0), scaled_dims[1], this->Stride[1]);
      xmfGeometry->GetVectorZ()->GetValues(
        update_extents[4], zarray->GetPointer(0), scaled_dims[2], this->Stride[2]);
    }
    break;

    default:
      vtkErrorWithObjectMacro(this->Reader,
        "Geometry type : " << xmfGeometry->GetGeometryTypeAsString() << " is not supported for "
                           << xmfGrid->GetTopology()->GetTopologyTypeAsString());
      return nullptr;
  }

  this->ReadAttributes(rg, xmfGrid, update_extents);

  // Hand an owning reference to the caller (balances the smart pointer).
  rg->Register(nullptr);
  return rg;
}
//------------------------------------------------------------------------------
// Reads a curvilinear (structured) grid: explicit point coordinates over a
// structured extent, honoring the update extent and stride. Returns an
// owning reference, or nullptr when the points cannot be read.
vtkStructuredGrid* vtkXdmfHeavyData::RequestStructuredGrid(XdmfGrid* xmfGrid)
{
  vtkStructuredGrid* sg = vtkStructuredGrid::New();

  int whole_extents[6];
  int update_extents[6];
  this->Domain->GetWholeExtent(xmfGrid, whole_extents);

  if (!vtkExtentsAreValid(this->Extents))
  {
    // if this->Extents are not valid, then simply read the whole image.
    memcpy(update_extents, whole_extents, sizeof(int) * 6);
  }
  else
  {
    memcpy(update_extents, this->Extents, sizeof(int) * 6);
  }

  int scaled_extents[6];
  vtkScaleExtents(update_extents, scaled_extents, this->Stride);
  sg->SetExtent(scaled_extents);

  vtkPoints* points = this->ReadPoints(xmfGrid->GetGeometry(), update_extents, whole_extents);
  if (!points)
  {
    // ReadPoints returns nullptr for unsupported geometry types or read
    // failures; bail out instead of dereferencing a null pointer below
    // (previously points->Delete() was called unconditionally).
    sg->Delete();
    return nullptr;
  }
  sg->SetPoints(points);
  points->Delete();

  this->ReadAttributes(sg, xmfGrid, update_extents);
  return sg;
}
//------------------------------------------------------------------------------
// Reads an image-data grid (vtkUniformGrid when use_uniform_grid is true):
// geometry is implicit, defined by origin and spacing, with the stride
// folded into the spacing. Returns an owning reference, or nullptr when the
// geometry type does not provide origin/spacing.
vtkImageData* vtkXdmfHeavyData::RequestImageData(XdmfGrid* xmfGrid, bool use_uniform_grid)
{
  vtkImageData* imageData =
    use_uniform_grid ? static_cast<vtkImageData*>(vtkUniformGrid::New()) : vtkImageData::New();

  int whole_extents[6];
  this->Domain->GetWholeExtent(xmfGrid, whole_extents);

  int update_extents[6];
  if (!vtkExtentsAreValid(this->Extents))
  {
    // if this->Extents are not valid, then simply read the whole image.
    memcpy(update_extents, whole_extents, sizeof(int) * 6);
  }
  else
  {
    memcpy(update_extents, this->Extents, sizeof(int) * 6);
  }

  int scaled_extents[6];
  vtkScaleExtents(update_extents, scaled_extents, this->Stride);
  imageData->SetExtent(scaled_extents);

  double origin[3], spacing[3];
  if (!this->Domain->GetOriginAndSpacing(xmfGrid, origin, spacing))
  {
    vtkErrorWithObjectMacro(this->Reader,
      "Could not determine image-data origin and spacing. "
      "Required geometry type is ORIGIN_DXDY or ORIGIN_DXDYDZ. "
      "The specified geometry type is : "
        << xmfGrid->GetGeometry()->GetGeometryTypeAsString());
    // release image data.
    imageData->Delete();
    return nullptr;
  }
  imageData->SetOrigin(origin);
  // Striding thins out samples, so the effective spacing grows accordingly.
  imageData->SetSpacing(
    spacing[0] * this->Stride[0], spacing[1] * this->Stride[1], spacing[2] * this->Stride[2]);
  this->ReadAttributes(imageData, xmfGrid, update_extents);
  return imageData;
}
//------------------------------------------------------------------------------
// Reads a grid's point coordinates. When both update_extents and
// whole_extents are given the points are treated as structured and only the
// strided sub-extent is extracted; otherwise all points are read. Returns
// an owning reference, or nullptr for unsupported geometry types / missing
// point data.
vtkPoints* vtkXdmfHeavyData::ReadPoints(
  XdmfGeometry* xmfGeometry, int* update_extents /*=nullptr*/, int* whole_extents /*=nullptr*/)
{
  XdmfInt32 geomType = xmfGeometry->GetGeometryType();

  if (geomType != XDMF_GEOMETRY_X_Y_Z && geomType != XDMF_GEOMETRY_XYZ &&
    geomType != XDMF_GEOMETRY_X_Y && geomType != XDMF_GEOMETRY_XY)
  {
    return nullptr;
  }

  XdmfArray* xmfPoints = xmfGeometry->GetPoints();
  if (!xmfPoints)
  {
    XdmfErrorMessage("No Points to Set");
    return nullptr;
  }

  vtkSmartPointer<vtkPoints> points = vtkSmartPointer<vtkPoints>::New();

  // Match the VTK point precision to the heavy-data precision so the bulk
  // read below can go straight into the array without conversion.
  if (xmfPoints->GetNumberType() == XDMF_FLOAT32_TYPE)
  {
    vtkFloatArray* da = vtkFloatArray::New();
    da->SetNumberOfComponents(3);
    points->SetData(da);
    da->Delete();
  }
  else // means == XDMF_FLOAT64_TYPE
  {
    vtkDoubleArray* da = vtkDoubleArray::New();
    da->SetNumberOfComponents(3);
    points->SetData(da);
    da->Delete();
  }

  XdmfInt64 numGeometryPoints = xmfGeometry->GetNumberOfPoints();
  vtkIdType numPoints = numGeometryPoints;
  bool structured_data = false;
  if (update_extents && whole_extents)
  {
    // we are reading a sub-extent.
    structured_data = true;
    int scaled_extents[6];
    int scaled_dims[3];
    vtkScaleExtents(update_extents, scaled_extents, this->Stride);
    vtkGetDims(scaled_extents, scaled_dims);
    numPoints = (scaled_dims[0] * scaled_dims[1] * scaled_dims[2]);
  }
  points->SetNumberOfPoints(numPoints);

  if (!structured_data)
  {
    // read all the points.
    switch (points->GetData()->GetDataType())
    {
      case VTK_DOUBLE:
        xmfPoints->GetValues(
          0, reinterpret_cast<double*>(points->GetVoidPointer(0)), numPoints * 3);
        break;

      case VTK_FLOAT:
        xmfPoints->GetValues(0, reinterpret_cast<float*>(points->GetVoidPointer(0)), numPoints * 3);
        break;

      default:
        return nullptr;
    }
  }
  else
  {
    // treating the points as structured points: read the whole coordinate
    // array, then copy out only the samples inside the strided sub-extent.
    std::vector<XdmfFloat64> tempPoints(numGeometryPoints * 3);
    xmfPoints->GetValues(0, tempPoints.data(), numGeometryPoints * 3);
    vtkIdType pointId = 0;
    int xdmf_dims[3];
    vtkGetDims(whole_extents, xdmf_dims);

    for (int z = update_extents[4]; z <= update_extents[5]; z++)
    {
      if ((z - update_extents[4]) % this->Stride[2])
      {
        continue;
      }

      for (int y = update_extents[2]; y <= update_extents[3]; y++)
      {
        if ((y - update_extents[2]) % this->Stride[1])
        {
          continue;
        }

        for (int x = update_extents[0]; x <= update_extents[1]; x++)
        {
          if ((x - update_extents[0]) % this->Stride[0])
          {
            continue;
          }

          int xdmf_index[3] = { x, y, z };
          XdmfInt64 offset = vtkStructuredData::ComputePointId(xdmf_dims, xdmf_index);
          points->SetPoint(pointId, tempPoints[3 * offset], tempPoints[3 * offset + 1],
            tempPoints[3 * offset + 2]);
          pointId++;
        }
      }
    }
  }

  // Hand an owning reference to the caller (balances the smart pointer).
  points->Register(nullptr);
  return points;
}
//------------------------------------------------------------------------------
// Reads every enabled attribute of xmfGrid into the matching field data of
// dataSet (grid-centered -> field data, cell-centered -> cell data,
// node-centered -> point data) and marks active
// scalars/vectors/tensors/global-ids. Face/edge-centered attributes are
// skipped with a warning. Always returns true; failures only skip arrays.
bool vtkXdmfHeavyData::ReadAttributes(vtkDataSet* dataSet, XdmfGrid* xmfGrid, int* update_extents)
{
  int data_dimensionality = vtkXdmfDomain::GetDataDimensionality(xmfGrid);

  int numAttributes = xmfGrid->GetNumberOfAttributes();
  for (int cc = 0; cc < numAttributes; cc++)
  {
    XdmfAttribute* xmfAttribute = xmfGrid->GetAttribute(cc);
    const char* attrName = xmfAttribute->GetName();
    int attrCenter = xmfAttribute->GetAttributeCenter();
    if (!attrName)
    {
      vtkWarningWithObjectMacro(this->Reader, "Skipping unnamed attributes.");
      continue;
    }

    vtkFieldData* fieldData = nullptr;
    // skip disabled arrays.
    switch (attrCenter)
    {
      case XDMF_ATTRIBUTE_CENTER_GRID:
        fieldData = dataSet->GetFieldData();
        break;

      case XDMF_ATTRIBUTE_CENTER_CELL:
        if (!this->Domain->GetCellArraySelection()->ArrayIsEnabled(attrName))
        {
          continue;
        }
        fieldData = dataSet->GetCellData();
        break;

      case XDMF_ATTRIBUTE_CENTER_NODE:
        if (!this->Domain->GetPointArraySelection()->ArrayIsEnabled(attrName))
        {
          continue;
        }
        fieldData = dataSet->GetPointData();
        break;

      case XDMF_ATTRIBUTE_CENTER_FACE:
      case XDMF_ATTRIBUTE_CENTER_EDGE:
      default:
        vtkWarningWithObjectMacro(this->Reader,
          "Skipping attribute " << attrName << " at "
                                << xmfAttribute->GetAttributeCenterAsString());
        continue; // unhandled.
    }

    vtkDataArray* array = this->ReadAttribute(xmfAttribute, data_dimensionality, update_extents);
    if (array)
    {
      array->SetName(attrName);
      fieldData->AddArray(array);
      bool is_active = xmfAttribute->GetActive() != 0;
      vtkDataSetAttributes* attributes = vtkDataSetAttributes::SafeDownCast(fieldData);
      if (attributes)
      {
        // make attribute active: either the file marked it active, or it is
        // the first array of its kind seen on this dataset.
        switch (xmfAttribute->GetAttributeType())
        {
          case XDMF_ATTRIBUTE_TYPE_SCALAR:
            if (is_active || attributes->GetScalars() == nullptr)
            {
              attributes->SetActiveScalars(attrName);
            }
            break;

          case XDMF_ATTRIBUTE_TYPE_VECTOR:
            if (is_active || attributes->GetVectors() == nullptr)
            {
              attributes->SetActiveVectors(attrName);
            }
            break;

          case XDMF_ATTRIBUTE_TYPE_TENSOR:
          case XDMF_ATTRIBUTE_TYPE_TENSOR6:
            if (is_active || attributes->GetTensors() == nullptr)
            {
              attributes->SetActiveTensors(attrName);
            }
            break;

          case XDMF_ATTRIBUTE_TYPE_GLOBALID:
            if (is_active || attributes->GetGlobalIds() == nullptr)
            {
              attributes->SetActiveGlobalIds(attrName);
            }
        }
      }
      array->Delete();
    }
  }
  return true;
}
// used to convert a symmetric tensor to a regular tensor.
template <class T>
void vtkConvertTensor6(T* source, T* dest, vtkIdType numTensors)
{
for (vtkIdType cc = 0; cc < numTensors; cc++)
{
dest[cc * 9 + 0] = source[cc * 6 + 0];
dest[cc * 9 + 1] = source[cc * 6 + 1];
dest[cc * 9 + 2] = source[cc * 6 + 2];
dest[cc * 9 + 3] = source[cc * 6 + 1];
dest[cc * 9 + 4] = source[cc * 6 + 3];
dest[cc * 9 + 5] = source[cc * 6 + 4];
dest[cc * 9 + 6] = source[cc * 6 + 2];
dest[cc * 9 + 7] = source[cc * 6 + 4];
dest[cc * 9 + 8] = source[cc * 6 + 5];
}
}
//------------------------------------------------------------------------------
// Reads a single XDMF attribute into a newly allocated vtkDataArray (the
// caller takes ownership). `data_dimensionality` is the topological
// dimensionality of the grid (1, 2 or 3). `update_extents`, when non-null, is
// a 6-int VTK extent used to hyperslab-select a sub-piece of structured data;
// it is ignored for grid-centered attributes. Returns nullptr on failure.
vtkDataArray* vtkXdmfHeavyData::ReadAttribute(
  XdmfAttribute* xmfAttribute, int data_dimensionality, int* update_extents /*=0*/)
{
  if (!xmfAttribute)
  {
    return nullptr;
  }

  int attrType = xmfAttribute->GetAttributeType();
  int attrCenter = xmfAttribute->GetAttributeCenter();
  int numComponents = 1;

  XdmfDataItem xmfDataItem;
  xmfDataItem.SetDOM(xmfAttribute->GetDOM());
  xmfDataItem.SetElement(xmfAttribute->GetDOM()->FindDataElement(0, xmfAttribute->GetElement()));
  xmfDataItem.UpdateInformation();

  XdmfInt64 data_dims[XDMF_MAX_DIMENSION];
  int data_rank = xmfDataItem.GetDataDesc()->GetShape(data_dims);

  // Map the XDMF attribute type onto the number of VTK tuple components.
  switch (attrType)
  {
    case XDMF_ATTRIBUTE_TYPE_TENSOR:
      numComponents = 9;
      break;
    case XDMF_ATTRIBUTE_TYPE_TENSOR6:
      numComponents = 6;
      break;
    case XDMF_ATTRIBUTE_TYPE_VECTOR:
      numComponents = 3;
      break;
    case XDMF_ATTRIBUTE_TYPE_MATRIX:
      // Matrix width is the fastest-varying (last) dimension of the data item.
      numComponents = data_dims[data_rank - 1];
      break;
    default:
      numComponents = 1;
      break;
  }

  // Handle 2D vectors
  if (attrType == XDMF_ATTRIBUTE_TYPE_VECTOR && data_dims[data_rank - 1] == 2)
  {
    numComponents = 2;
  }

  if (update_extents && attrCenter != XDMF_ATTRIBUTE_CENTER_GRID)
  {
    // for hyperslab selection to work, the data shape must match the topology
    // shape.
    if (data_rank < 0)
    {
      vtkErrorWithObjectMacro(this->Reader, "Unsupported attribute rank: " << data_rank);
      return nullptr;
    }
    if (data_rank > (data_dimensionality + 1))
    {
      vtkErrorWithObjectMacro(
        this->Reader, "The data_dimensionality and topology dimensionality mismatch");
      return nullptr;
    }
    // The start/stride arrays index update_extents/Stride in reverse
    // (indices 4/2/0 and 2/1/0), matching how the counts are filled from
    // scaled_dims[2]/[1]/[0] below.
    XdmfInt64 start[4] = { update_extents[4], update_extents[2], update_extents[0], 0 };
    XdmfInt64 stride[4] = { this->Stride[2], this->Stride[1], this->Stride[0], 1 };
    XdmfInt64 count[4] = { 0, 0, 0, 0 };
    int scaled_dims[3];
    int scaled_extents[6];
    vtkScaleExtents(update_extents, scaled_extents, this->Stride);
    vtkGetDims(scaled_extents, scaled_dims);
    count[0] = (scaled_dims[2] - 1);
    count[1] = (scaled_dims[1] - 1);
    count[2] = (scaled_dims[0] - 1);
    if (data_rank == (data_dimensionality + 1))
    {
      // this refers the number of components in the attribute.
      count[data_dimensionality] = data_dims[data_dimensionality];
    }
    if (attrCenter == XDMF_ATTRIBUTE_CENTER_NODE)
    {
      // Point count is 1 + cell extent if not a single layer
      count[0] += 1; //((update_extents[5] - update_extents[4]) > 0)? 1 : 0;
      count[1] += 1; //((update_extents[3] - update_extents[2]) > 0)? 1 : 0;
      count[2] += 1; //((update_extents[1] - update_extents[0]) > 0)? 1 : 0;
    }
    xmfDataItem.GetDataDesc()->SelectHyperSlab(start, stride, count);
  }

  // Read the (possibly hyperslab-selected) heavy data.
  if (xmfDataItem.Update() == XDMF_FAIL)
  {
    vtkErrorWithObjectMacro(this->Reader, "Failed to read attribute data");
    return nullptr;
  }

  vtkXdmfDataArray* xmfConvertor = vtkXdmfDataArray::New();
  vtkDataArray* dataArray = xmfConvertor->FromXdmfArray(
    xmfDataItem.GetArray()->GetTagName(), 1, data_rank, numComponents, 0);
  xmfConvertor->Delete();

  if (attrType == XDMF_ATTRIBUTE_TYPE_TENSOR6)
  {
    // convert Tensor6 to Tensor.
    vtkDataArray* tensor = dataArray->NewInstance();
    vtkIdType numTensors = dataArray->GetNumberOfTuples();
    tensor->SetNumberOfComponents(9);
    tensor->SetNumberOfTuples(numTensors);

    // Copy Symmetrical Tensor Values to Correct Positions in 3x3 matrix
    void* source = dataArray->GetVoidPointer(0);
    void* dest = tensor->GetVoidPointer(0);
    // Dispatch on the runtime element type; vtkConvertTensor6 (defined above)
    // does the 6 -> 9 component expansion.
    switch (tensor->GetDataType())
    {
      vtkTemplateMacro(vtkConvertTensor6(
        reinterpret_cast<VTK_TT*>(source), reinterpret_cast<VTK_TT*>(dest), numTensors));
    }
    dataArray->Delete();
    return tensor;
  }

  if (attrType == XDMF_ATTRIBUTE_TYPE_VECTOR && numComponents == 2)
  {
    // convert 2D vectors to 3-tuple vectors with 0.0 in the z component
    vtkDataArray* vector3D = dataArray->NewInstance();
    vtkIdType numVectors = dataArray->GetNumberOfTuples();
    vector3D->SetNumberOfComponents(3);
    vector3D->SetNumberOfTuples(numVectors);

    // Add 0.0 to third component of vector
    auto inputRange = vtk::DataArrayTupleRange<2>(dataArray);
    auto outputRange = vtk::DataArrayTupleRange<3>(vector3D);
    for (auto i = 0; i < inputRange.size(); ++i)
    {
      outputRange[i][0] = inputRange[i][0];
      outputRange[i][1] = inputRange[i][1];
      outputRange[i][2] = 0.0;
    }
    dataArray->Delete();
    return vector3D;
  }

  return dataArray;
}
//------------------------------------------------------------------------------
// Read ghost cell/point information. This is simply loaded info a
// vtkGhostType attribute array.
bool vtkXdmfHeavyData::ReadGhostSets(
  vtkDataSet* dataSet, XdmfGrid* xmfGrid, int* vtkNotUsed(update_extents) /*=0*/)
{
  // int data_dimensionality = this->Domain->GetDataDimensionality(xmfGrid);
  for (int cc = 0; cc < xmfGrid->GetNumberOfSets(); cc++)
  {
    XdmfSet* xmfSet = xmfGrid->GetSets(cc);
    int ghost_value = xmfSet->GetGhost();
    if (ghost_value <= 0)
    {
      // not a ghost-set, simply continue.
      continue;
    }

    XdmfInt32 setCenter = xmfSet->GetSetType();
    vtkIdType numElems = 0;
    vtkDataSetAttributes* dsa = nullptr;
    unsigned char ghostFlag = 0;
    switch (setCenter)
    {
      case XDMF_SET_TYPE_NODE:
        dsa = dataSet->GetPointData();
        numElems = dataSet->GetNumberOfPoints();
        ghostFlag = vtkDataSetAttributes::DUPLICATEPOINT;
        break;

      case XDMF_SET_TYPE_CELL:
        dsa = dataSet->GetCellData();
        numElems = dataSet->GetNumberOfCells();
        ghostFlag = vtkDataSetAttributes::DUPLICATECELL;
        break;

      default:
        vtkWarningWithObjectMacro(
          this->Reader, "Only ghost-cells and ghost-nodes are currently supported.");
        continue;
    }

    // Locate the ghost array, creating and zero-filling it lazily on the
    // matching (point or cell) attributes.
    vtkUnsignedCharArray* ghosts =
      vtkArrayDownCast<vtkUnsignedCharArray>(dsa->GetArray(vtkDataSetAttributes::GhostArrayName()));
    if (!ghosts)
    {
      ghosts = vtkUnsignedCharArray::New();
      ghosts->SetName(vtkDataSetAttributes::GhostArrayName());
      ghosts->SetNumberOfComponents(1);
      ghosts->SetNumberOfTuples(numElems);
      ghosts->FillComponent(0, 0);
      dsa->AddArray(ghosts);
      ghosts->Delete();
    }

    unsigned char* ptrGhosts = ghosts->GetPointer(0);

    // Read heavy data. We cannot do anything smart if update_extents or stride
    // is specified here. We have to read the entire set and then prune it.
    xmfSet->Update();

    XdmfArray* xmfIds = xmfSet->GetIds();
    XdmfInt64 numIds = xmfIds->GetNumberOfElements();
    std::vector<XdmfInt64> ids(numIds + 1);
    xmfIds->GetValues(0, ids.data(), numIds);

    // release the heavy data that was read.
    xmfSet->Release();

    for (vtkIdType kk = 0; kk < numIds; kk++)
    {
      // Valid indices are [0, numElems). The previous `> numElems` test let an
      // id equal to numElems slip through, writing one element past the end of
      // the ghost array.
      if (ids[kk] < 0 || ids[kk] >= numElems)
      {
        vtkWarningWithObjectMacro(this->Reader, "No such cell or point exists: " << ids[kk]);
        continue;
      }
      ptrGhosts[ids[kk]] = ghostFlag;
    }
  }
  return true;
}
//------------------------------------------------------------------------------
// Wraps `dataSet` plus one block per non-ghost XdmfSet into a new
// vtkMultiBlockDataSet (block 0 is the data itself, named "Data"). Returns
// nullptr when the grid has no non-ghost sets. Caller owns the result.
vtkMultiBlockDataSet* vtkXdmfHeavyData::ReadSets(
  vtkDataSet* dataSet, XdmfGrid* xmfGrid, int* vtkNotUsed(update_extents) /*=0*/)
{
  // First pass: count the non-ghost sets to size the multi-block output.
  unsigned int number_of_sets = 0;
  for (int cc = 0; cc < xmfGrid->GetNumberOfSets(); cc++)
  {
    XdmfSet* xmfSet = xmfGrid->GetSets(cc);
    int ghost_value = xmfSet->GetGhost();
    if (ghost_value != 0)
    {
      // skip ghost-sets.
      continue;
    }
    number_of_sets++;
  }
  if (number_of_sets == 0)
  {
    return nullptr;
  }

  vtkMultiBlockDataSet* mb = vtkMultiBlockDataSet::New();
  mb->SetNumberOfBlocks(1 + number_of_sets);
  mb->SetBlock(0, dataSet);
  mb->GetMetaData(static_cast<unsigned int>(0))->Set(vtkCompositeDataSet::NAME(), "Data");

  // Second pass: extract each enabled, non-ghost set into its own block.
  unsigned int current_set_index = 1;
  for (int cc = 0; cc < xmfGrid->GetNumberOfSets(); cc++)
  {
    XdmfSet* xmfSet = xmfGrid->GetSets(cc);
    int ghost_value = xmfSet->GetGhost();
    if (ghost_value != 0)
    {
      // skip ghost-sets.
      continue;
    }

    const char* setName = xmfSet->GetName();
    mb->GetMetaData(current_set_index)->Set(vtkCompositeDataSet::NAME(), setName);
    // NOTE(review): when a set is disabled we `continue` without incrementing
    // current_set_index, so the next enabled set overwrites this block's name
    // and slot — confirm whether disabled sets should still advance the index.
    if (!this->Domain->GetSetsSelection()->ArrayIsEnabled(setName))
    {
      continue;
    }

    // Okay now we have an enabled set. Create a new dataset for it
    vtkDataSet* set = nullptr;

    XdmfInt32 setType = xmfSet->GetSetType();
    switch (setType)
    {
      case XDMF_SET_TYPE_NODE:
        set = this->ExtractPoints(xmfSet, dataSet);
        break;

      case XDMF_SET_TYPE_CELL:
        set = this->ExtractCells(xmfSet, dataSet);
        break;

      case XDMF_SET_TYPE_FACE:
        set = this->ExtractFaces(xmfSet, dataSet);
        break;

      case XDMF_SET_TYPE_EDGE:
        set = this->ExtractEdges(xmfSet, dataSet);
        break;
    }

    if (set)
    {
      mb->SetBlock(current_set_index, set);
      set->Delete();
    }
    current_set_index++;
  }
  return mb;
}
//------------------------------------------------------------------------------
// Extracts the points listed in a node-centered XdmfSet from dataSet into a
// new vtkUnstructuredGrid containing one VTK_POLY_VERTEX cell over all points.
// Node-centered attributes defined on the set are copied over as point data.
// Caller takes ownership of the returned dataset.
vtkDataSet* vtkXdmfHeavyData::ExtractPoints(XdmfSet* xmfSet, vtkDataSet* dataSet)
{
  // TODO: How to handle structured datasets with update_extents/strides etc.
  // Do they too always produce vtkUniformGrid or do we want to produce
  // structured dataset

  // Read heavy data. We cannot do anything smart if update_extents or stride
  // is specified here. We have to read the entire set and then prune it.
  xmfSet->Update();

  XdmfArray* xmfIds = xmfSet->GetIds();
  XdmfInt64 numIds = xmfIds->GetNumberOfElements();
  std::vector<XdmfInt64> ids(numIds + 1);
  xmfIds->GetValues(0, ids.data(), numIds);

  // release heavy data.
  xmfSet->Release();

  vtkUnstructuredGrid* output = vtkUnstructuredGrid::New();
  vtkPoints* outputPoints = vtkPoints::New();
  outputPoints->SetNumberOfPoints(numIds);
  output->SetPoints(outputPoints);
  outputPoints->Delete();

  vtkIdType numInPoints = dataSet->GetNumberOfPoints();
  for (vtkIdType kk = 0; kk < numIds; kk++)
  {
    // Valid point indices are [0, numInPoints). The previous `> numInPoints`
    // test allowed an id equal to numInPoints, which reads past the end of the
    // input points.
    if (ids[kk] < 0 || ids[kk] >= numInPoints)
    {
      vtkWarningWithObjectMacro(this->Reader, "No such cell or point exists: " << ids[kk]);
      // Keep the output deterministic: fill the skipped slot with the origin
      // instead of leaving uninitialized memory in the points array.
      outputPoints->SetPoint(kk, 0.0, 0.0, 0.0);
      continue;
    }
    double point_location[3];
    dataSet->GetPoint(ids[kk], point_location);
    outputPoints->SetPoint(kk, point_location);
  }
  ids.clear(); // done with ids

  // Read node-centered attributes that may be defined on this set.
  int numAttributes = xmfSet->GetNumberOfAttributes();
  for (int cc = 0; cc < numAttributes; cc++)
  {
    XdmfAttribute* xmfAttribute = xmfSet->GetAttribute(cc);
    const char* attrName = xmfAttribute->GetName();
    int attrCenter = xmfAttribute->GetAttributeCenter();
    if (attrCenter != XDMF_ATTRIBUTE_CENTER_NODE)
    {
      continue;
    }
    vtkDataArray* array = this->ReadAttribute(xmfAttribute, 1, nullptr);
    if (array)
    {
      array->SetName(attrName);
      output->GetPointData()->AddArray(array);
      array->Delete();
    }
  }

  // A single poly-vertex cell referencing every extracted point.
  std::vector<vtkIdType> vtk_cell_ids(numIds);
  std::iota(vtk_cell_ids.begin(), vtk_cell_ids.end(), 0);
  output->InsertNextCell(VTK_POLY_VERTEX, numIds, vtk_cell_ids.data());

  return output;
}
//------------------------------------------------------------------------------
// Extracts the cells listed in a cell-centered XdmfSet from dataSet using
// vtkExtractSelectedIds, then attaches the set's cell-centered attributes.
// Caller takes ownership of the returned dataset.
vtkDataSet* vtkXdmfHeavyData::ExtractCells(XdmfSet* xmfSet, vtkDataSet* dataSet)
{
  // TODO: How to handle structured datasets with update_extents/strides etc.
  // Do they too always produce vtkUniformGrid or do we want to produce
  // structured dataset

  // Read heavy data.
  xmfSet->Update();

  XdmfArray* xmfIds = xmfSet->GetIds();
  XdmfInt64 numIds = xmfIds->GetNumberOfElements();

  vtkIdTypeArray* ids = vtkIdTypeArray::New();
  ids->SetNumberOfComponents(1);
  ids->SetNumberOfTuples(numIds);
  xmfIds->GetValues(0, (vtkXdmfIdType*)ids->GetPointer(0), numIds);

  // release heavy data.
  xmfSet->Release();

  // We directly use vtkExtractSelectedIds for extract cells since the logic to
  // extract cells it no trivial (like extracting points).
  vtkSelectionNode* selNode = vtkSelectionNode::New();
  selNode->SetContentType(vtkSelectionNode::INDICES);
  selNode->SetFieldType(vtkSelectionNode::CELL);
  selNode->SetSelectionList(ids);

  vtkSelection* sel = vtkSelection::New();
  sel->AddNode(selNode);
  selNode->Delete();

  vtkExtractSelectedIds* extractCells = vtkExtractSelectedIds::New();
  extractCells->SetInputData(0, dataSet);
  extractCells->SetInputData(1, sel);
  extractCells->Update();

  // Copy only the structure; the set's own attributes are attached below.
  vtkDataSet* output = vtkDataSet::SafeDownCast(extractCells->GetOutput()->NewInstance());
  output->CopyStructure(vtkDataSet::SafeDownCast(extractCells->GetOutput()));

  sel->Delete();
  extractCells->Delete();
  ids->Delete();

  // Read cell-centered attributes that may be defined on this set.
  int numAttributes = xmfSet->GetNumberOfAttributes();
  for (int cc = 0; cc < numAttributes; cc++)
  {
    XdmfAttribute* xmfAttribute = xmfSet->GetAttribute(cc);
    const char* attrName = xmfAttribute->GetName();
    int attrCenter = xmfAttribute->GetAttributeCenter();
    if (attrCenter != XDMF_ATTRIBUTE_CENTER_CELL)
    {
      continue;
    }
    vtkDataArray* array = this->ReadAttribute(xmfAttribute, 1, nullptr);
    if (array)
    {
      array->SetName(attrName);
      output->GetCellData()->AddArray(array);
      array->Delete();
    }
  }
  return output;
}
//------------------------------------------------------------------------------
// Extracts the faces identified by (cell-id, face-id) pairs in a face-centered
// XdmfSet into a new vtkPolyData of polygons. Points are merged so corners
// shared between faces are not duplicated. Caller owns the returned dataset.
vtkDataSet* vtkXdmfHeavyData::ExtractFaces(XdmfSet* xmfSet, vtkDataSet* dataSet)
{
  xmfSet->Update();

  XdmfArray* xmfIds = xmfSet->GetIds();
  XdmfArray* xmfCellIds = xmfSet->GetCellIds();

  XdmfInt64 numFaces = xmfIds->GetNumberOfElements();

  // ids is a 2 component array were each tuple is (cell-id, face-id).
  vtkIdTypeArray* ids = vtkIdTypeArray::New();
  ids->SetNumberOfComponents(2);
  ids->SetNumberOfTuples(numFaces);
  // Interleave cell ids into component 0 and face ids into component 1
  // (destination stride of 2).
  xmfCellIds->GetValues(0, (vtkXdmfIdType*)ids->GetPointer(0), numFaces, 1, 2);
  xmfIds->GetValues(0, (vtkXdmfIdType*)ids->GetPointer(1), numFaces, 1, 2);

  vtkPolyData* output = vtkPolyData::New();
  vtkCellArray* polys = vtkCellArray::New();
  output->SetPolys(polys);
  polys->Delete();

  vtkPoints* outPoints = vtkPoints::New();
  output->SetPoints(outPoints);
  outPoints->Delete();

  // Merge points so endpoints shared by extracted faces are reused.
  vtkMergePoints* mergePoints = vtkMergePoints::New();
  mergePoints->InitPointInsertion(outPoints, dataSet->GetBounds());

  for (vtkIdType cc = 0; cc < numFaces; cc++)
  {
    vtkIdType cellId = ids->GetValue(cc * 2);
    vtkIdType faceId = ids->GetValue(cc * 2 + 1);

    vtkCell* cell = dataSet->GetCell(cellId);
    if (!cell)
    {
      vtkWarningWithObjectMacro(this->Reader, "Invalid cellId: " << cellId);
      continue;
    }
    vtkCell* face = cell->GetFace(faceId);
    if (!face)
    {
      vtkWarningWithObjectMacro(this->Reader, "Invalid faceId " << faceId << " on cell " << cellId);
      continue;
    }

    // Now insert this face a new cell in the output dataset.
    vtkIdType numPoints = face->GetNumberOfPoints();
    vtkPoints* facePoints = face->GetPoints();
    std::vector<vtkIdType> outputPts(numPoints + 1);
    for (vtkIdType kk = 0; kk < numPoints; kk++)
    {
      mergePoints->InsertUniquePoint(facePoints->GetPoint(kk), outputPts[kk]);
    }
    polys->InsertNextCell(numPoints, outputPts.data());
  }

  ids->Delete();
  xmfSet->Release();
  mergePoints->Delete();

  // Read face-centered attributes that may be defined on this set.
  int numAttributes = xmfSet->GetNumberOfAttributes();
  for (int cc = 0; cc < numAttributes; cc++)
  {
    XdmfAttribute* xmfAttribute = xmfSet->GetAttribute(cc);
    const char* attrName = xmfAttribute->GetName();
    int attrCenter = xmfAttribute->GetAttributeCenter();
    if (attrCenter != XDMF_ATTRIBUTE_CENTER_FACE)
    {
      continue;
    }
    vtkDataArray* array = this->ReadAttribute(xmfAttribute, 1, nullptr);
    if (array)
    {
      array->SetName(attrName);
      output->GetCellData()->AddArray(array);
      array->Delete();
    }
  }
  return output;
}
//------------------------------------------------------------------------------
// Extracts the edges identified by (cell-id, face-id, edge-id) triples in an
// edge-centered XdmfSet into a new vtkPolyData of lines. Points are merged so
// shared endpoints are not duplicated. Caller owns the returned dataset.
vtkDataSet* vtkXdmfHeavyData::ExtractEdges(XdmfSet* xmfSet, vtkDataSet* dataSet)
{
  xmfSet->Update();

  XdmfArray* xmfIds = xmfSet->GetIds();
  XdmfArray* xmfCellIds = xmfSet->GetCellIds();
  XdmfArray* xmfFaceIds = xmfSet->GetFaceIds();

  XdmfInt64 numEdges = xmfIds->GetNumberOfElements();

  // ids is a 3 component array were each tuple is (cell-id, face-id, edge-id).
  vtkIdTypeArray* ids = vtkIdTypeArray::New();
  ids->SetNumberOfComponents(3);
  ids->SetNumberOfTuples(numEdges);
  // Interleave the three id streams into components 0/1/2 (stride 3).
  xmfCellIds->GetValues(0, (vtkXdmfIdType*)ids->GetPointer(0), numEdges, 1, 3);
  xmfFaceIds->GetValues(0, (vtkXdmfIdType*)ids->GetPointer(1), numEdges, 1, 3);
  xmfIds->GetValues(0, (vtkXdmfIdType*)ids->GetPointer(2), numEdges, 1, 3);

  vtkPolyData* output = vtkPolyData::New();
  vtkCellArray* lines = vtkCellArray::New();
  output->SetLines(lines);
  lines->Delete();

  vtkPoints* outPoints = vtkPoints::New();
  output->SetPoints(outPoints);
  outPoints->Delete();

  // Merge points so endpoints shared by extracted edges are reused.
  vtkMergePoints* mergePoints = vtkMergePoints::New();
  mergePoints->InitPointInsertion(outPoints, dataSet->GetBounds());

  for (vtkIdType cc = 0; cc < numEdges; cc++)
  {
    vtkIdType cellId = ids->GetValue(cc * 3);
    vtkIdType faceId = ids->GetValue(cc * 3 + 1);
    vtkIdType edgeId = ids->GetValue(cc * 3 + 2);

    vtkCell* cell = dataSet->GetCell(cellId);
    if (!cell)
    {
      vtkWarningWithObjectMacro(this->Reader, "Invalid cellId: " << cellId);
      continue;
    }
    vtkCell* face = cell->GetFace(faceId);
    if (!face)
    {
      vtkWarningWithObjectMacro(this->Reader, "Invalid faceId " << faceId << " on cell " << cellId);
      continue;
    }
    // NOTE(review): the face is validated above, yet the edge is fetched from
    // the *cell* (cell->GetEdge), not the face — confirm edgeId is meant to be
    // cell-relative rather than face-relative.
    vtkCell* edge = cell->GetEdge(edgeId);
    if (!edge)
    {
      vtkWarningWithObjectMacro(this->Reader,
        "Invalid edgeId " << edgeId << " on face " << faceId << " on cell " << cellId);
      continue;
    }

    // Now insert this edge as a new cell in the output dataset.
    vtkIdType numPoints = edge->GetNumberOfPoints();
    vtkPoints* edgePoints = edge->GetPoints();
    std::vector<vtkIdType> outputPts(numPoints + 1);
    for (vtkIdType kk = 0; kk < numPoints; kk++)
    {
      mergePoints->InsertUniquePoint(edgePoints->GetPoint(kk), outputPts[kk]);
    }
    lines->InsertNextCell(numPoints, outputPts.data());
  }

  ids->Delete();
  xmfSet->Release();
  mergePoints->Delete();

  // Read edge-centered attributes that may be defined on this set.
  int numAttributes = xmfSet->GetNumberOfAttributes();
  for (int cc = 0; cc < numAttributes; cc++)
  {
    XdmfAttribute* xmfAttribute = xmfSet->GetAttribute(cc);
    const char* attrName = xmfAttribute->GetName();
    int attrCenter = xmfAttribute->GetAttributeCenter();
    if (attrCenter != XDMF_ATTRIBUTE_CENTER_EDGE)
    {
      continue;
    }
    vtkDataArray* array = this->ReadAttribute(xmfAttribute, 1, nullptr);
    if (array)
    {
      array->SetName(attrName);
      output->GetCellData()->AddArray(array);
      array->Delete();
    }
  }
  return output;
}
| 22,140 |
2,344 |
<gh_stars>1000+
#!/usr/bin/env python
"""
Created by howie.hu at 30/03/2018.
"""
import time
from pprint import pprint
from ruia import Spider, Item, TextField, AttrField
from ruia_ua import middleware as ua_middleware
from owllook.database.mongodb import MotorBase
class HYNovelInfoItem(Item):
    """Scraped metadata for a single heiyan.com novel.

    Each field is populated from the Open Graph ``<meta>`` tags of the
    novel's detail page; the ``clean_*`` coroutines post-process the raw
    extracted values before the item is returned.
    """

    novel_name = AttrField(css_select="meta[property='og:title']", attr='content')
    author = AttrField(css_select="meta[property='og:novel:author']", attr='content')
    cover = AttrField(css_select="meta[property='og:image']", attr='content')
    abstract = AttrField(css_select="meta[property='og:description']", attr='content')
    status = AttrField(css_select="meta[property='og:novel:status']", attr='content')
    novels_type = AttrField(css_select="meta[property='og:novel:category']", attr='content')
    novel_chapter_url = AttrField(css_select='div#voteList a.index', attr='href')
    latest_chapter = AttrField(css_select="meta[property='og:novel:latest_chapter_name']", attr='content')
    latest_chapter_url = AttrField(css_select="meta[property='og:novel:latest_chapter_url']", attr='content')
    latest_chapter_time = AttrField(css_select="meta[property='og:novel:update_time']", attr='content')

    async def clean_cover(self, cover):
        """Force an https scheme on the cover URL.

        Only the scheme prefix is rewritten. The previous implementation used
        ``'https' in cover`` plus ``cover.replace('http', 'https')``, which
        misdetected URLs merely *containing* "https" later in the path and
        rewrote every "http" occurrence instead of just the scheme.
        """
        if cover.startswith('https://'):
            return cover
        if cover.startswith('http://'):
            return 'https://' + cover[len('http://'):]
        return cover

    async def clean_novels_type(self, novels_type):
        """Map site-specific category labels to our canonical names."""
        types_dict = {
            '社会': '都市'
        }
        # Debug print removed: it spammed stdout on every parsed page.
        return types_dict.get(str(novels_type).strip(), novels_type)

    async def clean_latest_chapter_time(self, latest_chapter_time):
        """Rewrite the site's relative dates ('今天'/'昨日') to absolute dates."""
        return latest_chapter_time.replace(u'今天', str(time.strftime("%Y-%m-%d ", time.localtime()))).replace(u'昨日', str(
            time.strftime("%Y-%m-%d ", time.localtime(time.time() - 24 * 60 * 60))))
class HYNovelInfoSpider(Spider):
    """Ruia spider that fetches a heiyan.com novel page and upserts the
    parsed metadata into the ``all_novels_info`` Mongo collection."""

    start_urls = []
    request_config = {
        'RETRIES': 3,  # retry each request up to 3 times
        'TIMEOUT': 10  # per-request timeout, in seconds
    }

    async def parse(self, res):
        # NOTE(review): self.motor_db is assigned here but never used;
        # save() below builds its own MotorBase() connection — presumably one
        # of the two is redundant; confirm before consolidating.
        self.motor_db = MotorBase(loop=self.loop).get_db()
        item = await HYNovelInfoItem.get_item(html=res.html)
        # Flatten the item into the document stored in Mongo, tagging it with
        # the originating spider and URL for traceability.
        item_data = {
            'novel_name': item.novel_name,
            'author': item.author,
            'cover': item.cover,
            'abstract': item.abstract,
            'status': item.status,
            'novels_type': item.novels_type,
            'novel_chapter_url': item.novel_chapter_url,
            'latest_chapter': item.latest_chapter,
            'latest_chapter_time': item.latest_chapter_time,
            'spider': 'heiyan',
            'target_url': res.url,
            'updated_at': time.strftime("%Y-%m-%d %X", time.localtime())
        }
        print('获取 {} 小说信息成功'.format(item_data['novel_name']))
        await self.save(res_dic=item_data)

    async def save(self, res_dic):
        # Persist into the database: upsert keyed on (novel_name, spider) so
        # re-crawls update the existing document instead of duplicating it.
        try:
            motor_db = MotorBase().get_db()
            await motor_db.all_novels_info.update_one({
                'novel_name': res_dic['novel_name'], 'spider': 'heiyan'},
                {'$set': res_dic},
                upsert=True)
        except Exception as e:
            # Best-effort persistence: log and keep crawling on DB errors.
            self.logger.exception(e)
if __name__ == '__main__':
    # Ad-hoc manual run: scrape a single known novel page.
    HYNovelInfoSpider.start_urls = ['http://www.heiyan.com/book/62599']
    # HYNovelInfoSpider.start_urls = [each.get('novel_url', '') for each in search_author('火星引力', 'qidian')]
    print(HYNovelInfoSpider.start_urls)
    # ua_middleware injects a random User-Agent header into every request.
    HYNovelInfoSpider.start(middleware=ua_middleware)
| 1,834 |
388 |
<filename>Source/CesiumRuntime/Public/CesiumMetadataUtilityBlueprintLibrary.h
#pragma once
#include "CesiumMetadataFeatureTable.h"
#include "CesiumMetadataGenericValue.h"
#include "CesiumMetadataPrimitive.h"
#include "Containers/UnrealString.h"
#include "Kismet/BlueprintFunctionLibrary.h"
#include "UObject/ObjectMacros.h"
#include "CesiumMetadataUtilityBlueprintLibrary.generated.h"
/**
 * Blueprint function library exposing read-only helpers for querying Cesium
 * glTF metadata (primitive metadata, per-face metadata values, and feature
 * IDs) from Unreal Blueprints. All functions are static and pure.
 */
UCLASS()
class CESIUMRUNTIME_API UCesiumMetadataUtilityBlueprintLibrary
    : public UBlueprintFunctionLibrary {
  GENERATED_BODY()

public:
  /**
   * Gets the primitive metadata of a glTF primitive component. If component is
   * not a Cesium glTF primitive component, the returned metadata is empty
   */
  UFUNCTION(
      BlueprintCallable,
      BlueprintPure,
      Category = "Cesium|Metadata|Utility")
  static FCesiumMetadataPrimitive
  GetPrimitiveMetadata(const UPrimitiveComponent* component);

  /**
   * Gets the metadata of a face of a glTF primitive component. If the component
   * is not a Cesium glTF primitive component, the returned metadata is empty.
   * If the primitive has multiple feature tables, the metadata in the first
   * table is returned.
   */
  UFUNCTION(
      BlueprintCallable,
      BlueprintPure,
      Category = "Cesium|Metadata|Utility")
  static TMap<FString, FCesiumMetadataGenericValue>
  GetMetadataValuesForFace(const UPrimitiveComponent* component, int64 faceID);

  /**
   * Gets the metadata as string of a face of a glTF primitive component. If the
   * component is not a Cesium glTF primitive component, the returned metadata
   * is empty. If the primitive has multiple feature tables, the metadata in the
   * first table is returned.
   */
  UFUNCTION(
      BlueprintCallable,
      BlueprintPure,
      Category = "Cesium|Metadata|Utility")
  static TMap<FString, FString> GetMetadataValuesAsStringForFace(
      const UPrimitiveComponent* component,
      int64 faceID);

  /**
   * Gets the feature ID associated with a given face for a given feature table.
   */
  UFUNCTION(
      BlueprintCallable,
      BlueprintPure,
      Category = "Cesium|Metadata|Utility")
  static int64 GetFeatureIDForFace(
      UPARAM(ref) const FCesiumMetadataPrimitive& Primitive,
      UPARAM(ref) const FCesiumMetadataFeatureTable& FeatureTable,
      int64 faceID);
};
| 761 |
8,428 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/core/node/common/service/object_search_message.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()

# syft absolute
from syft.proto.core.common import (
    common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2,
)
from syft.proto.core.io import address_pb2 as proto_dot_core_dot_io_dot_address__pb2

# NOTE: this module is generated by protoc from
# proto/core/node/common/service/object_search_message.proto. Do not hand-edit;
# regenerate from the .proto instead. The bytes below are the serialized
# FileDescriptorProto for that file.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
    b'\n:proto/core/node/common/service/object_search_message.proto\x12\x1dsyft.core.node.common.service\x1a%proto/core/common/common_object.proto\x1a\x1bproto/core/io/address.proto"\xb4\x01\n\x13ObjectSearchMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\'\n\x08reply_to\x18\x03 \x01(\x0b\x32\x15.syft.core.io.Address\x12%\n\x06obj_id\x18\x04 \x01(\x0b\x32\x15.syft.core.common.UID"z\n\x18ObjectSearchReplyMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x0f\n\x07results\x18\x03 \x03(\x0c\x62\x06proto3'
)

_OBJECTSEARCHMESSAGE = DESCRIPTOR.message_types_by_name["ObjectSearchMessage"]
_OBJECTSEARCHREPLYMESSAGE = DESCRIPTOR.message_types_by_name["ObjectSearchReplyMessage"]

# Concrete message classes are materialized at import time via the protobuf
# reflection API.
ObjectSearchMessage = _reflection.GeneratedProtocolMessageType(
    "ObjectSearchMessage",
    (_message.Message,),
    {
        "DESCRIPTOR": _OBJECTSEARCHMESSAGE,
        "__module__": "proto.core.node.common.service.object_search_message_pb2"
        # @@protoc_insertion_point(class_scope:syft.core.node.common.service.ObjectSearchMessage)
    },
)
_sym_db.RegisterMessage(ObjectSearchMessage)

ObjectSearchReplyMessage = _reflection.GeneratedProtocolMessageType(
    "ObjectSearchReplyMessage",
    (_message.Message,),
    {
        "DESCRIPTOR": _OBJECTSEARCHREPLYMESSAGE,
        "__module__": "proto.core.node.common.service.object_search_message_pb2"
        # @@protoc_insertion_point(class_scope:syft.core.node.common.service.ObjectSearchReplyMessage)
    },
)
_sym_db.RegisterMessage(ObjectSearchReplyMessage)

if _descriptor._USE_C_DESCRIPTORS == False:
    # Pure-Python descriptors: record each message's byte offsets inside the
    # serialized file (consumed by the descriptor pool).
    DESCRIPTOR._options = None
    _OBJECTSEARCHMESSAGE._serialized_start = 162
    _OBJECTSEARCHMESSAGE._serialized_end = 342
    _OBJECTSEARCHREPLYMESSAGE._serialized_start = 344
    _OBJECTSEARCHREPLYMESSAGE._serialized_end = 466
# @@protoc_insertion_point(module_scope)
| 1,195 |
579 |
/*
* Copyright (c) 2020-2021 Valve Corporation
* Copyright (c) 2020-2021 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Authors:
* - <NAME>. <<EMAIL>>
* - <NAME> <<EMAIL>>
*/
#include "widget_setting_list.h"
#include "widget_setting_list_element.h"
#include "widget_setting.h"
#include "../vkconfig_core/util.h"
#include <QMessageBox>
#include <cassert>
// Tooltip for the input field: depends on whether the setting declares a list
// of accepted values and whether any of those values remain available to add.
const char *GetFieldToolTip(const SettingMetaList &meta, bool current_list_empty) {
    if (meta.list.empty()) {
        return "Start tapping to add a new value";
    }
    return (meta.list_only && current_list_empty) ? "All the accepted values are already listed"
                                                  : "Start tapping to search for available values";
}
// Builds the row widget for a list-valued setting: a searchable QLineEdit with
// a completer plus a '+' button, and (via Refresh) one child tree item per
// value already stored. `meta` describes the setting; `data_set` holds its
// live value.
WidgetSettingList::WidgetSettingList(QTreeWidget *tree, QTreeWidgetItem *item, const SettingMetaList &meta,
                                     SettingDataSet &data_set)
    : WidgetSettingBase(tree, item),
      meta(meta),
      data_set(data_set),
      search(nullptr),
      field(new QLineEdit(this)),
      add_button(new QPushButton(this)),
      list(meta.list) {
    assert(&this->meta);

    // `list` starts as all accepted values; remove those already stored so the
    // completer only offers values that can still be added.
    std::vector<EnabledNumberOrString> value = this->data().value;
    for (std::size_t i = 0, n = value.size(); i < n; ++i) {
        ::RemoveValue(this->list, value[i]);
    }

    const char *tooltip = GetFieldToolTip(this->meta, this->list.empty());

    this->field->show();
    this->field->setText("");
    this->field->setToolTip(tooltip);
    this->field->setFont(this->tree->font());
    this->field->setFocusPolicy(Qt::StrongFocus);
    // Filter the field's events through this widget (wheel suppression).
    this->field->installEventFilter(this);
    this->ResetCompleter();

    this->connect(this->field, SIGNAL(textChanged(const QString &)), this, SLOT(OnTextEdited(const QString &)));
    // Queued so the append happens after the completer/line edit settle.
    this->connect(this->field, SIGNAL(returnPressed()), this, SLOT(OnElementAppended()), Qt::QueuedConnection);

    this->add_button->show();
    this->add_button->setText("+");
    this->add_button->setFont(this->tree->font());
    this->connect(this->add_button, SIGNAL(pressed()), this, SLOT(OnElementAppended()), Qt::QueuedConnection);

    // Keep the stored values sorted for a stable display order.
    std::sort(value.begin(), value.end());
    this->data().value = value;

    this->item->setText(0, (this->meta.label + " ").c_str());
    this->item->setFont(0, this->tree->font());
    this->item->setToolTip(0, this->meta.description.c_str());
    this->item->setSizeHint(0, QSize(0, ITEM_HEIGHT));
    this->item->setExpanded(this->meta.expanded);
    this->tree->setItemWidget(this->item, 0, this);

    this->Refresh(REFRESH_ENABLE_AND_STATE);
}
// Synchronizes the widget with the current setting state: enables/disables
// controls according to setting dependencies, and rebuilds the child items
// when the stored value changed since the last refresh (value_cached).
void WidgetSettingList::Refresh(RefreshAreas refresh_areas) {
    const bool enabled = ::CheckDependence(this->meta, data_set);

    this->item->setDisabled(!enabled);
    this->setEnabled(enabled);
    // With a closed ("list_only") value set and nothing left to add, there is
    // nothing the user could type: disable (and below, hide) the input.
    this->field->setEnabled(enabled && (!this->meta.list_only || !this->list.empty()));
    this->add_button->setEnabled(enabled && !this->field->text().isEmpty());

    if (this->meta.list_only && this->list.empty()) {
        this->field->hide();
        this->add_button->hide();
    } else {
        this->field->show();
        this->add_button->show();
    }

    std::vector<EnabledNumberOrString> &value = this->data().value;
    if (value != this->value_cached) {
        this->value_cached = value;

        // Rebuild the child rows; block tree signals so the rebuild does not
        // emit spurious change notifications.
        this->tree->blockSignals(true);
        while (this->item->childCount() > 0) {
            this->item->removeChild(this->item->child(0));
        }
        for (std::size_t i = 0, n = value.size(); i < n; ++i) {
            this->AddElement(value[i]);
        }
        this->tree->blockSignals(false);
    }

    if (refresh_areas == REFRESH_ENABLE_AND_STATE) {
        if (::CheckSettingOverridden(this->meta)) {
            this->DisplayOverride(this->field, this->meta);
        }
    }
}
void WidgetSettingList::Resize() {
const int button_size = MIN_BUTTON_SIZE;
const QFontMetrics fm = this->fontMetrics();
const int text_width = HorizontalAdvance(fm, item->text(0));
this->field->setGeometry(text_width, 0, this->size.width() - button_size - text_width, this->size.height());
this->add_button->setGeometry(this->size.width() - button_size, 0, button_size, this->size.height());
}
// Caches the new widget size (consumed by Resize) then lays children out again.
void WidgetSettingList::resizeEvent(QResizeEvent *event) {
    this->size = event->size();
    this->Resize();
}
// Swallows wheel events so scrolling the tree does not interact with the
// field; everything else is handed to the line edit's own filter.
// NOTE(review): delegating to this->field->eventFilter(target, event) relies
// on QLineEdit's default eventFilter implementation — confirm this pass-through
// (rather than returning false) is intentional.
bool WidgetSettingList::eventFilter(QObject *target, QEvent *event) {
    (void)target;
    if (event->type() == QEvent::Wheel) {
        event->ignore();
        return true;
    }

    return this->field->eventFilter(target, event);
}
// Recreates the QCompleter from the currently available values and attaches it
// to the input field. The previous completer (if any) is deleted lazily, which
// is safe even if this runs from one of its own signal handlers.
void WidgetSettingList::ResetCompleter() {
    if (this->search != nullptr) this->search->deleteLater();

    this->search = new QCompleter(ConvertValues(this->list), this);
    this->search->setCompletionMode(QCompleter::PopupCompletion);
    this->search->setModelSorting(QCompleter::CaseSensitivelySortedModel);
    // Substring matching, case-sensitive, capped popup height.
    this->search->setFilterMode(Qt::MatchContains);
    this->search->setCaseSensitivity(Qt::CaseSensitive);
    this->search->setMaxVisibleItems(20);

    this->field->setCompleter(this->search);

    // Queued so the append runs after the completer finishes (see OnCompleted).
    this->connect(this->search, SIGNAL(activated(const QString &)), this, SLOT(OnCompleted(const QString &)), Qt::QueuedConnection);
}
// Appends one child row representing `element` under this setting's tree item
// and wires the row's change/remove signals back to this widget.
void WidgetSettingList::AddElement(EnabledNumberOrString &element) {
    QTreeWidgetItem *child = new QTreeWidgetItem();
    child->setSizeHint(0, QSize(0, ITEM_HEIGHT));
    this->item->addChild(child);

    WidgetSettingListElement *widget = new WidgetSettingListElement(this->tree, child, this->meta, this->data_set, element);
    this->tree->setItemWidget(child, 0, widget);

    this->connect(widget, SIGNAL(itemChanged()), this, SLOT(OnSettingChanged()));
    this->connect(widget, SIGNAL(itemSelected(const QString &)), this, SLOT(OnElementRemoved(const QString &)));
}
// Slot fired when the user picks a completion; defers the actual append to
// OnElementAppended (connection is queued — see rationale below).
void WidgetSettingList::OnCompleted(const QString &value) {
    (void)value;
    // We can't do this right away, the completer emits it's signal
    // before it's really "complete". If we clear the control too soon
    // it clears the completers value too. This might be a Qt bug, but this
    // works really well as a work-a-round
    OnElementAppended();
}
// Validates the text currently in the input field and appends it to the
// setting's value list. Values outside a closed ("list_only") list and
// duplicates are rejected with a message box. On success the field is cleared,
// the value list re-sorted, and itemChanged is emitted.
void WidgetSettingList::OnElementAppended() {
    const std::string entry = this->field->text().toStdString();
    if (entry.empty()) return;

    if (this->meta.list_only && !IsValueFound(this->meta.list, entry)) {
        QMessageBox alert;
        alert.setWindowTitle("Invalid value");
        alert.setText(format("'%s' setting doesn't accept '%s' as a value", this->meta.label.c_str(), entry.c_str()).c_str());
        alert.setInformativeText("Please select a value from the list.");
        alert.setIcon(QMessageBox::Warning);
        alert.exec();
        return;
    }

    std::vector<EnabledNumberOrString> &value = this->data().value;

    // Add the value if it's not in the list already
    if (IsValueFound(value, entry)) {
        QMessageBox alert;
        alert.setWindowTitle("Duplicated value");
        alert.setText(format("'%s' setting already has the value '%s' listed", this->meta.label.c_str(), entry.c_str()).c_str());
        alert.setIcon(QMessageBox::Warning);
        alert.exec();
        return;
    }

    this->field->setText("");

    value.push_back(entry);
    std::sort(value.begin(), value.end());

    // The value is no longer available for completion.
    ::RemoveValue(this->list, entry);

    emit itemChanged();
}
// Keeps the tree-item label and '+' button in sync with the field's text: the
// label is shown only while the field is empty, and the button is enabled only
// while there is something to add.
void WidgetSettingList::OnTextEdited(const QString &value) {
    assert(this->add_button);
    assert(this->field);

    const bool field_is_empty = value.isEmpty();
    if (field_is_empty) {
        this->item->setText(0, (this->meta.label + " ").c_str());
    } else if (!this->item->text(0).isEmpty()) {
        this->item->setText(0, "");
    }

    this->Resize();
    this->add_button->setEnabled(!field_is_empty);
}
// Called when a child element row is removed: the value becomes available for
// completion again and is dropped from the stored setting value.
void WidgetSettingList::OnElementRemoved(const QString &element) {
    NumberOrString list_value(element.toStdString());
    this->list.push_back(list_value);
    RemoveValue(this->data().value, EnabledNumberOrString(list_value));
}
void WidgetSettingList::OnSettingChanged() { emit itemChanged(); }
SettingDataList &WidgetSettingList::data() {
    // The setting data is looked up by key in the shared data set; it is
    // created elsewhere, so a missing entry is a programming error.
    auto *list_data = FindSetting<SettingDataList>(this->data_set, this->meta.key.c_str());
    assert(list_data != nullptr);
    return *list_data;
}
| 3,253 |
578 |
<filename>cachelib/compact_cache/CCacheVariableLruBucket.h
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <folly/logging/xlog.h>
#include <algorithm>
#include <utility>
/**
* This file implements a bucket management for variable sized objects that is
* highly optimized for read operations and has a 4 byte overhead per entry + 3
* bytes per bucket.
*
* +---------------+---------------+-----------------------------------------+
* | | | |
* | | | |
* | Entry headers | Empty | Data |
* | | | |
* | | | |
* +---------------+---------------+-----------------------------------------+
*
* In this implementation, the bucket is split in three sections:
*
* 1) Entry headers.
* This section contains the entries' headers (EntryHdr). There is one
* entry header per entry in the bucket. Each entry header contains the
* following data:
* - key: the key of the entry;
* - dataSize: the size of the entry's value.
* - dataOffset: the position where to find the entry's value in the "Data"
* part of the bucket.
* The number of entries is given by the "numEntries" field of Bucket.
*
* 2) Data.
* This section contains the entries' values (EntryData). Each
* EntryData field contains:
* - headerIndex: the position of the corresponding entry header in the
* "Entry headers" section.
* - data: the value's payload. The size of this payload is determined by the
* 'dataSize' field of the corresponding entry header.
* This section has no 'holes', i.e all the entries are contiguous. The size
* of the data section is given by the "totalDataSize" field of Bucket.
*
* 3) Empty.
* This section shrinks/expands as entries are added/removed.
*
* The position of the entry headers in the "Entry headers" section determines
* the age of the entries. The right-most entry is the LRU, the left-most is the
* MRU. When an entry is promoted, the entry headers are shifted appropriately
* so that the promoted entry ends up to the left. For each entry affected by
* the shifting, the corrresponding EntryData in the "Data" section has its
* headerIndex field updated accordingly.
*
* When an entry is inserted, we check if the "Empty" section is big enough to
* hold both the new EntryHdr and the EntryData. If it is not big enough,
* we compute how many entries need to evicted in order to make enough space.
* The eviction algorithm is more expensive. Even though selecting the entries
* to be evicted is straightforward as you only need to browse them from right
* to left in the "Entry headers" section, removing them from the "Data" section
* is not trivial. Each 'block' of entries that are between two evicted entries
* are shifted to the right in order to expand the size of the "Empty" section.
* Once the necessary items have been evicted and the "Empty" section is big
* enough, the new entry's header is added to the "Entry headers" section, and
* its data is written at the beginning of the "Data" section.
*
* When an entry is deleted, the entry headers that follow the deleted entry's
* header are shifted to the left, and we update the headerIndex of the related
* EntryData. The EntryData objects that precede the deleted EntryData
* are shifted to the right. Deleting an entry expands the size of the "Empty
* section" by adding sizeof(EntryHdr) + sizeof(EntryData) + the amount of
* bytes in the payload.
*
* When an entry is updated, if the size does not change the EntryData is
* updated in place. If the new size is smaller, the end of the data is the
* same, but the beginning is moved to the right. This generates a space into
* which the preceding data is moved. If the new size is bigger, we fall back
* to deleting the entry and adding it again.
*/
namespace facebook {
namespace cachelib {
template <typename CompactCacheDescriptor>
struct VariableLruBucket {
public:
using Descriptor = CompactCacheDescriptor;
using ValueDescriptor = typename Descriptor::ValueDescriptor;
using Key = typename Descriptor::Key;
using Value = typename ValueDescriptor::Value;
static_assert(Descriptor::kHasValues,
"This bucket descriptor must be used for compact caches with"
"values.");
static_assert(!Descriptor::kValuesFixedSize,
"This bucket descriptor must be used with values of a"
"variable size.");
/** Type of the integral that contains the the size of an entry's data.
* (see dataSize in EntryHdr). */
using EntryDataSize = uint8_t;
/** Type of the integral that gives an offset (in bytes), starting from the
* 'data' field of Bucket. This is used to get the start offset of an
* EntryData object (see dataOffset in EntryHdr), and the total data
* size (in bytes) of a bucket (i.e the size of the 'Data' section). */
using EntryDataOffset = uint16_t;
/** Type of the integral that gives the index of an EntryHdr header in
* the 'Entry Headers' section (see headerIndex in EntryData), and the
* total number of entries (see numEntries in Bucket). */
using EntryNum = uint8_t;
constexpr static size_t kMaxValueSize = ValueDescriptor::kMaxSize;
/** Maximum number of entries per bucket. The number of entries must be
* small enough to fit in a value of type EntryNum, so take the max
* possible value here. */
constexpr static size_t kMaxEntries = std::numeric_limits<EntryNum>::max();
/** An entry header. */
using EntryHdr = struct {
Key key;
/* Size in bytes of the corresponding EntryData's data field. */
EntryDataSize dataSize;
/* Offset (in bytes) where to find the corresponding EntryData,
* measured from the beginning of the bucket's data field. */
EntryDataOffset dataOffset;
} __attribute__((__packed__));
using EntryData = struct {
/* Index of the corresponding EntryHdr header in the 'Entry
* Headers' section. This is a value between 0 and the number of entries
* in the bucket. */
EntryNum headerIndex;
/* Beginning of the entry's data. The size of this data is determined by
* the 'dataSize' field of the corresponding EntryHdr. */
char data[0];
} __attribute__((__packed__));
/** Compute the size of the bucket so that we can guarantee that it will be
* big enough to host an entry of the maximum size provided by the user. */
constexpr static size_t kBucketDataSize =
kMaxValueSize + sizeof(EntryHdr) + sizeof(EntryData);
using Bucket = struct {
/* Number of entries in the bucket. */
EntryNum numEntries;
/* Size of the "Data" section.
* The free space in the bucket is given by computing:
* kBucketDataSize - totalDataSize - sizeof(EntryHdr) * numEntries.
*/
EntryDataOffset totalDataSize;
/* Content of the bucket. This contains the three sections described in
* this file's documentation. */
char data[kBucketDataSize];
} __attribute__((__packed__));
/** This is to make sure that the 'totalDataSize' field of Bucket will
* never overflow. */
static_assert(kBucketDataSize <= std::numeric_limits<EntryDataOffset>::max(),
"Bucket is too big");
/**
* Handle to an entry. This class provides the compact cache implementation
* with a way to keep a reference to an entry in a bucket as well as a way
* to iterate over each valid entries without being aware of the underlying
* bucket implementation.
* This basically contains a pointer to the bucket and the index of the
* entry in the array of entry headers.
*
* Example, iterate over the valid entries in a bucket:
*
* LogStoreBucket<C>::EntryHandle h = LogStoreBucket<C>::first(myBucket);
* while (h) {
* // h refers to a valid entry in the bucket.
* // can use h.key(), h.val() to access content
* // of the entry.
* h.next();
* }
*/
  class EntryHandle {
   public:
    /* True iff this handle refers to a valid entry of a live bucket. */
    explicit operator bool() const {
      return bucket_ && 0 <= pos_ && pos_ < bucket_->numEntries;
    }
    /* Advance to the next entry, walking from MRU towards LRU. The handle
     * becomes invalid (operator bool == false) after passing the last
     * entry. */
    void next() {
      XDCHECK(*this);
      ++pos_;
    }
    /* Key of the entry this handle refers to. */
    Key key() const { return getEntry()->key; }
    /* Size (in bytes) of the entry's value payload. */
    constexpr size_t size() const { return getEntry()->dataSize; }
    /* Pointer to the entry's value payload inside the bucket's "Data"
     * section. */
    Value* val() const {
      return reinterpret_cast<Value*>(getEntryData(bucket_, pos_)->data);
    }
    /* A default-constructed handle is invalid. */
    EntryHandle() : bucket_(nullptr), pos_(-1) {}
    EntryHandle(Bucket* bucket, int pos) : bucket_(bucket), pos_(pos) {}
    /* True iff this handle refers to the LRU (last) entry of its bucket. */
    bool isBucketTail() const {
      return *this && pos_ == bucket_->numEntries - 1;
    }
   private:
    EntryHdr* getEntry() const { return getEntryHeader(bucket_, pos_); }
    /* Bucket this handle points into (not owned). */
    Bucket* bucket_;
    /* Index of the entry's header in the "Entry headers" section; 0 is the
     * MRU. The outer class accesses these members directly via the friend
     * declaration below. */
    int pos_;
    friend class VariableLruBucket<CompactCacheDescriptor>;
  };
/** Type of the callback to be called when an entry is evicted. */
using EvictionCb = std::function<void(const EntryHandle& handle)>;
/**
* Get a handle to the first entry in the bucket.
*
* @param bucket Bucket from which to retrieve the handle.
* @return Handle to the first entry.
*/
static EntryHandle first(Bucket* bucket) {
/* If bucket->numEntries is 0, the created handle is invalid. */
return EntryHandle(bucket, 0);
}
/**
* Return the total number of items this bucket could hold.
* Due to variable size this is imprecise; extrapolate capacity
* by dividing current # of items by current fractional memory
* in use
* @param bucket Bucket to find out the number of entries it could hold
* @return number of entries this bucket can hold (approx)
*/
static uint32_t nEntriesCapacity(const Bucket& bucket) {
const size_t n = bucket.numEntries;
const size_t sz = bucket.totalDataSize;
XDCHECK_LE(sz + sizeof(EntryHdr) * n, kBucketDataSize);
if (n == 0) {
return 0;
}
return (n * kBucketDataSize) / (sz + sizeof(EntryHdr) * n);
}
/**
* Insert a new entry in a bucket.
*
* 1) Compute the required space for our entry.
* 2) Call evictEntries, which takes care of evicting as many entries as
* required in order to have the necessary space.
* 3) Allocate a new spot in the "Empty" section by increasing
* bucket->totalDataSize.
* 4) Write the entry's data (EntryData) in the new spot.
* 5) Update the headerIndex of all the existing EntryData objects
* since their header is about to get shifted one position to the
* right. This step is the aging process of the entries.
* 6) Shift all the EntryHdr headers to the right so that we can write
* our new entry header in the first position.
* 7) Write the entry's header (EntryHdr).
* 8) Set the offsets in both the EntryHdr header and the EntryData so
* that they have a reference to each other.
* 9) Update the number of entries in the bucket.
*
* @param bucket Bucket in which to insert
* @param key key of the new entry.
* @param val Value to be inserted.
* @param size Size of the value to be inserted. The size must be
* smaller than or equal to the maximum value size
* described by the value descriptor, or else this
* function will assert because the bucket might not be
* large enough for the value.
* @param evictionCb Callback to be called for when an entry is evicted.
* Cannot be empty.
* @return 0 if no item was evicted, 1 if at least one item
* was evicted, -1 on error (the given size was too
* big).
*/
  static int insert(Bucket* bucket,
                    const Key& key,
                    const Value* val,
                    size_t size,
                    EvictionCb evictionCb) {
    /* The caller should ensure that the value is small enough to fit in
     * the bucket; release builds still fail gracefully below. */
    XDCHECK_LE(size, kMaxValueSize);
    if (size > kMaxValueSize) {
      XLOG(ERR) << "Cannot insert an value of size " << size
                << ", the size must be smaller than " << kMaxValueSize;
      return -1;
    }
    /* Make sure that EntryDataSize is wide enough to hold the given
     * size.
     * NOTE(review): checkOverflow is instantiated with the wider
     * EntryDataOffset type; the explicit comparison below is what actually
     * enforces the narrower EntryDataSize limit — confirm this is
     * intentional. */
    checkOverflow<EntryDataOffset>(size);
    if (size > std::numeric_limits<EntryDataSize>::max()) {
      XLOG(ERR)
          << "Cannot insert an value of size " << size
          << ", the size must be smaller than the max value of EntryDataSize";
      return -1;
    }
#ifndef NDEBUG
    checkBucketConsistency(bucket);
#endif
    /* 1) Compute the required space for our entry: payload bytes plus one
     *    EntryHdr in the headers section plus one EntryData in "Data". */
    size_t requiredSpace = size + sizeof(EntryHdr) + sizeof(EntryData);
    /* 2) Call evictEntries, which takes care of evicting as many entries as
     *    required in order to have the necessary space. */
    bool evicted = evictEntries(bucket, requiredSpace, evictionCb);
#ifndef NDEBUG
    /* EvictEntries should leave the bucket in a consistent state. */
    checkBucketConsistency(bucket);
#endif
    /* 3) Allocate a new spot in the "Empty" section by increasing
     *    bucket->totalDataSize. The "Data" section grows leftwards, so the
     *    new entry's data occupies its left-most part. */
    checkOverflow<EntryDataOffset>(bucket->totalDataSize + size +
                                   sizeof(EntryData));
    bucket->totalDataSize += size + sizeof(EntryData);
    /* 4) Write the entry's data (EntryData) in the new spot. */
    EntryData* newEntryData = getFirstEntryData(bucket);
    memcpy(newEntryData->data, val, size);
    /* 5) Update the headerIndex of all the existing EntryData objects
     *    since their header is about to get shifted one position to the
     *    right. This step is the aging process of the entries. */
    for (unsigned int i = 0; i < bucket->numEntries; i++) {
      getEntryData(bucket, i)->headerIndex++;
    }
    /* 6) Shift all the EntryHdr headers to the right so that we can
     *    write our new entry header in the first (MRU) position. */
    memmove(getEntryHeader(bucket, 1),
            getEntryHeader(bucket, 0),
            bucket->numEntries * sizeof(EntryHdr));
    /* 7) Write the entry's header (EntryHdr) */
    EntryHdr* newEntry = getEntryHeader(bucket, 0);
    memcpy(&newEntry->key, &key, sizeof(Key));
    newEntry->dataSize = size;
    /* 8) Set the offsets in both the EntryHdr header and the
     *    EntryData so that they have a reference to each other. */
    newEntry->dataOffset = kBucketDataSize - bucket->totalDataSize;
    newEntryData->headerIndex = 0;
    /* 9) Update the number of entries in the bucket. */
    checkOverflow<EntryNum>(bucket->numEntries + 1);
    bucket->numEntries++;
#ifndef NDEBUG
    checkBucketConsistency(bucket);
#endif
    return evicted ? 1 : 0;
  }
/**
* Promote an entry.
*
* 1) Shift all the EntryHdr headers that precede the promoted
* EntryHdr one position to the right.
* 2) Write the promoted entry to the first position.
* 3) Update the headerIndex field of all EntryData objects for which
* the corresponding EntryHdr header was moved.
*
* @param handle Handle of the entry to be promoted. Remains valid after
* this call completes.
*/
  static void promote(EntryHandle& handle) {
    XDCHECK(handle);
    /* Save the promoted header by value: the shift below overwrites its
     * slot. */
    EntryHdr toPromote = *handle.getEntry();
    EntryHdr* firstEntry = getEntryHeader(handle.bucket_, 0);
    /* 1) Shift all the EntryHdr headers that precede the promoted
     *    EntryHdr one position to the right. This is the aging process. */
    size_t shiftDistance = handle.pos_ * sizeof(EntryHdr);
    memmove(getEntryHeader(handle.bucket_, 1), firstEntry, shiftDistance);
    /* 2) Write the promoted entry to the first position. */
    memcpy(firstEntry, &toPromote, sizeof(EntryHdr));
    /* 3) Update the headerIndex field of all EntryData objects for
     *    which the corresponding EntryHdr header was moved. After the
     *    shift, the header at slot i owns the data whose headerIndex must
     *    be i. pos_ is non-negative here (checked above), so the
     *    signed/unsigned comparison is safe. */
    for (unsigned int i = 0; i <= handle.pos_; i++) {
      getEntryData(handle.bucket_, i)->headerIndex = i;
    }
    /* Modify handle so that it still points to the same entry. */
    handle.pos_ = 0;
#ifndef NDEBUG
    checkBucketConsistency(handle.bucket_);
#endif
  }
/**
* Whether an entry needs promotion. Do this if it's beyond the first
* two. The ccache usually holds at least 8 items so this should be
* reasonably safe.
*/
static inline bool needs_promote(EntryHandle& handle) {
XDCHECK(handle);
return handle.pos_ > 1;
}
/**
* Delete an entry.
*
* 1) Shift all the EntryData objects that precede the EntryData being
* deleted. Update the corresponding dataOffset fields in the EntryHdr
* headers for those moved entries.
* 2) Update the headerIndex of all the EntryData objects that
* correspond to an EntryHdr that will be shifted to the left in 3).
* 3) Shift all the EntryHdr headers that are after the deleted
* EntryHdr one position to the left.
* 4) Reduce the total size of the bucket and decrement the number of
* entries in the bucket.
*
* @param handle Handle of the entry to be deleted. After this function
* completes, the handle points to the next valid entry or
* becomes invalid if no such entry.
*/
  static void del(EntryHandle& handle) {
    XDCHECK(handle);
    /* Bytes released in the "Data" section: the payload plus its EntryData
     * header. */
    EntryDataOffset shiftDistance =
        handle.getEntry()->dataSize + sizeof(EntryData);
    /* Bytes of entry data that precede (sit to the left of) the deleted
     * entry's data and therefore need to move right. */
    EntryDataOffset shiftChunkSize =
        handle.getEntry()->dataOffset - getFirstEntryDataOffset(handle.bucket_);
    /* 1) Shift all the EntryData objects that precede the EntryData
     *    object of the entry we are deleting. This function also takes care
     *    of updating the dataOffset field of all the corresponding EntryHdr
     *    headers so that these remain valid. */
    shiftEntriesData(handle.bucket_,
                     getFirstEntryData(handle.bucket_),
                     shiftDistance,
                     shiftChunkSize);
    /* 2) Update the headerIndex of all the EntryData objects that
     *    correspond to an EntryHdr that will be shifted to the left in 3).
     */
    for (unsigned int i = handle.pos_ + 1; i < handle.bucket_->numEntries;
         i++) {
      getEntryData(handle.bucket_, i)->headerIndex--;
    }
    /* 3) Shift all the EntryHdr headers that are after the deleted
     *    EntryHdr one position to the left. Skipped when the deleted entry
     *    is the LRU (nothing to its right). */
    if (handle.pos_ < handle.bucket_->numEntries - 1) {
      size_t delta = handle.bucket_->numEntries - handle.pos_ - 1;
      memmove(
          handle.getEntry(), handle.getEntry() + 1, delta * sizeof(EntryHdr));
    }
    /* 4) Reduce the total size of the bucket and decrement the number of
     *    entries in the bucket. The handle now points at what was the next
     *    entry (or is invalid if the deleted entry was the last one). */
    handle.bucket_->totalDataSize -= shiftDistance;
    handle.bucket_->numEntries--;
#ifndef NDEBUG
    checkBucketConsistency(handle.bucket_);
#endif
  }
/**
* Update the value of an entry.
*
* There are three cases:
*
* 1/ The new size is equal to the old size:
* 1) Copy the new data in place.
* 2) Promote the entry.
* 2/ The new size is smaller than the old size:
* 1) Compute the new offset of the EntryData by moving the existing
* one to the right by the amount of bytes that makes the difference
* between the old size and the new size.
* 2) Copy the new value at this offset.
* 3) Shift the EntryData object that precede our updated entry so
* that we can expand the "Empty" section by the amount of bytes
* that are not used anymore by the entry.
* 4) Update the dataOffset and dataSize fields of the EntryHdr.
* 5) Update the total size of the bucket.
* 6) Promote the entry.
* 3/ The new size is bigger.
* 1) Delete the old entry.
* 2) Insert the entry again with the new size. We don't need to promote
* the entry here because insert will insert the entry in the MRU
* spot.
* 3) Update the handle to point to the first position.
*
* @param handle Handle to the entry to be updated. Remains valid after
* this function returns.
* @param val New value of the entry.
* @param size Size of the new value.
* @param evictionCb Eviction callback to be called when an entry is
* evicted due to relocating the updated entry.
*/
  static void updateVal(EntryHandle& handle,
                        const Value* val,
                        size_t size,
                        EvictionCb evictionCb) {
    XDCHECK(handle);
    EntryHdr* existingEntry = handle.getEntry();
    EntryData* entryData = getEntryData(handle.bucket_, handle.pos_);
    if (existingEntry->dataSize == size) {
      /* The new size is equal to the old size. */
      /* 1) Copy the new data in place. */
      memcpy(entryData->data, val, size);
      /* 2) Promote the entry. */
      promote(handle);
    } else if (size < existingEntry->dataSize) {
      /* New data is smaller. Update the data in place starting at the new
       * offset and shift the EntryData objects that precede it. */
      /* 1) Compute the new offset of the EntryData by moving the
       *    existing one to the right by the amount of bytes that makes the
       *    difference between the old size and the new size. The data keeps
       *    its end position; only its start moves right. */
      const EntryDataOffset shiftDistance = existingEntry->dataSize - size;
      entryData = reinterpret_cast<EntryData*>(
          reinterpret_cast<char*>(entryData) + shiftDistance);
      /* 2) Copy the new value at this offset. The EntryData header moved
       *    too, so its headerIndex must be rewritten. */
      memcpy(entryData->data, val, size);
      entryData->headerIndex = handle.pos_;
      /* 3) Shift the EntryData object that precede our updated entry
       *    so that we can expand the "Empty" section by the amount of bytes
       *    that are not used anymore by the entry. existingEntry->dataOffset
       *    still holds the old offset at this point. */
      const size_t shiftChunkSize =
          existingEntry->dataOffset - getFirstEntryDataOffset(handle.bucket_);
      shiftEntriesData(handle.bucket_,
                       getFirstEntryData(handle.bucket_),
                       shiftDistance,
                       shiftChunkSize);
      /* 4) Update the dataOffset and dataSize fields of the
       *    EntryHdr. */
      existingEntry->dataOffset += shiftDistance;
      existingEntry->dataSize = size;
      /* 5) Update the total size of the bucket. */
      handle.bucket_->totalDataSize -= shiftDistance;
      /* 6) Promote the entry. */
      promote(handle);
#ifndef NDEBUG
      checkBucketConsistency(handle.bucket_);
#endif
    } else {
      /* The new size is bigger. */
      XDCHECK_GT(size, existingEntry->dataSize);
      /* 1) Delete the old entry. Keep a copy of its header first: del()
       *    shifts the header storage it lives in. */
      const EntryHdr copy = *existingEntry;
      del(handle);
      /* 2) Insert the entry again with the new size. The new entry lands in
       *    the MRU spot, so no separate promote is needed.
       *    NOTE(review): insert()'s return value is ignored here; it can
       *    fail (-1) when `size` exceeds the limits it checks — confirm
       *    callers validate the size before calling updateVal. */
      insert(handle.bucket_, copy.key, val, size, evictionCb);
      /* 3) Update the handle to point to the new position. */
      handle = first(handle.bucket_);
    }
  }
/**
* Copy a an entry's value to a buffer.
* The buffer must be large enough to store the value, i.e the caller should
* allocate a buffer of a size greater or equal to the maximum possible size
* of a value in this compact cache.
*
* @param val Buffer in which to copy the entry's value.
* @param handle Handle of the entry from which to copy the value. Remains
* valid after this function returns.
*/
static void copyVal(Value* val, size_t* size, const EntryHandle& handle) {
XDCHECK(handle);
XDCHECK(val);
XDCHECK(size);
*size = handle.size();
const EntryData* entryData = getEntryData(handle.bucket_, handle.pos_);
memcpy(val, entryData->data, *size);
}
constexpr static size_t maxValueSize() { return kMaxValueSize; }
private:
/**
* Check that a bucket is consistent. This goes through all the entries and
* checks the offsets to verify that they match between the EntryHdr
* headers and the EntryData objects.
* This also verifies that the total sum of the sizes of all the entries is
* equal to the size of the "Data" section, and that the EntryData
* objects are contiguous.
*
* This should be called after each operation when in debug mode in order to
* verify that the operation leaves the bucket in a consistent state.
*
* @param bucket Bucket to be checked.
*/
  static void checkBucketConsistency(Bucket* bucket) {
    EntryHandle handle = first(bucket);
    size_t totalSize = 0;
    size_t nEntries = 0;
    bool headerOffsetError = false;
    /* Check that the data part does not overlap the entry headers. */
    XDCHECK_GE(reinterpret_cast<uintptr_t>(getFirstEntryData(bucket)),
               reinterpret_cast<uintptr_t>(
                   getEntryHeader(bucket, bucket->numEntries)));
    /* Keep track of the EntryData's offsets and sizes as we see them
     * when iterating on the EntryHdr headers. We will later use this to
     * check that the EntryData objects are contiguous. */
    using EntryInfo = std::pair<EntryDataOffset, EntryDataSize>;
    std::vector<EntryInfo> entryDataSeen;
    /* Iterate on the EntryHdr headers. */
    while (handle) {
      EntryData* entryData = getEntryData(handle.bucket_, handle.pos_);
      totalSize += handle.size() + sizeof(EntryData);
      /* The offset headers of the entries we are seeing should match
       * their order: each EntryData must point back at the position of
       * its own header. */
      if (entryData->headerIndex != nEntries) {
        headerOffsetError = true;
      }
      EntryHdr* entryHeader = getEntryHeader(bucket, handle.pos_);
      EntryDataOffset dataOffset = entryHeader->dataOffset;
      EntryDataSize dataSize = entryHeader->dataSize;
      entryDataSeen.push_back(std::make_pair(dataOffset, dataSize));
      handle.next();
      nEntries++;
    }
    /* Sort entryDataSeen by offset, in increasing order. */
    auto compareFn = [](const EntryInfo& a, const EntryInfo& b) -> bool {
      return a.first < b.first;
    };
    std::sort(entryDataSeen.begin(), entryDataSeen.end(), compareFn);
    /* Check that the EntryData objects are contiguous. */
    bool dataOffsetError = false;
    EntryDataOffset expectedNextOffset = getFirstEntryDataOffset(bucket);
    for (unsigned int i = 0; i < entryDataSeen.size(); i++) {
      if (entryDataSeen[i].first != expectedNextOffset) {
        /* The current entry does not start where expected. */
        dataOffsetError = true;
        break;
      }
      /* The next entry should start right after the current entry. */
      expectedNextOffset += entryDataSeen[i].second + sizeof(EntryData);
    }
    /* the last EntryData should have ended at the very end of the
     * bucket. */
    if (expectedNextOffset != kBucketDataSize) {
      dataOffsetError = true;
    }
    /* Throw an assert if something is wrong. */
    if (headerOffsetError || dataOffsetError ||
        totalSize != bucket->totalDataSize || nEntries != bucket->numEntries) {
      /* Copy the bucket locally for easier debugging in case the slab is
       * not in the core dump file. bucketCopy is deliberately never read
       * by the code itself. */
      Bucket bucketCopy;
      memcpy(&bucketCopy, bucket, sizeof(Bucket));
      XDCHECK(false);
    }
  }
  /**
   * Shift the EntryData objects that belong to a particular window of
   * EntryData objects. This takes care of modifying the EntryHdr
   * headers that correspond to each shifted EntryData object so that these
   * headers have an updated dataOffset.
   *
   * This is used by:
   *   - del: When an EntryData is deleted, we need to shift the
   *     EntryData objects that precede it.
   *   - updateVal: when an EntryData is updated with a new value of a size
   *     smaller than the old one, the EntryData objects that precede the
   *     entry are shifted in order to free the space that is not used
   *     anymore.
   *   - evictEntries: when an EntryData is evicted, we need to shift the
   *     EntryData objects that precede it (same as del).
   *
   * @param bucket           Bucket from which to shift data.
   * @param src              Pointer to the first EntryData object to be
   *                         shifted.
   * @param shiftDistance    Offset (in bytes) by which to shift the
   *                         EntryData objects to the right.
   * @param shiftedChunkSize Amount of bytes to be shifted. Starting from src,
   *                         all the EntryData objects that span in the
   *                         window defined by this value will be shifted.
   *                         I.e all the EntryData objects that have an
   *                         offset between src and src + shiftedChunkSize
   *                         will be shifted.
   */
  static void shiftEntriesData(Bucket* bucket,
                               EntryData* src,
                               EntryDataOffset shiftDistance,
                               size_t shiftedChunkSize) {
    char* dst = (reinterpret_cast<char*>(src) + shiftDistance);
    /* Adjust the dataOffset of all the EntryHdr headers that correspond
     * to the EntryData objects we are going to shift. This must happen
     * before the memmove below, because headerIndex is read from the
     * still-valid source locations. */
    EntryData* cur = src;
    while (reinterpret_cast<char*>(cur) <
           reinterpret_cast<char*>(src) + shiftedChunkSize) {
      EntryHdr* header = getEntryHeader(bucket, cur->headerIndex);
      header->dataOffset += shiftDistance;
      cur = getNextEntryData(bucket, cur);
    }
    /* Move the EntryData objects. memmove (not memcpy) is required: the
     * source and destination ranges overlap whenever shiftDistance is
     * smaller than shiftedChunkSize. */
    memmove(dst, src, shiftedChunkSize);
  }
  /**
   * Evict as many entries as needed, starting from the oldest, until the
   * amount of free space in the bucket is equal or greater than
   * requiredSpace. This is used by insert.
   *
   * 1) Walk through the EntryHdr headers starting from the LRU while
   *    updating a counter that contains the memory made available if evicted
   *    all entries seen so far. Stop when this counter is high enough (i.e
   *    bigger than requiredSpace).
   * 2) Sort the list of entries we decided to evict by their offset in the
   *    "Data" section, in increasing order. The cost of sorting the evicted
   *    entries should be negligible since the number of evicted entries
   *    should be small.
   * 3) Move the "blocks" of EntryData objects that are between two evicted
   *    entries, one by one.
   * 4) reduce the number of entries and the total size of the entries in the
   *    bucket.
   *
   * @param bucket        Bucket from which to evict entries.
   * @param requiredSpace Amount of space required (in bytes).
   * @param evictionCb    callback to be called for each evicted entry.
   * @return              true if at least one entry was evicted.
   */
  static bool evictEntries(Bucket* bucket,
                           size_t requiredSpace,
                           EvictionCb evictionCb) {
    /* Will contain all the entries we decide to evict. */
    std::vector<EntryHdr*> arr;
    size_t freeSpace = kBucketDataSize - bucket->totalDataSize -
                       sizeof(EntryHdr) * bucket->numEntries;
    if (freeSpace >= requiredSpace && bucket->numEntries != kMaxEntries) {
      /* We already have enough space. */
      return false;
    }
    /* 1) Walk through the EntryHdr headers starting from the LRU while
     *    updating a counter that contains the memory made available if
     *    evicted all entries seen so far. Stop when this counter is high
     *    enough (i.e bigger than requiredSpace). */
    /* Start from the lru, which is the last entry in the entry header.
     * numEntries is non-zero here: requiredSpace never exceeds
     * kBucketDataSize (insert caps the value size at kMaxValueSize), so an
     * empty bucket always takes the early return above. */
    unsigned int evictedPos = bucket->numEntries - 1;
    /* Convenient lambda function for evicting an entry. */
    auto evictOneMoreEntryFn = [&]() {
      EntryHdr* entry = getEntryHeader(bucket, evictedPos);
      /* There should always be at least one entry for eviction until
       * we have enough space. */
      XDCHECK_GE(reinterpret_cast<uintptr_t>(entry),
                 reinterpret_cast<uintptr_t>(getEntryHeader(bucket, 0)));
      /* Evicting the entry releases the space of the entry data and the
       * space of the entry header. */
      freeSpace += entry->dataSize + sizeof(EntryHdr) + sizeof(EntryData);
      arr.push_back(entry);
      evictionCb(EntryHandle(bucket, evictedPos));
      evictedPos--;
    };
    /* Evict at least one entry if the current number of entries is about to
     * overflow. This should not happen very often.
     * If this happens often, this means that the bucket is too big for the
     * entries, and we are wasting a lot of memory. */
    if (bucket->numEntries == kMaxEntries) {
      XLOG(ERR) << "Overflow in number of entries in a bucket";
      evictOneMoreEntryFn();
    }
    /* Evict entries until we have enough space. */
    while (freeSpace < requiredSpace) {
      evictOneMoreEntryFn();
    }
    /* 2) Sort the list of entries we decided to evict by their offset in
     *    the "Data" section, in increasing order, so that the left-most
     *    evicted entry is the first element of the array, and the right-most
     *    evicted entry is the last element.
     *    The cost of sorting the evicted entries should be negligible since
     *    the number of evicted entries should be small. */
    auto compareFn = [](const EntryHdr* a, const EntryHdr* b) -> bool {
      return a->dataOffset < b->dataOffset;
    };
    /* At least one entry was evicted above, so arr is never empty and the
     * loop below always initializes chunkStartOffset before any use. */
    XDCHECK_GT(arr.size(), 0);
    std::sort(arr.begin(), arr.end(), compareFn);
    /* 3) Move the "chunks" of EntryData objects that are between two
     *    evicted entries, one by one, starting from the right-most evicted
     *    entry. */
    size_t shiftDistance = 0;
    EntryDataOffset chunkStartOffset;
    int i = 0;
    for (i = arr.size() - 1; i >= 0; i--) {
      /* Amount of bytes by which to shift the chunk. We are removing the
       * current entry 'arr[i]', so we shift by the total amount of bytes
       * used by its corresponding EntryData. Here, we increment
       * shiftDistance instead of simply setting it to a new value because
       * we want to take into account the space made available by the
       * shifting performed by the previous iterations of this loop. */
      shiftDistance += arr[i]->dataSize + sizeof(EntryData);
      /* Compute the start offset of the chunk we are moving.
       * The chunk starts right at the end of the removed entry that is
       * at the left of the current removed entry. If the current removed
       * entry is the left-most, the start offset of the chunk is the
       * beginning of the data. */
      if (i == 0) {
        chunkStartOffset = getFirstEntryDataOffset(bucket);
      } else {
        chunkStartOffset =
            arr[i - 1]->dataOffset + arr[i - 1]->dataSize + sizeof(EntryData);
      }
      /* Size of the chunk to be moved. */
      size_t shiftedChunkSize = arr[i]->dataOffset - chunkStartOffset;
      if (shiftedChunkSize == 0) {
        /* The current removed entry is contiguous with the removed
         * entry on the left, so there is nothing between them to be
         * moved. */
        continue;
      }
      /* Pointer to the beginning of that chunk. */
      EntryData* src =
          reinterpret_cast<EntryData*>(bucket->data + chunkStartOffset);
      shiftEntriesData(bucket, src, shiftDistance, shiftedChunkSize);
    }
    /* 4) reduce the number of entries and the total size of the entries in
     *    the bucket. shiftDistance now totals the "Data" bytes released by
     *    all evicted entries. */
    XDCHECK_GE(bucket->numEntries, arr.size());
    XDCHECK_GE(bucket->totalDataSize, shiftDistance);
    bucket->numEntries -= arr.size();
    bucket->totalDataSize -= shiftDistance;
    return true;
  }
/* Utility functions */
/**
 * Get the offset where the "Data" section of the given bucket starts.
 * Entry data is packed at the end of the bucket, so the section begins at
 * kBucketDataSize minus the total bytes currently used by entry data.
 *
 * @param bucket Bucket for which we want to compute the offset where the
 *               data section begins.
 * @return Offset where the "Data" section of the bucket starts.
 */
constexpr static size_t getFirstEntryDataOffset(Bucket* bucket) {
  return kBucketDataSize - bucket->totalDataSize;
}
/**
 * Return a pointer to the first (left-most) EntryData object in a bucket.
 *
 * @param bucket Bucket from which to retrieve the first EntryData.
 * @return Pointer to the first EntryData object in the bucket.
 */
constexpr static EntryData* getFirstEntryData(Bucket* bucket) {
  return reinterpret_cast<EntryData*>(bucket->data +
                                      getFirstEntryDataOffset(bucket));
}
/**
 * Return a pointer to the EntryHdr header at position headerPos.
 * Entry headers are laid out as a contiguous array at the beginning of
 * the bucket's data region, so this is plain pointer arithmetic.
 *
 * @param bucket Bucket from which to retrieve the EntryHdr header.
 * @param headerPos Position of the entry header.
 * @return Pointer to the EntryHdr header at position headerPos.
 */
constexpr static EntryHdr* getEntryHeader(Bucket* bucket, uint8_t headerPos) {
  return reinterpret_cast<EntryHdr*>(bucket->data) + headerPos;
}
/**
 * Get a pointer to the entry data pointed to by the entry header at
 * position headerPos.
 *
 * @param bucket Bucket in which to look for the data.
 * @param headerPos Position of the header of the entry for which to
 *                  retrieve the data.
 * @return Pointer to the entry data for the given entry header.
 */
constexpr static EntryData* getEntryData(Bucket* bucket, uint8_t headerPos) {
  // The header stores the byte offset of its payload within bucket->data.
  return reinterpret_cast<EntryData*>(
      bucket->data + getEntryHeader(bucket, headerPos)->dataOffset);
}
/**
 * Get the EntryData object that follows a given EntryData object.
 * It's the caller's responsibility to check that the returned pointer
 * does not overflow the bucket.
 * This is called by shiftEntriesData.
 *
 * @param bucket Bucket that contains the given EntryData object.
 * @param entryData Entry data for which we want to find the next one.
 * @return Entry data that follows entryData.
 */
constexpr static EntryData* getNextEntryData(Bucket* bucket,
                                             EntryData* entryData) {
  // Skip over this EntryData object plus its variable-length payload, whose
  // size is recorded in the corresponding entry header.
  return reinterpret_cast<EntryData*>(
      reinterpret_cast<char*>(entryData) + sizeof(EntryData) +
      getEntryHeader(bucket, entryData->headerIndex)->dataSize);
}
/**
 * Check that the given value will not overflow if written to an integral of
 * type T.
 *
 * @param val Value to be range-checked against T's maximum.
 */
template <typename T>
static void checkOverflow(size_t val) {
  // Debug-only sanity check: fires if `val` cannot be represented in T.
  XDCHECK_LE(val, std::numeric_limits<T>::max());
}
};
// Out-of-class definitions for the static constexpr data members. These are
// required when the members are ODR-used under pre-C++17 rules (where static
// constexpr data members are not implicitly inline).
template <typename CompactCacheDescriptor>
constexpr size_t VariableLruBucket<CompactCacheDescriptor>::kBucketDataSize;
template <typename CompactCacheDescriptor>
constexpr size_t VariableLruBucket<CompactCacheDescriptor>::kMaxValueSize;
} // namespace cachelib
} // namespace facebook
| 13,705 |
3,395 |
<reponame>MitchellTesla/datasets<filename>datasets/elkarhizketak/elkarhizketak.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ElkarHizketak: Conversational Question Answering dataset in Basque"""
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{otegi-etal-2020-conversational,
title = "{Conversational Question Answering in Low Resource Scenarios: A Dataset and Case Study for {B}asque}",
author = "<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME>",
booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
year = "2020",
publisher = "European Language Resources Association",
url = "https://aclanthology.org/2020.lrec-1.55",
pages = "436--442",
ISBN = "979-10-95546-34-4",
}
"""
_DESCRIPTION = """
ElkarHizketak is a low resource conversational Question Answering
(QA) dataset in Basque created by Basque speaker volunteers. The
dataset contains close to 400 dialogues and more than 1600 question
and answers, and its small size presents a realistic low-resource
scenario for conversational QA systems. The dataset is built on top of
Wikipedia sections about popular people and organizations. The
dialogues involve two crowd workers: (1) a student ask questions after
reading a small introduction about the person, but without seeing the
section text; and (2) a teacher answers the questions selecting a span
of text of the section. """
# Project page for the dataset.
_HOMEPAGE = "http://ixa.si.ehu.es/node/12934"

_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International Public License (CC BY-SA 4.0)"

# Official download locations for the three splits of the v1.0 release.
_URLs = {
    "train": "http://ixa2.si.ehu.es/convai/elkarhizketak-v1.0/elkarhizketak-train-v1.0.json",
    "validation": "http://ixa2.si.ehu.es/convai/elkarhizketak-v1.0/elkarhizketak-dev-v1.0.json",
    "test": "http://ixa2.si.ehu.es/convai/elkarhizketak-v1.0/elkarhizketak-test-v1.0.json",
}
class Elkarhizketak(datasets.GeneratorBasedBuilder):
    """ElkarHizketak: Conversational Question Answering dataset in Basque. Version 1.0."""

    VERSION = datasets.Version("1.0.0")

    # Single configuration: the plain-text release of the corpus.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text",
            description="Plain text",
            version=VERSION,
        ),
    ]

    def _info(self):
        # Declares the feature schema: one example per (dialogue, question turn),
        # carrying the page/dialogue context plus the answers for that turn.
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "dialogue_id": datasets.Value("string"),
                    "wikipedia_page_title": datasets.Value("string"),
                    "background": datasets.Value("string"),
                    "section_title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "turn_id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    # Yes/no marker; "x" presumably flags questions that are not
                    # yes/no questions -- TODO confirm against the dataset card.
                    "yesno": datasets.ClassLabel(names=["y", "n", "x"]),
                    # Parallel lists: one element per annotated answer span.
                    "answers": datasets.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                            "input_text": datasets.Value("string"),
                        }
                    ),
                    # The original single answer span for the turn.
                    "orig_answer": {
                        "text": datasets.Value("string"),
                        "answer_start": datasets.Value("int32"),
                    },
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Downloads the three official splits and wires each to its JSON file."""
        data_dir = dl_manager.download_and_extract(_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir["validation"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir["test"],
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields one example per question turn from a QuAC-style JSON file."""
        logger.info("generating examples from = %s", filepath)
        key = 0  # running integer key, unique across the whole split
        with open(filepath, encoding="utf-8") as f:
            elkarhizketak = json.load(f)
            for section in elkarhizketak["data"]:
                wiki_page_title = section.get("title", "").strip()
                background = section.get("background", "").strip()
                section_title = section.get("section_title", "").strip()

                for dialogue in section["paragraphs"]:
                    context = dialogue["context"].strip()
                    dialogue_id = dialogue["id"]

                    for qa in dialogue["qas"]:
                        # Flatten the per-answer dicts into parallel lists, as
                        # declared by the `answers` Sequence feature above.
                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        answers = [answer["text"].strip() for answer in qa["answers"]]
                        input_texts = [answer["input_text"].strip() for answer in qa["answers"]]

                        yield key, {
                            "wikipedia_page_title": wiki_page_title,
                            "background": background,
                            "section_title": section_title,
                            "context": context,
                            "dialogue_id": dialogue_id,
                            "question": qa["question"],
                            "turn_id": qa["id"],
                            "yesno": qa["yesno"],
                            "answers": {
                                "answer_start": answer_starts,
                                "text": answers,
                                "input_text": input_texts,
                            },
                            "orig_answer": {
                                "answer_start": qa["orig_answer"]["answer_start"],
                                "text": qa["orig_answer"]["text"],
                            },
                        }
                        key += 1
| 3,440 |
575 |
<reponame>sarang-apps/darshan_browser
#!/usr/bin/env vpython
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shutil
import tempfile
import unittest
# Make the bundled telemetry checkout importable before importing from it.
from core import path_util
path_util.AddTelemetryToPath()

from telemetry import decorators

import mock

import process_perf_results as ppr_module

# Output result filenames are prefixed with a 36-character UUID; tests strip
# this prefix before comparing names.
UUID_SIZE = 36
class _FakeLogdogStream(object):
def write(self, data):
del data # unused
def close(self):
pass
def get_viewer_url(self):
return 'http://foobar.not.exit'
# pylint: disable=protected-access
class DataFormatParsingUnitTest(unittest.TestCase):
  """Covers ppr_module's sniffing of result-file formats (gtest vs chartjson
  vs histogram). NOTE: patches Python 2's `__builtin__.open`.
  """

  def tearDown(self):
    # The sniffers memoize per path; clear so each test re-reads the "file".
    ppr_module._data_format_cache = {}

  def testGtest(self):
    with mock.patch('__builtin__.open', mock.mock_open(read_data='{}')):
      self.assertTrue(ppr_module._is_gtest('test.json'))
      self.assertFalse(ppr_module._is_histogram('test.json'))
    # Second round exercises the cache (no open patched here).
    self.assertTrue(ppr_module._is_gtest('test.json'))
    self.assertFalse(ppr_module._is_histogram('test.json'))

  def testChartJSON(self):
    with mock.patch('__builtin__.open',
                    mock.mock_open(read_data='{"charts": 1}')):
      self.assertFalse(ppr_module._is_gtest('test.json'))
      self.assertFalse(ppr_module._is_histogram('test.json'))
    # Second round exercises the cache (no open patched here).
    self.assertFalse(ppr_module._is_gtest('test.json'))
    self.assertFalse(ppr_module._is_histogram('test.json'))

  def testHistogram(self):
    with mock.patch('__builtin__.open', mock.mock_open(read_data='[]')):
      self.assertTrue(ppr_module._is_histogram('test.json'))
      self.assertFalse(ppr_module._is_gtest('test.json'))
    # Second round exercises the cache (no open patched here).
    self.assertTrue(ppr_module._is_histogram('test.json'))
    self.assertFalse(ppr_module._is_gtest('test.json'))
class ProcessPerfResultsIntegrationTest(unittest.TestCase):
  """End-to-end test: runs process_perf_results over a checked-in fixture of
  per-benchmark shard outputs and checks the merged output files and the
  per-benchmark upload map. External services (logdog, perf dashboard) are
  mocked out in setUp.
  """

  def setUp(self):
    self.test_dir = tempfile.mkdtemp()
    self.output_json = os.path.join(self.test_dir, 'output.json')
    # Checked-in fixture directory containing recorded task output.
    self.task_output_dir = os.path.join(
        os.path.dirname(__file__), 'testdata', 'task_output_dir')

    m1 = mock.patch(
        'process_perf_results.logdog_helper.text',
        return_value = 'http://foo.link')
    m1.start()
    self.addCleanup(m1.stop)

    m2 = mock.patch(
        'process_perf_results.logdog_helper.open_text',
        return_value=_FakeLogdogStream())
    m2.start()
    self.addCleanup(m2.stop)

    # Never talk to the real perf dashboard from the test.
    m3 = mock.patch('core.results_dashboard.SendResults')
    m3.start()
    self.addCleanup(m3.stop)

  def tearDown(self):
    shutil.rmtree(self.test_dir)

  @decorators.Disabled('chromeos') # crbug.com/865800
  @decorators.Disabled('win') # crbug.com/860677, mock doesn't integrate well
  # with multiprocessing on Windows.
  @decorators.Disabled('all') # crbug.com/967125
  def testIntegration(self):
    # Build-properties blob as the recipe would pass it in.
    build_properties = json.dumps({
        'perf_dashboard_machine_group': 'test-builder',
        'buildername': 'test-builder',
        'buildnumber': 777,
        'got_v8_revision': 'beef1234',
        'got_revision_cp': 'refs/heads/master@{#1234}',
        'got_webrtc_revision': 'fee123',
        'git_revision': 'deadbeef',
        'buildbucket': r"""{"build":
            {"bucket": "master.tryserver.chromium.perf",
            "created_by": "user:foo",
            "created_ts": "1535490272757820",
            "id": "8936915467712010816",
            "project": "chrome",
            "lease_key": "461228535",
            "tags": ["builder:android-go-perf", "buildset:patch/1194825/3",
            "cq_experimental:False",
            "master:master.tryserver.chromium.perf",
            "user_agent:cq"]}}"""
    })

    output_results_dir = os.path.join(self.test_dir, 'outputresults')
    os.mkdir(output_results_dir)
    return_code, benchmark_upload_result_map = ppr_module.process_perf_results(
        self.output_json, configuration_name='test-builder',
        build_properties=build_properties,
        task_output_dir=self.task_output_dir,
        smoke_test_mode=False,
        output_results_dir=output_results_dir)

    # Output filenames are prefixed with a UUID. Strip it off.
    output_results = {
        filename[UUID_SIZE:]: os.stat(os.path.join(
            output_results_dir, filename)).st_size
        for filename in os.listdir(output_results_dir)}
    self.assertEquals(32, len(output_results))
    # Each merged benchmark result file should be non-trivially sized.
    self.assertLess(10 << 10, output_results["power.desktop.reference"])
    self.assertLess(10 << 10, output_results["blink_perf.image_decoder"])
    self.assertLess(10 << 10, output_results["octane.reference"])
    self.assertLess(10 << 10, output_results["power.desktop"])
    self.assertLess(10 << 10, output_results["speedometer-future"])
    self.assertLess(10 << 10, output_results["blink_perf.owp_storage"])
    self.assertLess(10 << 10, output_results["memory.desktop"])
    self.assertLess(10 << 10, output_results["wasm"])
    self.assertLess(10 << 10, output_results[
        "dummy_benchmark.histogram_benchmark_1"])
    self.assertLess(10 << 10, output_results[
        "dummy_benchmark.histogram_benchmark_1.reference"])
    self.assertLess(10 << 10, output_results["wasm.reference"])
    self.assertLess(10 << 10, output_results["speedometer"])
    self.assertLess(10 << 10, output_results[
        "memory.long_running_idle_gmail_tbmv2"])
    self.assertLess(10 << 10, output_results["v8.runtime_stats.top_25"])
    self.assertLess(1 << 10, output_results[
        "dummy_benchmark.noisy_benchmark_1"])
    self.assertLess(10 << 10, output_results["blink_perf.svg"])
    self.assertLess(10 << 10, output_results[
        "v8.runtime_stats.top_25.reference"])
    self.assertLess(10 << 10, output_results["jetstream.reference"])
    self.assertLess(10 << 10, output_results["jetstream"])
    self.assertLess(10 << 10, output_results["speedometer2-future.reference"])
    self.assertLess(10 << 10, output_results["blink_perf.svg.reference"])
    self.assertLess(10 << 10, output_results[
        "blink_perf.image_decoder.reference"])
    self.assertLess(10 << 10, output_results["power.idle_platform.reference"])
    self.assertLess(10 << 10, output_results["power.idle_platform"])
    self.assertLess(1 << 10, output_results[
        "dummy_benchmark.noisy_benchmark_1.reference"])
    self.assertLess(10 << 10, output_results["speedometer-future.reference"])
    self.assertLess(10 << 10, output_results[
        "memory.long_running_idle_gmail_tbmv2.reference"])
    self.assertLess(10 << 10, output_results["memory.desktop.reference"])
    self.assertLess(10 << 10, output_results[
        "blink_perf.owp_storage.reference"])
    self.assertLess(10 << 10, output_results["octane"])
    self.assertLess(10 << 10, output_results["speedometer.reference"])

    self.assertEquals(return_code, 1)
    self.assertEquals(benchmark_upload_result_map,
        {
            "power.desktop.reference": True,
            "blink_perf.image_decoder": True,
            "octane.reference": True,
            "power.desktop": True,
            "speedometer-future": True,
            "blink_perf.owp_storage": True,
            "memory.desktop": True,
            "wasm": True,
            "dummy_benchmark.histogram_benchmark_1": True,
            "dummy_benchmark.histogram_benchmark_1.reference": True,
            "wasm.reference": True,
            "speedometer": True,
            "memory.long_running_idle_gmail_tbmv2": True,
            "v8.runtime_stats.top_25": True,
            "dummy_benchmark.noisy_benchmark_1": True,
            "blink_perf.svg": True,
            "v8.runtime_stats.top_25.reference": True,
            "jetstream.reference": True,
            "jetstream": True,
            "speedometer2-future.reference": True,
            "speedometer2-future": False, # Only this fails due to malformed data
            "blink_perf.svg.reference": True,
            "blink_perf.image_decoder.reference": True,
            "power.idle_platform.reference": True,
            "power.idle_platform": True,
            "dummy_benchmark.noisy_benchmark_1.reference": True,
            "speedometer-future.reference": True,
            "memory.long_running_idle_gmail_tbmv2.reference": True,
            "memory.desktop.reference": True,
            "blink_perf.owp_storage.reference": True,
            "octane": True,
            "speedometer.reference": True
        })
class ProcessPerfResults_HardenedUnittest(unittest.TestCase):
  """Unit tests for individual ppr_module helpers, focusing on robustness
  against missing directories and empty shards. logdog is mocked out.
  """

  def setUp(self):
    self._logdog_text = mock.patch(
        'process_perf_results.logdog_helper.text',
        return_value = 'http://foo.link')
    self._logdog_text.start()
    self.addCleanup(self._logdog_text.stop)

    self._logdog_open_text = mock.patch(
        'process_perf_results.logdog_helper.open_text',
        return_value=_FakeLogdogStream())
    self._logdog_open_text.start()
    self.addCleanup(self._logdog_open_text.stop)

  @decorators.Disabled('chromeos') # crbug.com/956178
  def test_handle_perf_json_test_results_IOError(self):
    # A missing shard directory must not raise and must produce no results.
    directory_map = {
        'benchmark.example': ['directory_that_does_not_exist']}
    test_results_list = []
    ppr_module._handle_perf_json_test_results(directory_map, test_results_list)
    self.assertEqual(test_results_list, [])

  @decorators.Disabled('chromeos') # crbug.com/956178
  def test_last_shard_has_no_tests(self):
    # A benchmark whose final shard ran zero tests must still be enabled.
    benchmark_name = 'benchmark.example'
    temp_parent_dir = tempfile.mkdtemp(suffix='test_results_outdir')
    try:
      shard1_dir = os.path.join(temp_parent_dir, 'shard1')
      os.mkdir(shard1_dir)
      shard2_dir = os.path.join(temp_parent_dir, 'shard2')
      os.mkdir(shard2_dir)
      with open(os.path.join(shard1_dir, 'test_results.json'), 'w') as fh:
        fh.write(
            '{"version": 3, "tests":{"v8.browsing_desktop-future": "blah"}}')
      with open(os.path.join(shard2_dir, 'test_results.json'), 'w') as fh:
        fh.write('{"version": 3,"tests":{}}')
      directory_map = {
          benchmark_name: [shard1_dir, shard2_dir]}
      benchmark_enabled_map = ppr_module._handle_perf_json_test_results(
          directory_map, [])
      self.assertTrue(benchmark_enabled_map[benchmark_name],
                      'Regression test for crbug.com/984565')
    finally:
      shutil.rmtree(temp_parent_dir)

  @decorators.Disabled('chromeos') # crbug.com/956178
  def test_merge_perf_results_IOError(self):
    # Merging from a nonexistent directory must not raise.
    results_filename = None
    directories = ['directory_that_does_not_exist']
    ppr_module._merge_perf_results('benchmark.example', results_filename,
                                   directories)

  @decorators.Disabled('chromeos') # crbug.com/956178
  def test_handle_perf_logs_no_log(self):
    # A benchmark directory without benchmark_log.txt must be tolerated.
    tempdir = tempfile.mkdtemp()
    try:
      dir1 = os.path.join(tempdir, '1')
      dir2 = os.path.join(tempdir, '2')
      os.makedirs(dir1)
      os.makedirs(dir2)
      with open(os.path.join(dir1, 'benchmark_log.txt'), 'w') as logfile:
        logfile.write('hello world')
      directory_map = {
          'benchmark.with.log': [dir1],
          'benchmark.with.no.log': [dir2],
      }
      extra_links = {}
      ppr_module._handle_perf_logs(directory_map, extra_links)
    finally:
      shutil.rmtree(tempdir)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| 4,867 |
5,169 |
<gh_stars>1000+
{
"name": "DataManager",
"version": "0.2.0",
"summary": "DataManager is a small utility class that helps you manage Core Data.",
"description": "DataManager takes care of Core Data boilerplate code for you. It handles setting up the Core Data stack with support for asynchronous saves. It also includes a few simple fetch and deletion methods.",
"homepage": "https://github.com/metova/DataManager",
"license": "MIT",
"authors": {
"<NAME>": "<EMAIL>"
},
"platforms": {
"ios": "8.0",
"osx": "10.9",
"watchos": "2.0",
"tvos": "9.0"
},
"source": {
"git": "https://github.com/metova/DataManager.git",
"tag": "0.2.0"
},
"source_files": "Sources/*.swift",
"frameworks": "CoreData"
}
| 275 |
356 |
<gh_stars>100-1000
/* Detection of a cycle (loop) in a singly linked list.

There may exist a node in a linked list that points to another node already present
earlier in the list; this leads to the formation of a loop in the linked list.

Solution: We use the hare-and-tortoise algorithm. We maintain two pointers, hare and
tortoise. The tortoise pointer moves one step per iteration and the hare pointer moves
two steps per iteration. At every iteration we check whether hare equals tortoise; if
so, a loop exists in the list and we return true. Simultaneously we also check whether
tortoise, hare, or hare->next is NULL; if so, there is no cycle present and we return false.

*/
#include <bits/stdc++.h>
using namespace std;
// Node of a singly linked list: holds an int payload and a successor pointer.
class Node {
public:
    int data;   // payload stored in this node
    Node *next; // successor node, or NULL at the tail

    explicit Node(int element) : data(element), next(NULL) {}
};
//Creation of LinkedList class with inserAtEnd function
class LinkedList{
public:
Node * head = NULL;
Node * tail = NULL;
void insertAtEnd(int data){
Node * node = new Node(data);
if(head==NULL){
head = node;
tail = node;
return;
}
tail -> next = node;
tail = node;
}
};
// Hare-and-tortoise (Floyd) cycle detection.
// The fast pointer advances two nodes per step and the slow pointer one;
// they meet iff the list contains a cycle. Reaching NULL means no cycle.
bool detectCycle(Node *head){
    Node *slow = head;
    Node *fast = head;
    while (fast != NULL && fast->next != NULL) {
        slow = slow->next;
        fast = fast->next->next;
        if (slow == fast)
            return true; // pointers met inside the loop
    }
    return false; // fast ran off the end: list is acyclic
}
int main(){
LinkedList *ls = new LinkedList();
ls->insertAtEnd(1);
ls->insertAtEnd(2);
ls->insertAtEnd(3);
ls->tail -> next = ls->head;
cout << detectCycle(ls->head) << endl;
return 0;
}
/*Test Case
Input : 1->2->3
| |
^----<
Output : 1
Input : 1->2->3->4->5
Output : 0
Time Complexity
O(N)
Space Complexity
O(1)
*/
| 808 |
4,224 |
/****************************************************************************
*
* Copyright (c) 2015-2021 PX4 Development Team. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name PX4 nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/**
* @file Integrator.hpp
*
* A resettable integrator
*
* @author <NAME> <<EMAIL>>
* @author <NAME> <<EMAIL>>
*/
#pragma once
#include <mathlib/mathlib.h>
#include <matrix/math.hpp>
class Integrator
{
public:
	Integrator() = default;
	~Integrator() = default;

	// Shortest accepted integration period; smaller dt is treated as invalid.
	static constexpr float DT_MIN{1e-6f}; // 1 microsecond
	// Longest accepted accumulated period: keeps the total dt representable
	// in microseconds as a uint16_t (see reset(integral, integral_dt)).
	static constexpr float DT_MAX{static_cast<float>(UINT16_MAX) * 1e-6f};

	/**
	 * Put an item into the integral.
	 *
	 * @param val Item to put.
	 * @param dt Elapsed time in seconds since the previous sample.
	 */
	inline void put(const matrix::Vector3f &val, const float dt)
	{
		if ((dt > DT_MIN) && (_integral_dt + dt < DT_MAX)) {
			_alpha += integrate(val, dt);

		} else {
			// dt invalid or total would overflow: drop the accumulated state
			// but keep this sample so trapezoidal integration can resume.
			reset();
			_last_val = val;
		}
	}

	/**
	 * Set reset interval during runtime. This won't reset the integrator.
	 *
	 * @param reset_interval_us New reset time interval for the integrator in microseconds.
	 */
	void set_reset_interval(uint32_t reset_interval_us) { _reset_interval_min = reset_interval_us * 1e-6f; }

	/**
	 * Set required samples for reset. This won't reset the integrator.
	 *
	 * @param reset_samples New minimum number of integrated samples required
	 *                      before the integral is considered ready.
	 */
	void set_reset_samples(uint8_t reset_samples) { _reset_samples_min = reset_samples; }

	uint8_t get_reset_samples() const { return _reset_samples_min; }

	/**
	 * Is the Integrator ready to reset?
	 *
	 * @return true if integrator has sufficient data (minimum interval & samples satisfied) to reset.
	 */
	inline bool integral_ready() const { return (_integrated_samples >= _reset_samples_min) || (_integral_dt >= _reset_interval_min); }

	// Discard the accumulated integral, elapsed time and sample count.
	void reset()
	{
		_alpha.zero();
		_integral_dt = 0;
		_integrated_samples = 0;
	}

	/* Reset integrator and return current integral & integration time
	 *
	 * @param integral Output: the accumulated integral.
	 * @param integral_dt Output: duration of the integration in microseconds.
	 * @return true if integral valid (sufficient samples/time accumulated)
	 */
	bool reset(matrix::Vector3f &integral, uint16_t &integral_dt)
	{
		if (integral_ready()) {
			integral = _alpha;
			integral_dt = roundf(_integral_dt * 1e6f); // seconds to microseconds

			reset();

			return true;
		}

		return false;
	}

protected:
	// Trapezoidal step: averages the current and previous sample over dt and
	// updates the bookkeeping (sample count, elapsed time, last sample).
	inline matrix::Vector3f integrate(const matrix::Vector3f &val, const float dt)
	{
		// Use trapezoidal integration to calculate the delta integral
		_integrated_samples++;
		_integral_dt += dt;
		const matrix::Vector3f delta_alpha{(val + _last_val) *dt * 0.5f};
		_last_val = val;

		return delta_alpha;
	}

	matrix::Vector3f _alpha{0.f, 0.f, 0.f};    /**< integrated value before coning corrections are applied */
	matrix::Vector3f _last_val{0.f, 0.f, 0.f}; /**< previous input */
	float _integral_dt{0};                     // accumulated integration time in seconds

	float _reset_interval_min{0.005f}; /**< the interval after which the content will be published and the integrator reset */
	uint8_t _reset_samples_min{1};     // minimum samples before integral_ready()

	uint8_t _integrated_samples{0};    // samples accumulated since last reset
};
class IntegratorConing : public Integrator
{
public:
	IntegratorConing() = default;
	~IntegratorConing() = default;

	/**
	 * Put an item into the integral, accumulating coning corrections.
	 *
	 * @param val Item to put.
	 * @param dt Elapsed time in seconds since the previous sample.
	 */
	inline void put(const matrix::Vector3f &val, const float dt)
	{
		if ((dt > DT_MIN) && (_integral_dt + dt < DT_MAX)) {
			// Use trapezoidal integration to calculate the delta integral
			const matrix::Vector3f delta_alpha{integrate(val, dt)};

			// Calculate coning corrections
			// Coning compensation derived by <NAME> and <NAME>,
			// following:
			// Strapdown Inertial Navigation Integration Algorithm Design Part 1: Attitude Algorithms
			// Sourced: https://arc.aiaa.org/doi/pdf/10.2514/2.4228
			// Simulated: https://github.com/priseborough/InertialNav/blob/master/models/imu_error_modelling.m
			// NOTE(review): operator% here presumably computes the vector
			// cross product (matrix lib convention) -- confirm.
			_beta += ((_last_alpha + _last_delta_alpha * (1.f / 6.f)) % delta_alpha) * 0.5f;
			_last_delta_alpha = delta_alpha;
			_last_alpha = _alpha;

			// accumulate delta integrals
			_alpha += delta_alpha;

		} else {
			// Invalid dt or overflow: restart, but keep the latest sample.
			reset();
			_last_val = val;
		}
	}

	// Clear the base integrator state plus the coning-correction history.
	void reset()
	{
		Integrator::reset();
		_beta.zero();
		_last_alpha.zero();
	}

	// Coning corrections accumulated since the last reset.
	const matrix::Vector3f &accumulated_coning_corrections() const { return _beta; }

	/* Reset integrator and return current integral & integration time
	 *
	 * @param integral Output: accumulated integral with coning corrections applied.
	 * @param integral_dt Output: duration of the integration in microseconds.
	 * @return true if integral valid
	 */
	bool reset(matrix::Vector3f &integral, uint16_t &integral_dt)
	{
		if (Integrator::reset(integral, integral_dt)) {
			// apply coning corrections
			integral += _beta;
			_beta.zero();
			_last_alpha.zero();

			return true;
		}

		return false;
	}

private:
	matrix::Vector3f _beta{0.f, 0.f, 0.f};             /**< accumulated coning corrections */
	matrix::Vector3f _last_delta_alpha{0.f, 0.f, 0.f}; /**< integral from previous previous sampling interval */
	matrix::Vector3f _last_alpha{0.f, 0.f, 0.f};       /**< previous value of _alpha */
};
| 2,281 |
977 |
<filename>src/realm/util/network_ssl.cpp
#include <cstring>
#include <mutex>
#include <realm/string_data.hpp>
#include <realm/util/cf_str.hpp>
#include <realm/util/features.h>
#include <realm/util/network_ssl.hpp>
#if REALM_HAVE_OPENSSL
#ifdef _WIN32
#include <Windows.h>
#else
#include <pthread.h>
#endif
#include <openssl/conf.h>
#include <openssl/x509v3.h>
#elif REALM_HAVE_SECURE_TRANSPORT
#include <fstream>
#include <vector>
#endif
using namespace realm;
using namespace realm::util;
using namespace realm::util::network;
using namespace realm::util::network::ssl;
namespace {
#if REALM_INCLUDE_CERTS
// Bundled trusted root certificates; each element is a PEM-encoded
// certificate string supplied by the included header.
const char* root_certs[] = {
#include <realm/sync/noinst/root_certs.hpp>
};
// Check whether `server_cert`'s signature verifies against the public key of
// the PEM-encoded certificate in `root_cert`. Uses a goto-based cleanup
// ladder so every OpenSSL object allocated before a failure is released
// exactly once.
bool verify_certificate_from_root_cert(const char* root_cert, X509* server_cert)
{
    bool verified = false;
    BIO* bio;
    X509* x509;
    EVP_PKEY* pkey;

    // Wrap the in-memory PEM string in a BIO (-1 = treat as NUL-terminated).
    bio = BIO_new_mem_buf(const_cast<char*>(root_cert), -1);
    if (!bio)
        goto out;
    x509 = PEM_read_bio_X509(bio, nullptr, nullptr, nullptr);
    if (!x509)
        goto free_bio;
    pkey = X509_get_pubkey(x509);
    if (!pkey)
        goto free_x509;
    // X509_verify() returns 1 when the signature checks out.
    verified = (X509_verify(server_cert, pkey) == 1);
    EVP_PKEY_free(pkey);

free_x509:
    X509_free(x509);
free_bio:
    BIO_free(bio);
out:
    return verified;
}
// Try each bundled root certificate in turn until one validates the server
// certificate's signature; log the outcome through `logger` when provided.
bool verify_certificate_from_root_certs(X509* server_cert, util::Logger* logger)
{
    const std::size_t num_certs = sizeof(root_certs) / sizeof(root_certs[0]);
    if (logger)
        logger->info("Verifying server SSL certificate using %1 root certificates", num_certs);
    for (std::size_t i = 0; i < num_certs; ++i) {
        const char* root_cert = root_certs[i];
        if (verify_certificate_from_root_cert(root_cert, server_cert)) {
            if (logger)
                logger->debug("Server SSL certificate verified using root certificate(%1):\n%2", i, root_cert);
            return true;
        }
    }
    if (logger)
        logger->error("The server certificate was not signed by any root certificate");
    return false;
}
#endif // REALM_INCLUDE_CERTS
#if REALM_HAVE_OPENSSL && (OPENSSL_VERSION_NUMBER < 0x10100000L || defined(LIBRESSL_VERSION_NUMBER))
// These must be made to execute before main() is called, i.e., before there is
// any chance of threads being spawned.
// Holder for process-wide OpenSSL initialization and the mutex array
// required by CRYPTO_set_locking_callback().
struct OpensslInit {
    std::unique_ptr<std::mutex[]> mutexes; // one mutex per OpenSSL lock id
    OpensslInit();
    ~OpensslInit();
};

// Global instance: constructed during static initialization, before main()
// runs and before any threads can be spawned; torn down at static
// destruction time.
OpensslInit g_openssl_init;
// Locking callback installed into OpenSSL (pre-1.1.0 threading API): locks
// or unlocks mutex `i` depending on whether CRYPTO_LOCK is set in `mode`.
void openssl_locking_func(int mode, int i, const char*, int)
{
    if (mode & CRYPTO_LOCK) {
        g_openssl_init.mutexes[i].lock();
    }
    else {
        g_openssl_init.mutexes[i].unlock();
    }
}
OpensslInit::OpensslInit()
{
    // Global library setup (required before OpenSSL 1.1.0).
    SSL_library_init();
    SSL_load_error_strings();
    OpenSSL_add_all_algorithms();

    // Allocate one mutex per lock id OpenSSL asks for, then install the
    // callback that serializes access to its internal shared state.
    std::size_t n = CRYPTO_num_locks();
    mutexes.reset(new std::mutex[n]); // Throws
    CRYPTO_set_locking_callback(&openssl_locking_func);
    /*
    #if !defined(SSL_OP_NO_COMPRESSION) && (OPENSSL_VERSION_NUMBER >= 0x00908000L)
        null_compression_methods_ = sk_SSL_COMP_new_null();
    #endif
    */
}
OpensslInit::~OpensslInit()
{
    /*
    #if !defined(SSL_OP_NO_COMPRESSION) && (OPENSSL_VERSION_NUMBER >= 0x00908000L)
        sk_SSL_COMP_free(null_compression_methods_);
    #endif
    */
    // Uninstall our locking callback, then release the memory OpenSSL
    // allocated globally (error strings, thread state, digests/ciphers,
    // ex_data, config modules).
    CRYPTO_set_locking_callback(0);
    ERR_free_strings();
#if OPENSSL_VERSION_NUMBER < 0x10000000L
    ERR_remove_state(0);
#else
    ERR_remove_thread_state(0);
#endif
    EVP_cleanup();
    CRYPTO_cleanup_all_ex_data();
    CONF_modules_unload(1);
}
#endif // REALM_HAVE_OPENSSL && (OPENSSL_VERSION_NUMBER < 0x10100000L || defined(LIBRESSL_VERSION_NUMBER))
} // unnamed namespace
namespace realm {
namespace util {
namespace network {
namespace ssl {
ErrorCategory error_category;
// std::error_category interface: identifier for this error category.
const char* ErrorCategory::name() const noexcept
{
    return "realm.util.network.ssl";
}
// Map one of our Errors values to its human-readable description.
std::string ErrorCategory::message(int value) const
{
    // Single-valued enum: handle it directly, assert on anything else.
    if (Errors(value) == Errors::certificate_rejected)
        return "SSL certificate rejected"; // Throws
    REALM_ASSERT(false);
    return {};
}
// Maps backend-specific error codes (OpenSSL or SecureTransport) onto the
// generic ssl::Errors conditions, so callers can test for
// Errors::certificate_rejected regardless of the TLS backend in use.
bool ErrorCategory::equivalent(const std::error_code& ec, int condition) const noexcept
{
    switch (Errors(condition)) {
        case Errors::certificate_rejected:
#if REALM_HAVE_OPENSSL
            if (ec.category() == openssl_error_category) {
                // FIXME: Why use string comparison here? Seems like it would
                // suffice to compare the underlying numerical error codes.
                std::string message = ec.message();
                return ((message == "certificate verify failed" || message == "sslv3 alert bad certificate" ||
                         message == "sslv3 alert certificate expired" ||
                         message == "sslv3 alert certificate revoked"));
            }
#elif REALM_HAVE_SECURE_TRANSPORT
            if (ec.category() == secure_transport_error_category) {
                switch (ec.value()) {
                    case errSSLXCertChainInvalid:
                        return true;
                    default:
                        break;
                }
            }
#endif
            return false;
    }
    return false;
}
} // namespace ssl
// Global instance of the OpenSSL error category.
OpensslErrorCategory openssl_error_category;
// Name under which OpenSSL error codes are reported.
const char* OpensslErrorCategory::name() const noexcept
{
    return "openssl";
}
// Human-readable message for an OpenSSL error code (as obtained from
// ERR_get_error()); falls back to a generic message for unknown codes.
std::string OpensslErrorCategory::message(int value) const
{
#if REALM_HAVE_OPENSSL
    const char* reason = ERR_reason_error_string(value);
    if (reason)
        return std::string(reason); // Throws
#endif
    return "Unknown OpenSSL error (" + util::to_string(value) + ")"; // Throws
}
// Global instance of the SecureTransport error category.
SecureTransportErrorCategory secure_transport_error_category;
// Name under which SecureTransport (OSStatus) error codes are reported.
const char* SecureTransportErrorCategory::name() const noexcept
{
    return "securetransport";
}
// Human-readable message for a SecureTransport OSStatus error code; falls
// back to a generic message when the OS cannot supply one.
std::string SecureTransportErrorCategory::message(int value) const
{
#if REALM_HAVE_SECURE_TRANSPORT
#if __has_builtin(__builtin_available)
    // SecCopyErrorMessageString() is only available on sufficiently new OS
    // releases, hence the runtime availability check.
    if (__builtin_available(iOS 11.3, macOS 10.3, tvOS 11.3, watchOS 4.3, *)) {
        auto status = OSStatus(value);
        void* reserved = nullptr;
        if (auto message = adoptCF(SecCopyErrorMessageString(status, reserved)))
            return cfstring_to_std_string(message.get());
    }
#endif // __has_builtin(__builtin_available)
#endif // REALM_HAVE_SECURE_TRANSPORT
    return std::string("Unknown SecureTransport error (") + util::to_string(value) + ")"; // Throws
}
namespace ssl {
// Exception message used when an unsupported SSL/TLS protocol is requested.
const char* ProtocolNotSupported::what() const noexcept
{
    static const char message[] = "SSL/TLS protocol not supported";
    return message;
}
// Performs a blocking SSL handshake over the underlying TCP socket.
// Precondition: no asynchronous read or write operation may be in progress.
std::error_code Stream::handshake(std::error_code& ec)
{
    REALM_ASSERT(!m_tcp_socket.m_read_oper || !m_tcp_socket.m_read_oper->in_use());
    REALM_ASSERT(!m_tcp_socket.m_write_oper || !m_tcp_socket.m_write_oper->in_use());
    m_tcp_socket.m_desc.ensure_blocking_mode(); // Throws
    Want want = Want::nothing;
    ssl_handshake(ec, want);
    // In blocking mode the operation must have run to completion.
    REALM_ASSERT(want == Want::nothing);
    return ec;
}
// Performs a blocking SSL shutdown (close_notify exchange).
// Precondition: no asynchronous write operation may be in progress.
std::error_code Stream::shutdown(std::error_code& ec)
{
    REALM_ASSERT(!m_tcp_socket.m_write_oper || !m_tcp_socket.m_write_oper->in_use());
    m_tcp_socket.m_desc.ensure_blocking_mode(); // Throws
    Want want = Want::nothing;
    ssl_shutdown(ec, want);
    // In blocking mode the operation must have run to completion.
    REALM_ASSERT(want == Want::nothing);
    return ec;
}
#if REALM_HAVE_OPENSSL
// Creates the OpenSSL context (SSL_CTX) with secure defaults: SSLv2/SSLv3
// and compression disabled. Throws std::system_error on failure.
void Context::ssl_init()
{
    ERR_clear_error();
    // Despite the name, SSLv23_method isn't specific to SSLv2 and SSLv3.
    // It negotiates with the peer to pick the newest enabled protocol version.
    const SSL_METHOD* method = SSLv23_method();
    SSL_CTX* ssl_ctx = SSL_CTX_new(method);
    if (REALM_UNLIKELY(!ssl_ctx)) {
        std::error_code ec(int(ERR_get_error()), openssl_error_category);
        throw std::system_error(ec);
    }
    // Disable use of older protocol versions (SSLv2 and SSLv3).
    // Disable SSL compression by default, as compression is unavailable
    // with Apple's Secure Transport API.
    long options = 0;
    options |= SSL_OP_NO_SSLv2;
    options |= SSL_OP_NO_SSLv3;
    options |= SSL_OP_NO_COMPRESSION;
    SSL_CTX_set_options(ssl_ctx, options);
    m_ssl_ctx = ssl_ctx;
}
// Releases the OpenSSL context. SSL_CTX_free() accepts null, so this is safe
// even if ssl_init() never ran to completion.
void Context::ssl_destroy() noexcept
{
    /*
    if (handle_->default_passwd_callback_userdata) {
        detail::password_callback_base* callback =
            static_cast<detail::password_callback_base*>(handle_->default_passwd_callback_userdata); delete callback;
        handle_->default_passwd_callback_userdata = nullptr;
    }
    if (SSL_CTX_get_app_data(handle_)) {
        detail::verify_callback_base* callback =
            static_cast<detail::verify_callback_base*>(SSL_CTX_get_app_data(handle_)); delete callback;
        SSL_CTX_set_app_data(handle_, nullptr);
    }
    */
    SSL_CTX_free(m_ssl_ctx);
}
// Loads a PEM certificate chain from `path` into the SSL context. On success
// `ec` is cleared; on failure it receives the OpenSSL error code.
void Context::ssl_use_certificate_chain_file(const std::string& path, std::error_code& ec)
{
    ERR_clear_error();
    if (SSL_CTX_use_certificate_chain_file(m_ssl_ctx, path.c_str()) == 1) {
        ec = std::error_code();
        return;
    }
    ec = std::error_code(int(ERR_get_error()), openssl_error_category);
}
// Loads a PEM-encoded private key from `path` into the SSL context. On
// success `ec` is cleared; on failure it receives the OpenSSL error code.
void Context::ssl_use_private_key_file(const std::string& path, std::error_code& ec)
{
    ERR_clear_error();
    int outcome = SSL_CTX_use_PrivateKey_file(m_ssl_ctx, path.c_str(), SSL_FILETYPE_PEM);
    if (outcome == 1) {
        ec = std::error_code();
        return;
    }
    ec = std::error_code(int(ERR_get_error()), openssl_error_category);
}
// Directs OpenSSL to use its built-in default CA certificate locations for
// peer verification. Reports failure through `ec`.
void Context::ssl_use_default_verify(std::error_code& ec)
{
    ERR_clear_error();
    bool ok = (SSL_CTX_set_default_verify_paths(m_ssl_ctx) == 1);
    ec = ok ? std::error_code() : std::error_code(int(ERR_get_error()), openssl_error_category);
}
// Uses the PEM file at `path` as the set of trusted CA certificates for peer
// verification. Reports failure through `ec`.
void Context::ssl_use_verify_file(const std::string& path, std::error_code& ec)
{
    ERR_clear_error();
    bool ok = (SSL_CTX_load_verify_locations(m_ssl_ctx, path.c_str(), nullptr) == 1);
    ec = ok ? std::error_code() : std::error_code(int(ERR_get_error()), openssl_error_category);
}
#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER)
// BIO method table for OpenSSL >= 1.1.0, where BIO_METHOD is opaque and must
// be constructed through the BIO_meth_*() accessor API. Routes all BIO I/O
// through the Stream's static callbacks (which forward to the TCP socket).
class Stream::BioMethod {
public:
    BIO_METHOD* bio_method;
    BioMethod()
    {
        const char* name = "realm::util::Stream::BioMethod";
        bio_method = BIO_meth_new(BIO_get_new_index(), name);
        if (!bio_method)
            throw util::bad_alloc();
        BIO_meth_set_write(bio_method, &Stream::bio_write);
        BIO_meth_set_read(bio_method, &Stream::bio_read);
        BIO_meth_set_puts(bio_method, &Stream::bio_puts);
        BIO_meth_set_gets(bio_method, nullptr);
        BIO_meth_set_ctrl(bio_method, &Stream::bio_ctrl);
        BIO_meth_set_create(bio_method, &Stream::bio_create);
        BIO_meth_set_destroy(bio_method, &Stream::bio_destroy);
        BIO_meth_set_callback_ctrl(bio_method, nullptr);
    }
    ~BioMethod()
    {
        BIO_meth_free(bio_method);
    }
};
#else
// BIO method table for OpenSSL < 1.1.0 (and LibreSSL), where BIO_METHOD is a
// public struct that can be aggregate-initialized directly.
class Stream::BioMethod {
public:
    BIO_METHOD* bio_method;
    BioMethod()
    {
        bio_method = new BIO_METHOD{
            BIO_TYPE_SOCKET,     // int type
            nullptr,             // const char* name
            &Stream::bio_write,  // int (*bwrite)(BIO*, const char*, int)
            &Stream::bio_read,   // int (*bread)(BIO*, char*, int)
            &Stream::bio_puts,   // int (*bputs)(BIO*, const char*)
            nullptr,             // int (*bgets)(BIO*, char*, int)
            &Stream::bio_ctrl,   // long (*ctrl)(BIO*, int, long, void*)
            &Stream::bio_create, // int (*create)(BIO*)
            &Stream::bio_destroy, // int (*destroy)(BIO*)
            nullptr              // long (*callback_ctrl)(BIO*, int, bio_info_cb*)
        };
    }
    ~BioMethod()
    {
        delete bio_method;
    }
};
#endif
// Single BIO method table shared by every Stream instance.
Stream::BioMethod Stream::s_bio_method;
#if OPENSSL_VERSION_NUMBER < 0x10002000L || defined(LIBRESSL_VERSION_NUMBER)
namespace {
// check_common_name() checks that \param server_cert constains host_name
// as Common Name. The function is used by verify_callback() for
// OpenSSL versions before 1.0.2.
// check_common_name() checks that \param server_cert contains host_name
// as Common Name. The function is used by verify_callback() for
// OpenSSL versions before 1.0.2.
// Returns true only on an exact (case-sensitive) match.
bool check_common_name(X509* server_cert, const std::string& host_name)
{
    // Find the position of the Common Name field in the Subject field of the certificate
    int common_name_loc = -1;
    common_name_loc = X509_NAME_get_index_by_NID(X509_get_subject_name(server_cert), NID_commonName, -1);
    if (common_name_loc < 0)
        return false;
    // Extract the Common Name field
    X509_NAME_ENTRY* common_name_entry;
    common_name_entry = X509_NAME_get_entry(X509_get_subject_name(server_cert), common_name_loc);
    if (!common_name_entry)
        return false;
    // Convert the Common Name field to a C string
    ASN1_STRING* common_name_asn1;
    common_name_asn1 = X509_NAME_ENTRY_get_data(common_name_entry);
    if (!common_name_asn1)
        return false;
    char* common_name_str = reinterpret_cast<char*>(ASN1_STRING_data(common_name_asn1));
    // Make sure there isn't an embedded NUL character in the Common Name
    // (a mismatch between the ASN.1 length and strlen indicates one).
    if (static_cast<std::size_t>(ASN1_STRING_length(common_name_asn1)) != std::strlen(common_name_str))
        return false;
    bool names_equal = (host_name == common_name_str);
    return names_equal;
}
// check_common_name() checks that \param server_cert constains host_name
// in the Subject Alternative Name DNS section. The function is used by verify_callback()
// for OpenSSL versions before 1.0.2.
// check_san() checks that \param server_cert contains host_name
// in the Subject Alternative Name DNS section. The function is used by verify_callback()
// for OpenSSL versions before 1.0.2.
bool check_san(X509* server_cert, const std::string& host_name)
{
    STACK_OF(GENERAL_NAME) * san_names;
    // Try to extract the names within the SAN extension from the certificate
    san_names =
        static_cast<STACK_OF(GENERAL_NAME)*>(X509_get_ext_d2i(server_cert, NID_subject_alt_name, nullptr, nullptr));
    if (!san_names)
        return false;
    int san_names_nb = sk_GENERAL_NAME_num(san_names);
    bool found = false;
    // Check each name within the extension
    for (int i = 0; i < san_names_nb; ++i) {
        const GENERAL_NAME* current_name = sk_GENERAL_NAME_value(san_names, i);
        if (current_name->type == GEN_DNS) {
            // Current name is a DNS name
            char* dns_name = static_cast<char*>(ASN1_STRING_data(current_name->d.dNSName));
            // Make sure there isn't an embedded NUL character in the DNS name
            if (static_cast<std::size_t>(ASN1_STRING_length(current_name->d.dNSName)) != std::strlen(dns_name))
                break;
            if (host_name == dns_name) {
                found = true;
                break;
            }
        }
    }
    // The stack was allocated by X509_get_ext_d2i(); free it and its entries.
    sk_GENERAL_NAME_pop_free(san_names, GENERAL_NAME_free);
    return found;
}
} // namespace
// OpenSSL verify callback (pre-1.0.2 fallback) that adds host-name checking
// on top of chain verification: accepts the leaf certificate (depth 0) only
// if its Common Name or a SAN DNS entry matches the stream's host name.
// Returns 1 to accept, 0 to reject.
int Stream::verify_callback_using_hostname(int preverify_ok, X509_STORE_CTX* ctx) noexcept
{
    // Chain verification already failed; propagate the rejection.
    if (preverify_ok != 1)
        return preverify_ok;
    X509* server_cert = X509_STORE_CTX_get_current_cert(ctx);
    int err = X509_STORE_CTX_get_error(ctx);
    if (err != X509_V_OK)
        return 0;
    int depth = X509_STORE_CTX_get_error_depth(ctx);
    // We only inspect the certificate at depth = 0.
    if (depth > 0)
        return preverify_ok;
    // Retrieve the pointer to the SSL object for this connection.
    SSL* ssl = static_cast<SSL*>(X509_STORE_CTX_get_ex_data(ctx, SSL_get_ex_data_X509_STORE_CTX_idx()));
    // The stream object is stored as data in the SSL object (see
    // ssl_set_verify_mode(), which calls SSL_set_ex_data()).
    Stream* stream = static_cast<Stream*>(SSL_get_ex_data(ssl, 0));
    const std::string& host_name = stream->m_host_name;
    if (check_common_name(server_cert, host_name))
        return 1;
    if (check_san(server_cert, host_name))
        return 1;
    return 0;
}
#endif
// Translates VerifyMode to OpenSSL's verify flags and stores `this` as
// ex-data on the SSL object so the static verify callbacks can recover the
// Stream instance.
void Stream::ssl_set_verify_mode(VerifyMode mode, std::error_code& ec)
{
    int mode_2 = 0;
    switch (mode) {
        case VerifyMode::none:
            break;
        case VerifyMode::peer:
            mode_2 = SSL_VERIFY_PEER;
            break;
    }
    int rc = SSL_set_ex_data(m_ssl, 0, this);
    if (rc == 0) {
        ec = std::error_code(int(ERR_get_error()), openssl_error_category);
        return;
    }
#if OPENSSL_VERSION_NUMBER < 0x10002000L || defined(LIBRESSL_VERSION_NUMBER)
    // Older OpenSSL cannot verify the host name itself; install a custom
    // callback that performs CN/SAN matching.
    SSL_set_verify(m_ssl, mode_2, &Stream::verify_callback_using_hostname);
#else
    // verify_callback is nullptr.
    SSL_set_verify(m_ssl, mode_2, nullptr);
#endif
    ec = std::error_code();
}
// Configures the host name on the SSL object: enables the SNI extension
// (OpenSSL >= 1.1.1) and built-in host-name verification (OpenSSL >= 1.0.2).
// On versions without these features, the corresponding step is a no-op
// (older versions rely on verify_callback_using_hostname() instead).
void Stream::ssl_set_host_name(const std::string& host_name, std::error_code& ec)
{
    // Enable Server Name Indication (SNI) extension
#if OPENSSL_VERSION_NUMBER >= 0x10101000L
    {
        // SSL_set_tlsext_host_name is a macro containing old-style casts.
#ifndef _WIN32
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
        auto ret = SSL_set_tlsext_host_name(m_ssl, host_name.c_str());
#ifndef _WIN32
#pragma GCC diagnostic pop
#endif
        if (ret == 0) {
            ec = std::error_code(int(ERR_get_error()), openssl_error_category);
            return;
        }
    }
#else
    static_cast<void>(host_name);
    static_cast<void>(ec);
#endif
    // Enable host name check during certificate validation
#if OPENSSL_VERSION_NUMBER >= 0x10002000L && !defined(LIBRESSL_VERSION_NUMBER)
    {
        X509_VERIFY_PARAM* param = SSL_get0_param(m_ssl);
        // Disallow wildcards that cover more than one label (e.g. "*.com").
        X509_VERIFY_PARAM_set_hostflags(param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS);
        auto ret = X509_VERIFY_PARAM_set1_host(param, host_name.c_str(), 0);
        if (ret == 0) {
            ec = std::error_code(int(ERR_get_error()), openssl_error_category);
            return;
        }
    }
#else
    static_cast<void>(host_name);
    static_cast<void>(ec);
#endif
}
// Installs a user-supplied certificate verification callback.
// NOTE(review): only the address of `callback` is stored — the caller must
// keep the std::function alive for the lifetime of the Stream; confirm at
// the call sites.
void Stream::ssl_use_verify_callback(const std::function<SSLVerifyCallback>& callback, std::error_code&)
{
    m_ssl_verify_callback = &callback;
    SSL_set_verify(m_ssl, SSL_VERIFY_PEER, &Stream::verify_callback_using_delegate);
}
#ifndef _WIN32
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#endif
#if REALM_INCLUDE_CERTS
// Verifies the server against the root certificates compiled into the binary
// (REALM_INCLUDE_CERTS) instead of the system trust store. Mutually exclusive
// with a user-supplied verify callback.
void Stream::ssl_use_included_certificates(std::error_code&)
{
    REALM_ASSERT(!m_ssl_verify_callback);
    SSL_set_verify(m_ssl, SSL_VERIFY_PEER, &Stream::verify_callback_using_root_certs);
}
// OpenSSL verify callback that falls back to the bundled root certificates
// when the normal chain verification fails. Returns 1 to accept the
// certificate, 0 to reject it.
int Stream::verify_callback_using_root_certs(int preverify_ok, X509_STORE_CTX* ctx)
{
    // Already accepted by the regular verification; nothing more to do.
    if (preverify_ok)
        return 1;
    X509* server_cert = X509_STORE_CTX_get_current_cert(ctx);
    // Recover the Stream instance stored as ex-data on the SSL object.
    SSL* ssl = static_cast<SSL*>(X509_STORE_CTX_get_ex_data(ctx, SSL_get_ex_data_X509_STORE_CTX_idx()));
    Stream* stream = static_cast<Stream*>(SSL_get_ex_data(ssl, 0));
    REALM_ASSERT(stream);
    util::Logger* logger = stream->logger;
    const std::string& host_name = stream->m_host_name;
    port_type server_port = stream->m_server_port;
    // At debug level, log the PEM-encoded certificate being checked.
    if (logger && logger->would_log(util::Logger::Level::debug)) {
        BIO* bio = BIO_new(BIO_s_mem());
        if (bio) {
            int ret = PEM_write_bio_X509(bio, server_cert);
            if (ret) {
                BUF_MEM* buffer;
                BIO_get_mem_ptr(bio, &buffer);
                const char* pem_data = buffer->data;
                std::size_t pem_size = buffer->length;
                logger->debug("Verifying server SSL certificate using root certificates, "
                              "host name = %1, server port = %2, certificate =\n%3",
                              host_name, server_port, StringData{pem_data, pem_size});
            }
            BIO_free(bio);
        }
    }
    bool valid = verify_certificate_from_root_certs(server_cert, logger);
    if (!valid && logger) {
        logger->error("server SSL certificate rejected using root certificates, "
                      "host name = %1, server port = %2",
                      host_name, server_port);
    }
    return int(valid);
}
#endif
// OpenSSL verify callback that delegates the accept/reject decision to the
// user-supplied SSLVerifyCallback, passing it the PEM-encoded certificate.
// Returns 1 to accept, 0 to reject (including on memory errors).
int Stream::verify_callback_using_delegate(int preverify_ok, X509_STORE_CTX* ctx) noexcept
{
    X509* server_cert = X509_STORE_CTX_get_current_cert(ctx);
    int depth = X509_STORE_CTX_get_error_depth(ctx);
    BIO* bio = BIO_new(BIO_s_mem());
    if (!bio) {
        // certificate rejected if a memory error occurs.
        return 0;
    }
    // Serialize the certificate to PEM in the memory BIO.
    int ret = PEM_write_bio_X509(bio, server_cert);
    if (!ret) {
        BIO_free(bio);
        return 0;
    }
    BUF_MEM* buffer;
    BIO_get_mem_ptr(bio, &buffer);
    const char* pem_data = buffer->data;
    std::size_t pem_size = buffer->length;
    // Recover the Stream instance stored as ex-data on the SSL object.
    SSL* ssl = static_cast<SSL*>(X509_STORE_CTX_get_ex_data(ctx, SSL_get_ex_data_X509_STORE_CTX_idx()));
    Stream* stream = static_cast<Stream*>(SSL_get_ex_data(ssl, 0));
    const std::string& host_name = stream->m_host_name;
    port_type server_port = stream->m_server_port;
    REALM_ASSERT(stream->m_ssl_verify_callback);
    const std::function<SSLVerifyCallback>& callback = *stream->m_ssl_verify_callback;
    // FIXME: Oops, the callback may throw, but verify_callback_using_delegate()
    // is not allowed to throw. It does not seem to be reasonable to deny the
    // callback the opportunity of throwing. The right solution seems to be to
    // carry an exception across the OpenSSL C-layer using the exception object
    // transportation mechanism offered by C++.
    bool valid = callback(host_name, server_port, pem_data, pem_size, preverify_ok, depth); // Throws
    BIO_free(bio);
    return int(valid);
}
#ifndef _WIN32
#pragma GCC diagnostic pop
#endif
// Creates the per-connection SSL object and wires it to a custom BIO whose
// callbacks forward I/O to the Stream's TCP socket. Throws std::system_error
// on allocation failure.
void Stream::ssl_init()
{
    SSL_CTX* ssl_ctx = m_ssl_context.m_ssl_ctx;
    SSL* ssl = SSL_new(ssl_ctx);
    if (REALM_UNLIKELY(!ssl)) {
        std::error_code ec(int(ERR_get_error()), openssl_error_category);
        throw std::system_error(ec);
    }
    // Allow SSL_write() to report partial progress instead of all-or-nothing.
    SSL_set_mode(ssl, SSL_MODE_ENABLE_PARTIAL_WRITE);
#if defined(SSL_MODE_RELEASE_BUFFERS)
    SSL_set_mode(ssl, SSL_MODE_RELEASE_BUFFERS);
#endif
    BIO* bio = BIO_new(s_bio_method.bio_method);
    if (REALM_UNLIKELY(!bio)) {
        SSL_free(ssl);
        std::error_code ec(int(ERR_get_error()), openssl_error_category);
        throw std::system_error(ec);
    }
    // Store `this` on the BIO so the static callbacks can find the Stream.
#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER)
    BIO_set_data(bio, this);
#else
    bio->ptr = this;
#endif
    // The SSL object takes ownership of the BIO (used for both directions).
    SSL_set_bio(ssl, bio, bio);
    m_ssl = ssl;
}
// Releases the SSL object (and with it the BIO installed by ssl_init()).
void Stream::ssl_destroy() noexcept
{
    SSL_free(m_ssl);
}
// BIO write callback: forwards OpenSSL's outgoing bytes to the underlying
// TCP socket. Returns the number of bytes written, or -1 with either the
// retry-write flag set (would block) or the real error recorded on the
// Stream for later retrieval.
int Stream::bio_write(BIO* bio, const char* data, int size) noexcept
{
#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER)
    Stream& stream = *static_cast<Stream*>(BIO_get_data(bio));
#else
    Stream& stream = *static_cast<Stream*>(bio->ptr);
#endif
    Service::Descriptor& desc = stream.m_tcp_socket.m_desc;
    std::error_code ec;
    std::size_t n = desc.write_some(data, std::size_t(size), ec);
    BIO_clear_retry_flags(bio);
    if (ec) {
        if (REALM_UNLIKELY(ec != error::resource_unavailable_try_again)) {
            // Remember the real error so the caller can surface it later.
            stream.m_bio_error_code = ec;
            return -1;
        }
        // Would block: tell OpenSSL to retry the write.
        BIO_set_retry_write(bio);
        return -1;
    }
    return int(n);
}
// BIO read callback: fills OpenSSL's buffer from the underlying TCP socket.
// Returns the number of bytes read, 0 on clean end-of-input, or -1 with
// either the retry-read flag set (would block) or the real error recorded
// on the Stream.
int Stream::bio_read(BIO* bio, char* buffer, int size) noexcept
{
#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER)
    Stream& stream = *static_cast<Stream*>(BIO_get_data(bio));
#else
    Stream& stream = *static_cast<Stream*>(bio->ptr);
#endif
    Service::Descriptor& desc = stream.m_tcp_socket.m_desc;
    std::error_code ec;
    std::size_t n = desc.read_some(buffer, std::size_t(size), ec);
    BIO_clear_retry_flags(bio);
    if (ec) {
        if (REALM_UNLIKELY(ec == MiscExtErrors::end_of_input)) {
            // This behaviour agrees with `crypto/bio/bss_sock.c` of OpenSSL.
            return 0;
        }
        if (REALM_UNLIKELY(ec != error::resource_unavailable_try_again)) {
            // Remember the real error so the caller can surface it later.
            stream.m_bio_error_code = ec;
            return -1;
        }
        // Would block: tell OpenSSL to retry the read.
        BIO_set_retry_read(bio);
        return -1;
    }
    return int(n);
}
// BIO puts callback: writes the NUL-terminated string via bio_write().
int Stream::bio_puts(BIO* bio, const char* c_str) noexcept
{
    return bio_write(bio, c_str, int(std::strlen(c_str)));
}
// Minimal BIO control handler, mirroring `crypto/bio/bss_sock.c` of OpenSSL.
long Stream::bio_ctrl(BIO*, int cmd, long, void*) noexcept
{
    switch (cmd) {
        case BIO_CTRL_PUSH:
        case BIO_CTRL_POP:
            // Ignoring in alignment with `crypto/bio/bss_sock.c` of OpenSSL.
            return 0;
        case BIO_CTRL_FLUSH:
            // Ignoring in alignment with `crypto/bio/bss_sock.c` of OpenSSL.
            return 1;
    }
    // NOTE(review): any other command is treated as a programming error.
    // Newer OpenSSL releases may probe additional control commands on custom
    // BIOs — confirm none reach this method before relying on the assertion
    // (in release builds it falls through and returns 0).
    REALM_ASSERT(false);
    return 0;
}
// BIO creation callback: marks the BIO as initialized with no associated
// data yet (the Stream pointer is attached later in ssl_init()).
int Stream::bio_create(BIO* bio) noexcept
{
#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER)
    BIO_set_init(bio, 1);
    BIO_set_data(bio, nullptr);
    BIO_clear_flags(bio, 0);
    BIO_set_shutdown(bio, 0);
#else
    // In alignment with `crypto/bio/bss_sock.c` of OpenSSL.
    bio->init = 1;
    bio->num = 0;
    bio->ptr = nullptr;
    bio->flags = 0;
#endif
    return 1;
}
// BIO destruction callback: nothing to release — the Stream owns all state.
int Stream::bio_destroy(BIO*) noexcept
{
    return 1;
}
#elif REALM_HAVE_SECURE_TRANSPORT
#pragma GCC diagnostic ignored "-Wdeprecated-declarations" // FIXME: Should this be removed at some point?
// No context-wide initialization is needed for SecureTransport.
void Context::ssl_init() {}
// Tears down the temporary keychain (if one was created for private-key
// import) and removes its backing file from disk.
void Context::ssl_destroy() noexcept
{
#if REALM_HAVE_KEYCHAIN_APIS
    if (m_keychain) {
        m_keychain.reset();
        unlink(m_keychain_path.data());
        m_keychain_path = {};
    }
#endif
}
// Load certificates and/or keys from the specified PEM file. If keychain is non-null, the items will be
// imported into that keychain.
// Load certificates and/or keys from the specified PEM file. If keychain is non-null, the items will be
// imported into that keychain.
// On failure, returns a null array and sets `ec`.
util::CFPtr<CFArrayRef> Context::load_pem_file(const std::string& path, SecKeychainRef keychain, std::error_code& ec)
{
    using util::adoptCF;
    using util::CFPtr;
    std::ifstream file(path);
    if (!file) {
        // Rely on the open attempt having set errno to a sensible value as ifstream's
        // own error reporting gives terribly generic error messages.
        ec = make_basic_system_error_code(errno);
        return util::CFPtr<CFArrayRef>();
    }
    // Slurp the whole file; CFData then aliases the vector (no-copy).
    std::vector<char> contents{std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>()};
    auto contentsCF = adoptCF(CFDataCreateWithBytesNoCopy(nullptr, reinterpret_cast<const UInt8*>(contents.data()),
                                                          contents.size(), kCFAllocatorNull));
    // If we don't need to import it into a keychain, try to interpret the data
    // as a certificate directly. This only works for DER files, so we fall back
    // to SecItemImport() on platforms which support that if this fails.
    if (keychain == nullptr) {
        if (auto certificate = adoptCF(SecCertificateCreateWithData(NULL, contentsCF.get()))) {
            auto ref = certificate.get();
            return adoptCF(CFArrayCreate(nullptr, const_cast<const void**>(reinterpret_cast<void**>(&ref)), 1,
                                         &kCFTypeArrayCallBacks));
        }
        // SecCertificateCreateWithData doesn't tell us why it failed, so just
        // report the error code that SecItemImport uses when given something
        // that's not a certificate
        ec = std::error_code(errSecUnknownFormat, secure_transport_error_category);
    }
    CFArrayRef items = nullptr;
#if REALM_HAVE_KEYCHAIN_APIS
    SecItemImportExportKeyParameters params{};
    params.version = SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION;
    CFPtr<CFStringRef> pathCF = adoptCF(CFStringCreateWithBytes(nullptr, reinterpret_cast<const UInt8*>(path.data()),
                                                                path.size(), kCFStringEncodingUTF8, false));
    // Let SecItemImport() auto-detect the format and item type.
    SecExternalFormat format = kSecFormatUnknown;
    SecExternalItemType itemType = kSecItemTypeUnknown;
    if (OSStatus status =
            SecItemImport(contentsCF.get(), pathCF.get(), &format, &itemType, 0, &params, keychain, &items)) {
        ec = std::error_code(status, secure_transport_error_category);
        return util::CFPtr<CFArrayRef>();
    }
    // Success via SecItemImport clears any error set by the DER attempt above.
    ec = {};
#endif
    return adoptCF(items);
}
#if REALM_HAVE_KEYCHAIN_APIS
// Returns the preferred directory for temporary files, always with a trailing
// slash. Tries confstr(_CS_DARWIN_USER_TEMP_DIR) first, then the TMPDIR
// environment variable (only when not running with elevated privileges), and
// finally falls back to "/tmp/".
static std::string temporary_directory()
{
    auto ensure_trailing_slash = [](auto str) {
        return str.back() == '/' ? str : str + '/';
    };
    std::string path;
    path.resize(PATH_MAX);
    std::size_t result = confstr(_CS_DARWIN_USER_TEMP_DIR, &path[0], path.size());
    if (result && result <= path.size()) {
        path.resize(result - 1); // Drop the terminating NUL written by confstr()
        return ensure_trailing_slash(std::move(path));
    }
    // We failed to retrieve temporary directory from confstr. Fall back to the TMPDIR
    // environment variable if we're not running with elevated privileges, and then to /tmp.
    if (!issetugid()) {
        // getenv() returns null when TMPDIR is unset; assigning a null
        // `const char*` to std::string is undefined behavior, so check
        // before use. An empty value is treated as unset.
        const char* tmpdir = getenv("TMPDIR");
        if (tmpdir && *tmpdir)
            return ensure_trailing_slash(std::string(tmpdir));
    }
    return "/tmp/";
}
// Lazily creates a temporary, passwordless keychain for importing private
// keys (SecIdentityCreateWithCertificate requires the key to live in a
// keychain). Idempotent: returns immediately if one already exists.
std::error_code Context::open_temporary_keychain_if_needed()
{
    if (m_keychain) {
        return std::error_code();
    }
    std::string path = temporary_directory() + "realm-sync-ssl-XXXXXXXX.keychain";
    // mkstemps() keeps the ".keychain" suffix while randomizing the X's.
    int fd = mkstemps(&path[0], std::strlen(".keychain"));
    if (fd < 0) {
        return make_basic_system_error_code(errno);
    }
    // Close and remove the file so that we can create a keychain in its place.
    close(fd);
    unlink(path.data());
    SecKeychainRef keychain = nullptr;
    std::string password = "";
    if (OSStatus status =
            SecKeychainCreate(path.data(), UInt32(password.size()), password.data(), false, nullptr, &keychain))
        return std::error_code(status, secure_transport_error_category);
    m_keychain = adoptCF(keychain);
    m_keychain_path = std::move(path);
    return std::error_code();
}
// Creates an identity from the certificate and private key. The private key must exist in m_keychain.
// Creates an identity from the certificate and private key. The private key must exist in m_keychain.
std::error_code Context::update_identity_if_needed()
{
    // If we've not yet loaded both the certificate and private key there's nothing to do.
    if (!m_certificate || !m_private_key) {
        return std::error_code();
    }
    SecIdentityRef identity = nullptr;
    if (OSStatus status = SecIdentityCreateWithCertificate(m_keychain.get(), m_certificate.get(), &identity)) {
        return std::error_code(status, secure_transport_error_category);
    }
    m_identity = util::adoptCF(identity);
    return std::error_code();
}
#endif // REALM_HAVE_KEYCHAIN_APIS
// Loads the server certificate chain from a PEM file: the first item becomes
// the server certificate, the rest become the chain. Unsupported on platforms
// without keychain APIs (iOS), where ENOTSUP is reported.
void Context::ssl_use_certificate_chain_file(const std::string& path, std::error_code& ec)
{
#if !REALM_HAVE_KEYCHAIN_APIS
    static_cast<void>(path);
    ec = make_basic_system_error_code(ENOTSUP);
#else
    auto items = load_pem_file(path, nullptr, ec);
    if (!items) {
        REALM_ASSERT(ec);
        return;
    }
    if (CFArrayGetCount(items.get()) < 1) {
        ec = std::error_code(errSecItemNotFound, secure_transport_error_category);
        return;
    }
    // The first item must be the server certificate itself.
    CFTypeRef item = CFArrayGetValueAtIndex(items.get(), 0);
    if (CFGetTypeID(item) != SecCertificateGetTypeID()) {
        ec = std::error_code(errSecItemNotFound, secure_transport_error_category);
        return;
    }
    m_certificate = util::retainCF(reinterpret_cast<SecCertificateRef>(const_cast<void*>(item)));
    // The returned array contains the server certificate followed by the remainder of the certificates in the chain.
    // Remove the server certificate to leave us with an array containing only the remainder of the certificate chain.
    auto certificate_chain = util::adoptCF(CFArrayCreateMutableCopy(nullptr, 0, items.get()));
    CFArrayRemoveValueAtIndex(certificate_chain.get(), 0);
    m_certificate_chain = util::adoptCF(reinterpret_cast<CFArrayRef>(certificate_chain.release()));
    // Once both certificate and key are present, build the identity.
    ec = update_identity_if_needed();
#endif
}
// Imports the private key from a PEM file into a temporary keychain (required
// by SecIdentityCreateWithCertificate). Unsupported on platforms without
// keychain APIs (iOS), where ENOTSUP is reported.
void Context::ssl_use_private_key_file(const std::string& path, std::error_code& ec)
{
#if !REALM_HAVE_KEYCHAIN_APIS
    static_cast<void>(path);
    ec = make_basic_system_error_code(ENOTSUP);
#else
    ec = open_temporary_keychain_if_needed();
    if (ec) {
        return;
    }
    auto items = load_pem_file(path, m_keychain.get(), ec);
    if (!items) {
        return;
    }
    // The PEM file must contain exactly one item, and it must be a key.
    if (CFArrayGetCount(items.get()) != 1) {
        ec = std::error_code(errSecItemNotFound, secure_transport_error_category);
        return;
    }
    CFTypeRef item = CFArrayGetValueAtIndex(items.get(), 0);
    if (CFGetTypeID(item) != SecKeyGetTypeID()) {
        ec = std::error_code(errSecItemNotFound, secure_transport_error_category);
        return;
    }
    m_private_key = util::retainCF(reinterpret_cast<SecKeyRef>(const_cast<void*>(item)));
    // Once both certificate and key are present, build the identity.
    ec = update_identity_if_needed();
#endif
}
// SecureTransport trusts the system root certificates by default; no-op.
void Context::ssl_use_default_verify(std::error_code&) {}
// Loads the trust anchors from a PEM file and remembers the first (leaf)
// certificate's DER bytes for certificate pinning in Stream::verify_peer().
void Context::ssl_use_verify_file(const std::string& path, std::error_code& ec)
{
#if REALM_HAVE_KEYCHAIN_APIS
    m_trust_anchors = load_pem_file(path, m_keychain.get(), ec);
#else
    m_trust_anchors = load_pem_file(path, nullptr, ec);
#endif
    if (m_trust_anchors && CFArrayGetCount(m_trust_anchors.get())) {
        const void* leaf_certificate = CFArrayGetValueAtIndex(m_trust_anchors.get(), 0);
        m_pinned_certificate =
            adoptCF(SecCertificateCopyData(static_cast<SecCertificateRef>(const_cast<void*>(leaf_certificate))));
    }
    else {
        m_pinned_certificate.reset();
    }
}
// Creates and configures the SecureTransport session: custom I/O callbacks
// over the TCP socket, TLSv1+ only, handshake paused after the certificate
// exchange (so verify_peer() can run), and the server identity installed
// when available. Throws std::system_error on any SecureTransport failure.
void Stream::ssl_init()
{
    SSLProtocolSide side = m_handshake_type == HandshakeType::client ? kSSLClientSide : kSSLServerSide;
    m_ssl = util::adoptCF(SSLCreateContext(nullptr, side, kSSLStreamType));
    if (OSStatus status = SSLSetIOFuncs(m_ssl.get(), Stream::tcp_read, Stream::tcp_write)) {
        std::error_code ec(status, secure_transport_error_category);
        throw std::system_error(ec);
    }
    // `this` becomes the connection token passed back to tcp_read/tcp_write.
    if (OSStatus status = SSLSetConnection(m_ssl.get(), this)) {
        std::error_code ec(status, secure_transport_error_category);
        throw std::system_error(ec);
    }
    // Require TLSv1 or greater.
    if (OSStatus status = SSLSetProtocolVersionMin(m_ssl.get(), kTLSProtocol1)) {
        std::error_code ec(status, secure_transport_error_category);
        throw std::system_error(ec);
    }
    // Break after certificate exchange to allow for customizing the verification process.
    SSLSessionOption option = m_handshake_type == HandshakeType::client ? kSSLSessionOptionBreakOnServerAuth
                                                                        : kSSLSessionOptionBreakOnClientAuth;
    if (OSStatus status = SSLSetSessionOption(m_ssl.get(), option, true)) {
        std::error_code ec(status, secure_transport_error_category);
        throw std::system_error(ec);
    }
#if REALM_HAVE_KEYCHAIN_APIS
    if (m_ssl_context.m_identity && m_ssl_context.m_certificate_chain) {
        // SSLSetCertificate expects an array containing the identity followed by the identity's certificate chain.
        auto certificates = util::adoptCF(CFArrayCreateMutable(nullptr, 0, &kCFTypeArrayCallBacks));
        CFArrayInsertValueAtIndex(certificates.get(), 0, m_ssl_context.m_identity.get());
        CFArrayRef certificate_chain = m_ssl_context.m_certificate_chain.get();
        CFArrayAppendArray(certificates.get(), certificate_chain, CFRangeMake(0, CFArrayGetCount(certificate_chain)));
        if (OSStatus status = SSLSetCertificate(m_ssl.get(), certificates.get())) {
            std::error_code ec(status, secure_transport_error_category);
            throw std::system_error(ec);
        }
    }
#endif
}
// Releases the SecureTransport session context.
void Stream::ssl_destroy() noexcept
{
    m_ssl.reset();
}
// Records the requested verification mode; it is applied later in
// verify_peer() during the handshake. Never fails, so `ec` is cleared.
void Stream::ssl_set_verify_mode(VerifyMode verify_mode, std::error_code& ec)
{
    m_verify_mode = verify_mode;
    ec.clear();
}
// Sets the peer domain name SecureTransport uses for host-name checking
// during the handshake.
void Stream::ssl_set_host_name(const std::string& host_name, std::error_code& ec)
{
    if (OSStatus status = SSLSetPeerDomainName(m_ssl.get(), host_name.data(), host_name.size()))
        ec = std::error_code(status, secure_transport_error_category);
}
// Custom verify callbacks are not supported with SecureTransport; no-op.
void Stream::ssl_use_verify_callback(const std::function<SSLVerifyCallback>&, std::error_code&) {}
// Drives the SecureTransport handshake through ssl_perform(), which maps the
// OSStatus/would-block outcomes onto (ec, want).
void Stream::ssl_handshake(std::error_code& ec, Want& want) noexcept
{
    auto perform = [this]() noexcept {
        return do_ssl_handshake();
    };
    ssl_perform(std::move(perform), ec, want);
}
// Runs one step of the SecureTransport handshake. Because the session was
// configured with kSSLSessionOptionBreakOnServer/ClientAuth, SSLHandshake()
// pauses with errSSLPeerAuthCompleted after the certificate exchange; the
// peer is then verified and the handshake resumed (recursively).
std::pair<OSStatus, std::size_t> Stream::do_ssl_handshake() noexcept
{
    OSStatus result = SSLHandshake(m_ssl.get());
    if (result != errSSLPeerAuthCompleted) {
        return {result, 0};
    }
    if (OSStatus status = verify_peer()) {
        // When performing peer verification internally, verification failure results in SecureTransport
        // sending a fatal alert to the peer, closing the connection. Sadly SecureTransport has no way
        // to explicitly send a fatal alert when trust evaluation is handled externally. The best we can
        // do is close the connection gracefully.
        SSLClose(m_ssl.get());
        return {status, 0};
    }
    // Verification succeeded. Resume the handshake.
    return do_ssl_handshake();
}
// Evaluates the peer's certificate chain according to m_verify_mode:
// noErr when trusted (or verification disabled), errSSLXCertChainInvalid /
// errSSLBadCert when rejected, or the OSStatus of a failed trust API call.
// When trust anchors were configured, evaluation is restricted to them, and
// when a pinned certificate is set, the chain must additionally contain it.
OSStatus Stream::verify_peer() noexcept
{
    switch (m_verify_mode) {
        case VerifyMode::none:
            // Peer verification is disabled.
            return noErr;
        case VerifyMode::peer: {
            SecTrustRef peerTrustRef = nullptr;
            if (OSStatus status = SSLCopyPeerTrust(m_ssl.get(), &peerTrustRef)) {
                return status;
            }
            auto peerTrust = util::adoptCF(peerTrustRef);
            if (m_ssl_context.m_trust_anchors) {
                // Restrict evaluation to the explicitly configured anchors.
                if (OSStatus status =
                        SecTrustSetAnchorCertificates(peerTrust.get(), m_ssl_context.m_trust_anchors.get())) {
                    return status;
                }
                if (OSStatus status = SecTrustSetAnchorCertificatesOnly(peerTrust.get(), true)) {
                    return status;
                }
            }
            // FIXME: SecTrustEvaluate can block if evaluation needs to fetch missing intermediate
            // certificates or to check revocation using OCSP. Consider disabling these network
            // fetches or doing async trust evaluation instead.
#if __has_builtin(__builtin_available)
            if (__builtin_available(iOS 12.0, macOS 10.14, tvOS 12.0, watchOS 5.0, *)) {
                CFErrorRef cfErrorRef;
                if (!SecTrustEvaluateWithError(peerTrust.get(), &cfErrorRef)) {
                    auto cfError = util::adoptCF(cfErrorRef);
                    if (logger) {
                        auto errorStr = util::adoptCF(CFErrorCopyDescription(cfErrorRef));
                        logger->debug("SSL peer verification failed: %1", cfstring_to_std_string(errorStr.get()));
                    }
                    return errSSLXCertChainInvalid;
                }
            }
            else
#endif
            {
                // Legacy path for OS versions without SecTrustEvaluateWithError().
                SecTrustResultType trustResult;
                if (OSStatus status = SecTrustEvaluate(peerTrust.get(), &trustResult)) {
                    return status;
                }
                // A "proceed" result means the cert is explicitly trusted, e.g. "Always Trust" was selected.
                // "Unspecified" means the cert has no explicit trust settings, but is implicitly OK since it
                // chains back to a trusted root. Any other result means the cert is not trusted.
                if (trustResult == kSecTrustResultRecoverableTrustFailure) {
                    // Not trusted.
                    return errSSLXCertChainInvalid;
                }
                if (trustResult != kSecTrustResultProceed && trustResult != kSecTrustResultUnspecified) {
                    return errSSLBadCert;
                }
            }
            if (!m_ssl_context.m_pinned_certificate) {
                // Certificate is trusted!
                return noErr;
            }
            // Verify that the certificate is one of our pinned certificates
            // Loop backwards as the pinned certificate will normally be the last one
            for (CFIndex i = SecTrustGetCertificateCount(peerTrust.get()); i > 0; --i) {
                SecCertificateRef certificate = SecTrustGetCertificateAtIndex(peerTrust.get(), i - 1);
                auto certificate_data = adoptCF(SecCertificateCopyData(certificate));
                if (CFEqual(certificate_data.get(), m_ssl_context.m_pinned_certificate.get())) {
                    return noErr;
                }
            }
            // Although the certificate is valid, it's not the one we've pinned so reject it.
            return errSSLXCertChainInvalid;
        }
    }
}
// Reads decrypted application data. A completed operation that produced zero
// bytes with no error means the TCP stream ended: this is a clean
// end_of_input only if the TLS session was closed properly (close_notify),
// otherwise premature_end_of_input.
std::size_t Stream::ssl_read(char* buffer, std::size_t size, std::error_code& ec, Want& want) noexcept
{
    auto perform = [this, buffer, size]() noexcept {
        return do_ssl_read(buffer, size);
    };
    std::size_t n = ssl_perform(std::move(perform), ec, want);
    if (want == Want::nothing && n == 0 && !ec) {
        // End of input on TCP socket
        SSLSessionState state;
        if (SSLGetSessionState(m_ssl.get(), &state) == noErr && state == kSSLClosed) {
            ec = MiscExtErrors::end_of_input;
        }
        else {
            ec = MiscExtErrors::premature_end_of_input;
        }
    }
    return n;
}
std::pair<OSStatus, std::size_t> Stream::do_ssl_read(char* buffer, std::size_t size) noexcept
{
std::size_t processed = 0;
OSStatus result = SSLRead(m_ssl.get(), buffer, size, &processed);
return {result, processed};
}
// Writes application data through SecureTransport. A completed operation
// that wrote zero bytes with no error means the TCP stream ended early.
std::size_t Stream::ssl_write(const char* data, std::size_t size, std::error_code& ec, Want& want) noexcept
{
    auto perform = [this, data, size]() noexcept {
        return do_ssl_write(data, size);
    };
    std::size_t n = ssl_perform(std::move(perform), ec, want);
    if (want == Want::nothing && n == 0 && !ec) {
        // End of input on TCP socket
        ec = MiscExtErrors::premature_end_of_input;
    }
    return n;
}
// Perform a single SSLWrite() on the Secure Transport session.
//
// SecureTransport has unusual partial-write behavior: when the underlying TCP
// write would block, SSLWrite() returns errSSLWouldBlock but may already have
// consumed some of the caller's input (reflected in `processed`). Those bytes
// must not be submitted again on retry, so they are tracked in
// m_num_partially_written_bytes, skipped on the next call, and credited back
// to the caller only once the write completes with noErr.
std::pair<OSStatus, std::size_t> Stream::do_ssl_write(const char* data, std::size_t size) noexcept
{
    m_last_error = {};
    REALM_ASSERT(size >= m_num_partially_written_bytes);
    // Skip input that a previous, blocked call already consumed.
    data += m_num_partially_written_bytes;
    size -= m_num_partially_written_bytes;
    std::size_t processed = 0;
    OSStatus result = SSLWrite(m_ssl.get(), data, size, &processed);
    if (result != noErr) {
        // Map errors that indicate the connection is closed to broken_pipe, for
        // consistency with OpenSSL.
        if (REALM_LIKELY(result == errSSLWouldBlock)) {
            // Remember how much input SecureTransport consumed so the retry
            // can skip it.
            m_num_partially_written_bytes += processed;
        }
        else if (result == errSSLClosedGraceful || result == errSSLClosedAbort || result == errSSLClosedNoNotify) {
            result = errSecIO;
            m_last_error = error::broken_pipe;
        }
        // Report no progress to the caller until the write fully completes.
        processed = 0;
    }
    else {
        // Success: include the bytes consumed by earlier blocked calls.
        processed += m_num_partially_written_bytes;
        m_num_partially_written_bytes = 0;
    }
    return {result, processed};
}
bool Stream::ssl_shutdown(std::error_code& ec, Want& want) noexcept
{
auto perform = [this]() noexcept {
return do_ssl_shutdown();
};
std::size_t n = ssl_perform(std::move(perform), ec, want);
REALM_ASSERT(n == 0 || n == 1);
return (n > 0);
}
std::pair<OSStatus, std::size_t> Stream::do_ssl_shutdown() noexcept
{
SSLSessionState previousState;
if (OSStatus result = SSLGetSessionState(m_ssl.get(), &previousState)) {
return {result, false};
}
if (OSStatus result = SSLClose(m_ssl.get())) {
return {result, false};
}
// SSLClose returns noErr if it encountered an I/O error. We can still
// detect such errors if they originated from our underlying tcp_read /
// tcp_write functions as we'll have set m_last_error in such cases. This
// allows us to reconstruct the I/O error and communicate it to our caller.
if (m_last_error) {
return {errSecIO, false};
}
return {noErr, previousState == kSSLClosed};
}
// SecureTransport read trampoline: SSLConnectionRef is the opaque const
// pointer we registered, which is really the owning Stream object.
OSStatus Stream::tcp_read(SSLConnectionRef connection, void* data, std::size_t* length) noexcept
{
    void* opaque = const_cast<void*>(connection);
    Stream* stream = static_cast<Stream*>(opaque);
    return stream->tcp_read(data, length);
}
// Feed raw TCP bytes to SecureTransport. On return, *length holds the number
// of bytes actually delivered; the OSStatus tells SecureTransport whether to
// retry (errSSLWouldBlock), treat the stream as ended (noErr with fewer
// bytes), or fail (errSecIO).
OSStatus Stream::tcp_read(void* data, std::size_t* length) noexcept
{
    Service::Descriptor& descriptor = m_tcp_socket.m_desc;
    std::error_code read_error;
    std::size_t num_bytes = descriptor.read_some(static_cast<char*>(data), *length, read_error);
    // A successful but short read should be treated the same as EAGAIN.
    if (!read_error && num_bytes < *length)
        read_error = error::resource_unavailable_try_again;
    *length = num_bytes;
    m_last_error = read_error;
    if (!read_error)
        return noErr;
    if (REALM_UNLIKELY(read_error == MiscExtErrors::end_of_input))
        return noErr;
    if (read_error == error::resource_unavailable_try_again) {
        m_last_operation = BlockingOperation::read;
        return errSSLWouldBlock;
    }
    return errSecIO;
}
// SecureTransport write trampoline: recover the Stream registered as the
// opaque connection reference and forward to the member function.
OSStatus Stream::tcp_write(SSLConnectionRef connection, const void* data, std::size_t* length) noexcept
{
    void* opaque = const_cast<void*>(connection);
    Stream* stream = static_cast<Stream*>(opaque);
    return stream->tcp_write(data, length);
}
// Push SecureTransport's encrypted output onto the TCP socket. On return,
// *length holds the number of bytes actually written; short writes are
// reported as "would block" so SecureTransport retries the remainder.
OSStatus Stream::tcp_write(const void* data, std::size_t* length) noexcept
{
    Service::Descriptor& descriptor = m_tcp_socket.m_desc;
    std::error_code write_error;
    std::size_t num_bytes = descriptor.write_some(static_cast<const char*>(data), *length, write_error);
    // A successful but short write should be treated the same as EAGAIN.
    if (!write_error && num_bytes < *length)
        write_error = error::resource_unavailable_try_again;
    *length = num_bytes;
    m_last_error = write_error;
    if (!write_error)
        return noErr;
    if (write_error == error::resource_unavailable_try_again) {
        m_last_operation = BlockingOperation::write;
        return errSSLWouldBlock;
    }
    return errSecIO;
}
#else // !REALM_HAVE_OPENSSL && !REALM_HAVE_SECURE_TRANSPORT

// Stub implementations used when the build has no TLS backend at all.
// Creating a TLS context throws ProtocolNotSupported; every other operation
// is an inert no-op so the non-TLS code paths still compile and link.

void Context::ssl_init()
{
    throw ProtocolNotSupported();
}

void Context::ssl_destroy() noexcept {}

void Stream::ssl_init() {}

void Stream::ssl_destroy() noexcept {}

void Context::ssl_use_certificate_chain_file(const std::string&, std::error_code&) {}

void Context::ssl_use_private_key_file(const std::string&, std::error_code&) {}

void Context::ssl_use_default_verify(std::error_code&) {}

void Context::ssl_use_verify_file(const std::string&, std::error_code&) {}

void Stream::ssl_set_verify_mode(VerifyMode, std::error_code&) {}

void Stream::ssl_set_host_name(const std::string&, std::error_code&) {}

void Stream::ssl_use_verify_callback(const std::function<SSLVerifyCallback>&, std::error_code&) {}

void Stream::ssl_handshake(std::error_code&, Want&) noexcept {}

// No TLS backend: reads and writes transfer nothing.
std::size_t Stream::ssl_read(char*, std::size_t, std::error_code&, Want&) noexcept
{
    return 0;
}

std::size_t Stream::ssl_write(const char*, std::size_t, std::error_code&, Want&) noexcept
{
    return 0;
}

bool Stream::ssl_shutdown(std::error_code&, Want&) noexcept
{
    return false;
}

// Without a TLS backend there are no certificate errors to classify.
bool is_server_cert_rejected_error(std::error_code&)
{
    return false;
}

#endif // ! REALM_HAVE_OPENSSL
} // namespace ssl
} // namespace network
} // namespace util
} // namespace realm
| 20,074 |
348 |
{"nom":"Vernou-la-Celle-sur-Seine","circ":"3ème circonscription","dpt":"Seine-et-Marne","inscrits":2067,"abs":1325,"votants":742,"blancs":51,"nuls":8,"exp":683,"res":[{"nuance":"UDI","nom":"<NAME>","voix":471},{"nuance":"FN","nom":"<NAME>","voix":212}]}
| 107 |
388 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import ABC, abstractmethod
class ChannelProvider(ABC):
    """
    ChannelProvider interface. This interface allows Bots to provide their own
    implementation for the configuration parameters to connect to a Bot
    Framework channel service.
    """

    @abstractmethod
    async def get_channel_service(self) -> str:
        """Return the channel service this provider is configured for."""
        raise NotImplementedError()

    @abstractmethod
    def is_government(self) -> bool:
        """Return True if the configured channel service is a government
        cloud environment."""
        raise NotImplementedError()

    @abstractmethod
    def is_public_azure(self) -> bool:
        """Return True if the configured channel service is the public
        Azure cloud."""
        raise NotImplementedError()
965 |
// Prepare the print job: this document always prints exactly two pages,
// then the default MFC print-dialog handling takes over.
BOOL CExampleView::OnPreparePrinting(CPrintInfo* pInfo)
{
    const UINT pageCount = 2; // the document has 2 pages
    pInfo->SetMaxPage(pageCount);
    return CView::DoPreparePrinting(pInfo);
}
| 61 |
318 |
/* SCCS Id: @(#)zap.c 3.4 2003/08/24 */
/* Copyright (c) <NAME>, Amsterdam, 1985. */
/* NetHack may be freely redistributed. See license for details. */
#include "hack.h"
/* Disintegration rays have special treatment; corpses are never left.
* But the routine which calculates the damage is separate from the routine
* which kills the monster. The damage routine returns this cookie to
* indicate that the monster should be disintegrated.
*/
#define MAGIC_COOKIE 1000
#ifdef OVLB
static NEARDATA boolean obj_zapped;
static NEARDATA int poly_zapped;
#endif
extern boolean notonhead; /* for long worms */
/* kludge to use mondied instead of killed */
extern boolean m_using;
STATIC_DCL void FDECL(costly_cancel, (struct obj *));
STATIC_DCL void FDECL(polyuse, (struct obj*, int, int));
STATIC_DCL void FDECL(create_polymon, (struct obj *, int));
STATIC_DCL boolean FDECL(zap_updown, (struct obj *));
STATIC_DCL int FDECL(zhitm, (struct monst *,int,int,struct obj **));
STATIC_DCL void FDECL(zhitu, (int,int,const char *,XCHAR_P,XCHAR_P));
STATIC_DCL void FDECL(revive_egg, (struct obj *));
#ifdef STEED
STATIC_DCL boolean FDECL(zap_steed, (struct obj *));
#endif
#ifdef OVLB
STATIC_DCL int FDECL(zap_hit, (int,int));
#endif
#ifdef OVL0
STATIC_DCL void FDECL(backfire, (struct obj *));
STATIC_DCL int FDECL(spell_hit_bonus, (int));
#endif
#define ZT_MAGIC_MISSILE (AD_MAGM-1)
#define ZT_FIRE (AD_FIRE-1)
#define ZT_COLD (AD_COLD-1)
#define ZT_SLEEP (AD_SLEE-1)
#define ZT_DEATH (AD_DISN-1) /* or disintegration */
#define ZT_LIGHTNING (AD_ELEC-1)
#define ZT_POISON_GAS (AD_DRST-1)
#define ZT_ACID (AD_ACID-1)
/* 8 and 9 are currently unassigned */
#define ZT_WAND(x) (x)
#define ZT_SPELL(x) (10+(x))
#define ZT_BREATH(x) (20+(x))
#define is_hero_spell(type) ((type) >= 10 && (type) < 20)
#ifndef OVLB
STATIC_VAR const char are_blinded_by_the_flash[];
extern const char * const flash_types[];
#else
/* message used when a flash of light blinds the hero */
STATIC_VAR const char are_blinded_by_the_flash[] = "are blinded by the flash!";

/* Display names for each ray type, indexed via the ZT_* macros:
 * 0-9 wands, 10-19 spell equivalents, 20-29 dragon-breath equivalents.
 * Empty strings are currently unassigned slots. */
const char * const flash_types[] = {	/* also used in buzzmu(mcastu.c) */
    "magic missile",	/* Wands must be 0-9 */
    "bolt of fire",
    "bolt of cold",
    "sleep ray",
    "death ray",
    "bolt of lightning",
    "",
    "",
    "",
    "",

    "magic missile",	/* Spell equivalents must be 10-19 */
    "fireball",
    "cone of cold",
    "sleep ray",
    "finger of death",
    "bolt of lightning",	/* There is no spell, used for retribution */
    "",
    "",
    "",
    "",

    "blast of missiles",	/* Dragon breath equivalents 20-29*/
    "blast of fire",
    "blast of frost",
    "blast of sleep gas",
    "blast of disintegration",
    "blast of lightning",
    "blast of poison gas",
    "blast of acid",
    "",
    ""
};
/* Routines for IMMEDIATE wands and spells. */
/* bhitm: monster mtmp was hit by the effect of wand or spell otmp.
 * Applies the effect, then handles the common aftermath: waking the
 * target and mapping a revealed invisible monster.  Always returns 0. */
int
bhitm(mtmp, otmp)
struct monst *mtmp;
struct obj *otmp;
{
    boolean wake = TRUE;	/* Most 'zaps' should wake monster */
    boolean reveal_invis = FALSE;
    /* Knights carrying their quest artifact deal double damage */
    boolean dbldam = Role_if(PM_KNIGHT) && u.uhave.questart;
    int dmg, otyp = otmp->otyp;
    const char *zap_type_text = "spell";
    struct obj *obj;
    boolean disguised_mimic = (mtmp->data->mlet == S_MIMIC &&
                               mtmp->m_ap_type != M_AP_NOTHING);

    /* no map feedback for a monster that has swallowed the hero */
    if (u.uswallow && mtmp == u.ustuck)
        reveal_invis = FALSE;

    switch(otyp) {
    case WAN_STRIKING:
        zap_type_text = "wand";
        /* fall through */
    case SPE_FORCE_BOLT:
        reveal_invis = TRUE;
        if (resists_magm(mtmp)) {	/* match effect on player */
            shieldeff(mtmp->mx, mtmp->my);
            break;	/* skip makeknown */
        } else if (u.uswallow || rnd(20) < 10 + find_mac(mtmp)) {
            dmg = d(2,12);
            if(dbldam) dmg *= 2;
            if (otyp == SPE_FORCE_BOLT)
                dmg += spell_damage_bonus();
            hit(zap_type_text, mtmp, exclam(dmg));
            (void) resist(mtmp, otmp->oclass, dmg, TELL);
        } else miss(zap_type_text, mtmp);
        makeknown(otyp);
        break;
    case WAN_SLOW_MONSTER:
    case SPE_SLOW_MONSTER:
        if (!resist(mtmp, otmp->oclass, 0, NOTELL)) {
            mon_adjust_speed(mtmp, -1, otmp);
            m_dowear(mtmp, FALSE); /* might want speed boots */
            /* slowing a whirly engulfer lets the hero escape */
            if (u.uswallow && (mtmp == u.ustuck) &&
                is_whirly(mtmp->data)) {
                You("disrupt %s!", mon_nam(mtmp));
                pline("A huge hole opens up...");
                expels(mtmp, mtmp->data, TRUE);
            }
        }
        break;
    case WAN_SPEED_MONSTER:
        if (!resist(mtmp, otmp->oclass, 0, NOTELL)) {
            mon_adjust_speed(mtmp, 1, otmp);
            m_dowear(mtmp, FALSE); /* might want speed boots */
        }
        break;
    case WAN_UNDEAD_TURNING:
    case SPE_TURN_UNDEAD:
        wake = FALSE;
        /* first revive any corpses/eggs the monster is carrying */
        if (unturn_dead(mtmp)) wake = TRUE;
        if (is_undead(mtmp->data)) {
            reveal_invis = TRUE;
            wake = TRUE;
            dmg = rnd(8);
            if(dbldam) dmg *= 2;
            if (otyp == SPE_TURN_UNDEAD)
                dmg += spell_damage_bonus();
            flags.bypasses = TRUE;	/* for make_corpse() */
            if (!resist(mtmp, otmp->oclass, dmg, NOTELL)) {
                if (mtmp->mhp > 0) monflee(mtmp, 0, FALSE, TRUE);
            }
        }
        break;
    case WAN_POLYMORPH:
    case SPE_POLYMORPH:
    case POT_POLYMORPH:
        if (resists_magm(mtmp)) {
            /* magic resistance protects from polymorph traps, so make
               it guard against involuntary polymorph attacks too... */
            shieldeff(mtmp->mx, mtmp->my);
        } else if (!resist(mtmp, otmp->oclass, 0, NOTELL)) {
            /* natural shapechangers aren't affected by system shock
               (unless protection from shapechangers is interfering
               with their metabolism...) */
            if (mtmp->cham == CHAM_ORDINARY && !rn2(25)) {
                if (canseemon(mtmp)) {
                    pline("%s shudders!", Monnam(mtmp));
                    makeknown(otyp);
                }
                /* dropped inventory shouldn't be hit by this zap */
                for (obj = mtmp->minvent; obj; obj = obj->nobj)
                    bypass_obj(obj);
                /* flags.bypasses = TRUE; ## for make_corpse() */
                /* no corpse after system shock */
                xkilled(mtmp, 3);
            } else if (newcham(mtmp, (struct permonst *)0,
                               (otyp != POT_POLYMORPH), FALSE)) {
                if (!Hallucination && canspotmon(mtmp))
                    makeknown(otyp);
            }
        }
        break;
    case WAN_CANCELLATION:
    case SPE_CANCELLATION:
        (void) cancel_monst(mtmp, otmp, TRUE, TRUE, FALSE);
        break;
    case WAN_TELEPORTATION:
    case SPE_TELEPORT_AWAY:
        reveal_invis = !u_teleport_mon(mtmp, TRUE);
        break;
    case WAN_MAKE_INVISIBLE:
        {
        int oldinvis = mtmp->minvis;
        char nambuf[BUFSZ];

        /* format monster's name before altering its visibility */
        Strcpy(nambuf, Monnam(mtmp));
        mon_set_minvis(mtmp);
        if (!oldinvis && knowninvisible(mtmp)) {
            pline("%s turns transparent!", nambuf);
            makeknown(otyp);
        }
        break;
        }
    case WAN_NOTHING:
    case WAN_LOCKING:
    case SPE_WIZARD_LOCK:
        wake = FALSE;
        break;
    case WAN_PROBING:
        wake = FALSE;
        reveal_invis = TRUE;
        probe_monster(mtmp);
        makeknown(otyp);
        break;
    case WAN_OPENING:
    case SPE_KNOCK:
        wake = FALSE;	/* don't want immediate counterattack */
        if (u.uswallow && mtmp == u.ustuck) {
            /* zapping open an engulfer from inside expels the hero */
            if (is_animal(mtmp->data)) {
                if (Blind) You_feel("a sudden rush of air!");
                else pline("%s opens its mouth!", Monnam(mtmp));
            }
            expels(mtmp, mtmp->data, TRUE);
#ifdef STEED
        } else if (!!(obj = which_armor(mtmp, W_SADDLE))) {
            /* unlocks a saddle, dropping it to the floor */
            mtmp->misc_worn_check &= ~obj->owornmask;
            update_mon_intrinsics(mtmp, obj, FALSE, FALSE);
            obj->owornmask = 0L;
            obj_extract_self(obj);
            place_object(obj, mtmp->mx, mtmp->my);
            /* call stackobj() if we ever drop anything that can merge */
            newsym(mtmp->mx, mtmp->my);
#endif
        }
        break;
    case SPE_HEALING:
    case SPE_EXTRA_HEALING:
        reveal_invis = TRUE;
        if (mtmp->data != &mons[PM_PESTILENCE]) {
            wake = FALSE;	/* wakeup() makes the target angry */
            mtmp->mhp += d(6, otyp == SPE_EXTRA_HEALING ? 8 : 4);
            if (mtmp->mhp > mtmp->mhpmax)
                mtmp->mhp = mtmp->mhpmax;
            /* healing also cures blindness */
            if (mtmp->mblinded) {
                mtmp->mblinded = 0;
                mtmp->mcansee = 1;
            }
            if (canseemon(mtmp)) {
                if (disguised_mimic) {
                    if (mtmp->m_ap_type == M_AP_OBJECT &&
                        mtmp->mappearance == STRANGE_OBJECT) {
                        /* it can do better now */
                        set_mimic_sym(mtmp);
                        newsym(mtmp->mx, mtmp->my);
                    } else
                        mimic_hit_msg(mtmp, otyp);
                } else pline("%s looks%s better.", Monnam(mtmp),
                             otyp == SPE_EXTRA_HEALING ? " much" : "" );
            }
            /* healing a tame or peaceful monster is a pious act */
            if (mtmp->mtame || mtmp->mpeaceful) {
                adjalign(Role_if(PM_HEALER) ? 1 : sgn(u.ualign.type));
            }
        } else {	/* Pestilence */
            /* Pestilence will always resist; damage is half of 3d{4,8} */
            (void) resist(mtmp, otmp->oclass,
                          d(3, otyp == SPE_EXTRA_HEALING ? 8 : 4), TELL);
        }
        break;
    case WAN_LIGHT:	/* (broken wand) */
        if (flash_hits_mon(mtmp, otmp)) {
            makeknown(WAN_LIGHT);
            reveal_invis = TRUE;
        }
        break;
    case WAN_SLEEP:	/* (broken wand) */
        /* [wakeup() doesn't rouse victims of temporary sleep,
           so it's okay to leave `wake' set to TRUE here] */
        reveal_invis = TRUE;
        if (sleep_monst(mtmp, d(1 + otmp->spe, 12), WAND_CLASS))
            slept_monst(mtmp);
        if (!Blind) makeknown(WAN_SLEEP);
        break;
    case SPE_STONE_TO_FLESH:
        if (monsndx(mtmp->data) == PM_STONE_GOLEM) {
            char *name = Monnam(mtmp);
            /* turn into flesh golem */
            if (newcham(mtmp, &mons[PM_FLESH_GOLEM], FALSE, FALSE)) {
                if (canseemon(mtmp))
                    pline("%s turns to flesh!", name);
            } else {
                if (canseemon(mtmp))
                    pline("%s looks rather fleshy for a moment.",
                          name);
            }
        } else
            wake = FALSE;
        break;
    case SPE_DRAIN_LIFE:
        dmg = rnd(8);
        if(dbldam) dmg *= 2;
        if (otyp == SPE_DRAIN_LIFE)
            dmg += spell_damage_bonus();
        if (resists_drli(mtmp))
            shieldeff(mtmp->mx, mtmp->my);
        else if (!resist(mtmp, otmp->oclass, dmg, NOTELL) &&
                 mtmp->mhp > 0) {
            /* drain both current and maximum hit points, and a level */
            mtmp->mhp -= dmg;
            mtmp->mhpmax -= dmg;
            if (mtmp->mhp <= 0 || mtmp->mhpmax <= 0 || mtmp->m_lev < 1)
                xkilled(mtmp, 1);
            else {
                mtmp->m_lev--;
                if (canseemon(mtmp))
                    pline("%s suddenly seems weaker!", Monnam(mtmp));
            }
        }
        break;
    default:
        impossible("What an interesting effect (%d)", otyp);
        break;
    }
    /* common aftermath: rouse the target (or expose a dead mimic) */
    if(wake) {
        if(mtmp->mhp > 0) {
            wakeup(mtmp);
            m_respond(mtmp);
            if(mtmp->isshk && !*u.ushops) hot_pursuit(mtmp);
        } else if(mtmp->m_ap_type)
            seemimic(mtmp); /* might unblock if mimicing a boulder/door */
    }
    /* note: bhitpos won't be set if swallowed, but that's okay since
     * reveal_invis will be false.  We can't use mtmp->mx, my since it
     * might be an invisible worm hit on the tail.
     */
    if (reveal_invis) {
        if (mtmp->mhp > 0 && cansee(bhitpos.x, bhitpos.y) &&
            !canspotmon(mtmp))
            map_invisible(bhitpos.x, bhitpos.y);
    }
    return 0;
}
/* Report a monster's status line and reveal its inventory (wand of
 * probing).  Long worm tails are skipped so the inventory isn't shown
 * twice for the same creature. */
void
probe_monster(mtmp)
struct monst *mtmp;
{
    struct obj *otmp;

    mstatusline(mtmp);
    if (notonhead) return;	/* don't show minvent for long worm tail */
#ifndef GOLDOBJ
    if (mtmp->minvent || mtmp->mgold) {
#else
    if (mtmp->minvent) {
#endif
        /* probing identifies the appearance of everything carried */
        for (otmp = mtmp->minvent; otmp; otmp = otmp->nobj)
            otmp->dknown = 1;	/* treat as "seen" */
        (void) display_minventory(mtmp, MINV_ALL, (char *)0);
    } else {
        pline("%s is not carrying anything.", noit_Monnam(mtmp));
    }
}
#endif /*OVLB*/
#ifdef OVL1
/*
* Return the object's physical location. This only makes sense for
* objects that are currently on the level (i.e. migrating objects
* are nowhere). By default, only things that can be seen (in hero's
* inventory, monster's inventory, or on the ground) are reported.
* By adding BURIED_TOO and/or CONTAINED_TOO flags, you can also get
* the location of buried and contained objects. Note that if an
* object is carried by a monster, its reported position may change
* from turn to turn. This function returns FALSE if the position
* is not available or subject to the constraints above.
*/
boolean
get_obj_location(obj, xp, yp, locflags)
struct obj *obj;
xchar *xp, *yp;
int locflags;
{
    boolean found = FALSE;

    switch (obj->where) {
    case OBJ_INVENT:
        /* carried by the hero */
        *xp = u.ux, *yp = u.uy;
        found = TRUE;
        break;
    case OBJ_FLOOR:
        *xp = obj->ox, *yp = obj->oy;
        found = TRUE;
        break;
    case OBJ_MINVENT:
        /* mx of 0 marks a migrating carrier with no map position */
        if (obj->ocarry->mx) {
            *xp = obj->ocarry->mx, *yp = obj->ocarry->my;
            found = TRUE;
        }
        break;
    case OBJ_BURIED:
        if (locflags & BURIED_TOO) {
            *xp = obj->ox, *yp = obj->oy;
            found = TRUE;
        }
        break;
    case OBJ_CONTAINED:
        /* recurse outward to the [possibly nested] container's spot */
        if (locflags & CONTAINED_TOO)
            return get_obj_location(obj->ocontainer, xp, yp, locflags);
        break;
    }
    if (!found)
        *xp = *yp = 0;
    return found;
}
boolean
get_mon_location(mon, xp, yp, locflags)
struct monst *mon;
xchar *xp, *yp;
int locflags;	/* non-zero means get location even if monster is buried */
{
    /* the hero's position is tracked in u, not in the monst struct */
    if (mon == &youmonst) {
        *xp = u.ux;
        *yp = u.uy;
        return TRUE;
    }
    /* mx == 0 marks a migrating monster; buried ones are skipped
       unless the caller asked for them via locflags */
    if (mon->mx > 0 && (!mon->mburied || locflags)) {
        *xp = mon->mx;
        *yp = mon->my;
        return TRUE;
    }
    *xp = *yp = 0;
    return FALSE;
}
/* used by revive() and animate_statue() */
/* Recreate a monster from the traits saved on a corpse/statue object.
 * A fresh monster is made first, then the saved traits are grafted onto
 * it via replmon().  Returns the revived monster, or null on failure. */
struct monst *
montraits(obj,cc)
struct obj *obj;
coord *cc;
{
    struct monst *mtmp = (struct monst *)0;
    struct monst *mtmp2 = (struct monst *)0;

    if (obj->oxlth && (obj->oattached == OATTACHED_MONST))
        mtmp2 = get_mtraits(obj, TRUE);
    if (mtmp2) {
        /* save_mtraits() validated mtmp2->mnum */
        mtmp2->data = &mons[mtmp2->mnum];
        if (mtmp2->mhpmax <= 0 && !is_rider(mtmp2->data))
            return (struct monst *)0;
        mtmp = makemon(mtmp2->data,
                       cc->x, cc->y, NO_MINVENT|MM_NOWAIT|MM_NOCOUNTBIRTH);
        if (!mtmp) return mtmp;

        /* heal the monster */
        if (mtmp->mhpmax > mtmp2->mhpmax && is_rider(mtmp2->data))
            mtmp2->mhpmax = mtmp->mhpmax;
        mtmp2->mhp = mtmp2->mhpmax;
        /* Get these ones from mtmp */
        mtmp2->minvent = mtmp->minvent; /*redundant*/
        /* monster ID is available if the monster died in the current
           game, but should be zero if the corpse was in a bones level
           (we cleared it when loading bones) */
        if (!mtmp2->m_id)
            mtmp2->m_id = mtmp->m_id;
        mtmp2->mx = mtmp->mx;
        mtmp2->my = mtmp->my;
        mtmp2->mux = mtmp->mux;
        mtmp2->muy = mtmp->muy;
        mtmp2->mw = mtmp->mw;
        mtmp2->wormno = mtmp->wormno;
        mtmp2->misc_worn_check = mtmp->misc_worn_check;
        mtmp2->weapon_check = mtmp->weapon_check;
        mtmp2->mtrapseen = mtmp->mtrapseen;
        mtmp2->mflee = mtmp->mflee;
        mtmp2->mburied = mtmp->mburied;
        mtmp2->mundetected = mtmp->mundetected;
        mtmp2->mfleetim = mtmp->mfleetim;
        mtmp2->mlstmv = mtmp->mlstmv;
        mtmp2->m_ap_type = mtmp->m_ap_type;
        /* set these ones explicitly */
        mtmp2->mavenge = 0;
        mtmp2->meating = 0;
        mtmp2->mleashed = 0;
        mtmp2->mtrapped = 0;
        mtmp2->msleeping = 0;
        mtmp2->mfrozen = 0;
        mtmp2->mcanmove = 1;
        /* most cancelled monsters return to normal,
           but some need to stay cancelled */
        if (!dmgtype(mtmp2->data, AD_SEDU)
#ifdef SEDUCE
            && !dmgtype(mtmp2->data, AD_SSEX)
#endif
           ) mtmp2->mcan = 0;
        mtmp2->mcansee = 1;	/* set like in makemon */
        mtmp2->mblinded = 0;
        mtmp2->mstun = 0;
        mtmp2->mconf = 0;

        /* swap the freshly made monster for the one carrying the traits */
        replmon(mtmp,mtmp2);
    }
    return mtmp2;
}
/*
* get_container_location() returns the following information
* about the outermost container:
* loc argument gets set to:
* OBJ_INVENT if in hero's inventory; return 0.
* OBJ_FLOOR if on the floor; return 0.
* OBJ_BURIED if buried; return 0.
* OBJ_MINVENT if in monster's inventory; return monster.
* container_nesting is updated with the nesting depth of the containers
* if applicable.
*/
struct monst *
get_container_location(obj, loc, container_nesting)
struct obj *obj;
int *loc;
int *container_nesting;
{
    if (!obj || !loc)
        return 0;

    if (container_nesting)
        *container_nesting = 0;
    /* walk outward to the outermost container, counting nesting levels */
    for (; obj && obj->where == OBJ_CONTAINED; obj = obj->ocontainer)
        if (container_nesting)
            *container_nesting += 1;

    if (!obj)
        return (struct monst *)0;
    *loc = obj->where;	/* outermost container's location */
    return (obj->where == OBJ_MINVENT) ? obj->ocarry : (struct monst *)0;
}
/*
 * Attempt to revive the given corpse, return the revived monster if
 * successful.  Note: this does NOT use up the corpse if it fails.
 * Non-corpse objects are ignored (returns null).
 */
struct monst *
revive(obj)
register struct obj *obj;
{
    register struct monst *mtmp = (struct monst *)0;
    struct obj *container = (struct obj *)0;
    int container_nesting = 0;
    schar savetame = 0;
    boolean recorporealization = FALSE;
    boolean in_container = FALSE;

    if(obj->otyp == CORPSE) {
        int montype = obj->corpsenm;
        xchar x, y;

        /* first determine where the revived monster should appear */
        if (obj->where == OBJ_CONTAINED) {
            /* deal with corpses in [possibly nested] containers */
            struct monst *carrier;
            int holder = 0;

            container = obj->ocontainer;
            carrier = get_container_location(container, &holder,
                                             &container_nesting);
            switch(holder) {
            case OBJ_MINVENT:
                x = carrier->mx; y = carrier->my;
                in_container = TRUE;
                break;
            case OBJ_INVENT:
                x = u.ux; y = u.uy;
                in_container = TRUE;
                break;
            case OBJ_FLOOR:
                if (!get_obj_location(obj, &x, &y, CONTAINED_TOO))
                    return (struct monst *) 0;
                in_container = TRUE;
                break;
            default:
                return (struct monst *)0;
            }
        } else {
            /* only for invent, minvent, or floor */
            if (!get_obj_location(obj, &x, &y, 0))
                return (struct monst *) 0;
        }
        if (in_container) {
            /* Rules for revival from containers:
               - the container cannot be locked
               - the container cannot be heavily nested (>2 is arbitrary)
               - the container cannot be a statue or bag of holding
                 (except in very rare cases for the latter)
             */
            if (!x || !y || container->olocked || container_nesting > 2 ||
                container->otyp == STATUE ||
                (container->otyp == BAG_OF_HOLDING && rn2(40)))
                return (struct monst *)0;
        }
        /* if the spot is occupied, look for an adjacent free one */
        if (MON_AT(x,y)) {
            coord new_xy;

            if (enexto(&new_xy, x, y, &mons[montype]))
                x = new_xy.x, y = new_xy.y;
        }
        if(cant_create(&montype, TRUE)) {
            /* make a zombie or worm instead */
            mtmp = makemon(&mons[montype], x, y,
                           NO_MINVENT|MM_NOWAIT);
            if (mtmp) {
                mtmp->mhp = mtmp->mhpmax = 100;
                mon_adjust_speed(mtmp, 2, (struct obj *)0); /* MFAST */
            }
        } else {
            if (obj->oxlth && (obj->oattached == OATTACHED_MONST)) {
                /* corpse carries the original monster's saved traits */
                coord xy;
                xy.x = x; xy.y = y;
                mtmp = montraits(obj, &xy);
                if (mtmp && mtmp->mtame && !mtmp->isminion)
                    wary_dog(mtmp, TRUE);
            } else
                mtmp = makemon(&mons[montype], x, y,
                               NO_MINVENT|MM_NOWAIT|MM_NOCOUNTBIRTH);
            if (mtmp) {
                if (obj->oxlth && (obj->oattached == OATTACHED_M_ID)) {
                    /* the dead monster may still be wandering around as a
                       ghost; if so, draw it back into its body */
                    unsigned m_id;
                    struct monst *ghost;
                    (void) memcpy((genericptr_t)&m_id,
                                  (genericptr_t)obj->oextra, sizeof(m_id));
                    ghost = find_mid(m_id, FM_FMON);
                    if (ghost && ghost->data == &mons[PM_GHOST]) {
                        int x2, y2;
                        x2 = ghost->mx; y2 = ghost->my;
                        if (ghost->mtame)
                            savetame = ghost->mtame;
                        if (canseemon(ghost))
                            pline("%s is suddenly drawn into its former body!",
                                  Monnam(ghost));
                        mondead(ghost);
                        recorporealization = TRUE;
                        newsym(x2, y2);
                    }
                    /* don't mess with obj->oxlth here */
                    obj->oattached = OATTACHED_NOTHING;
                }
                /* Monster retains its name */
                if (obj->onamelth)
                    mtmp = christen_monst(mtmp, ONAME(obj));
                /* flag the quest leader as alive. */
                if (mtmp->data->msound == MS_LEADER || mtmp->m_id ==
                    quest_status.leader_m_id)
                    quest_status.leader_is_dead = FALSE;
            }
        }
        if (mtmp) {
            /* a partly eaten corpse yields a correspondingly weaker monster */
            if (obj->oeaten)
                mtmp->mhp = eaten_stat(mtmp->mhp, obj);
            /* track that this monster was revived at least once */
            mtmp->mrevived = 1;

            if (recorporealization) {
                /* If mtmp is revivification of former tame ghost*/
                if (savetame) {
                    struct monst *mtmp2 = tamedog(mtmp, (struct obj *)0);
                    if (mtmp2) {
                        mtmp2->mtame = savetame;
                        mtmp = mtmp2;
                    }
                }
                /* was ghost, now alive, it's all very confusing */
                mtmp->mconf = 1;
            }

            /* success: the corpse is used up from wherever it was */
            switch (obj->where) {
            case OBJ_INVENT:
                useup(obj);
                break;
            case OBJ_FLOOR:
                /* in case MON_AT+enexto for invisible mon */
                x = obj->ox, y = obj->oy;
                /* not useupf(), which charges */
                if (obj->quan > 1L)
                    obj = splitobj(obj, 1L);
                delobj(obj);
                newsym(x, y);
                break;
            case OBJ_MINVENT:
                m_useup(obj->ocarry, obj);
                break;
            case OBJ_CONTAINED:
                obj_extract_self(obj);
                obfree(obj, (struct obj *) 0);
                break;
            default:
                panic("revive");
            }
        }
    }
    return mtmp;
}
STATIC_OVL void
revive_egg(obj)
struct obj *obj;
{
/*
* Note: generic eggs with corpsenm set to NON_PM will never hatch.
*/
if (obj->otyp != EGG) return;
if (obj->corpsenm != NON_PM && !dead_species(obj->corpsenm, TRUE))
attach_egg_hatch_timeout(obj);
}
/* try to revive all corpses and eggs carried by `mon'.
 * Returns the number of corpses actually revived.  `mon' may be the
 * hero (&youmonst), in which case the hero's inventory is scanned. */
int
unturn_dead(mon)
struct monst *mon;
{
    struct obj *otmp, *otmp2;
    struct monst *mtmp2;
    char owner[BUFSZ], corpse[BUFSZ];
    boolean youseeit;
    int once = 0, res = 0;

    youseeit = (mon == &youmonst) ? TRUE : canseemon(mon);
    otmp2 = (mon == &youmonst) ? invent : mon->minvent;

    while ((otmp = otmp2) != 0) {
        /* grab the next link first; revive() may free otmp */
        otmp2 = otmp->nobj;
        if (otmp->otyp == EGG)
            revive_egg(otmp);
        if (otmp->otyp != CORPSE) continue;
        /* save the name; the object is liable to go away */
        if (youseeit) Strcpy(corpse, corpse_xname(otmp, TRUE));

        /* for a merged group, only one is revived; should this be fixed? */
        if ((mtmp2 = revive(otmp)) != 0) {
            ++res;
            if (youseeit) {
                /* format the owner's name only once, on the first revival */
                if (!once++) Strcpy(owner,
                                    (mon == &youmonst) ? "Your" :
                                    s_suffix(Monnam(mon)));
                pline("%s %s suddenly comes alive!", owner, corpse);
            } else if (canseemon(mtmp2))
                pline("%s suddenly appears!", Amonnam(mtmp2));
        }
    }
    return res;
}
#endif /*OVL1*/
#ifdef OVLB
static const char charged_objs[] = { WAND_CLASS, WEAPON_CLASS, ARMOR_CLASS, 0 };
/* Charge the hero for cancelling a shop-owned object: unpaid carried
 * items get billed as used-up merchandise, and items on a shop floor
 * are either billed (if the hero is inside the shop) or treated as
 * stolen.  Free items (no_charge) are exempt. */
STATIC_OVL void
costly_cancel(obj)
register struct obj *obj;
{
    char objroom;
    struct monst *shkp = (struct monst *)0;

    if (obj->no_charge) return;

    switch (obj->where) {
    case OBJ_INVENT:
        if (obj->unpaid) {
            shkp = shop_keeper(*u.ushops);
            if (!shkp) return;
            Norep("You cancel an unpaid object, you pay for it!");
            bill_dummy_object(obj);
        }
        break;
    case OBJ_FLOOR:
        objroom = *in_rooms(obj->ox, obj->oy, SHOPBASE);
        shkp = shop_keeper(objroom);
        if (!shkp || !inhishop(shkp)) return;
        if (costly_spot(u.ux, u.uy) && objroom == *u.ushops) {
            Norep("You cancel it, you pay for it!");
            bill_dummy_object(obj);
        } else
            (void) stolen_value(obj, obj->ox, obj->oy, FALSE, FALSE);
        break;
    }
}
/* cancel obj, possibly carried by you or a monster.
 * Strips enchantment/charges (undoing any worn-item attribute bonuses
 * the hero was receiving), blanks scrolls and spellbooks, turns potions
 * to water or fruit juice, and removes blessed/cursed status.  Shop
 * billing is handled via costly_cancel(). */
void
cancel_item(obj)
register struct obj *obj;
{
    boolean u_ring = (obj == uleft) || (obj == uright);
    register boolean holy = (obj->otyp == POT_WATER && obj->blessed);

    /* undo the attribute bonus the hero gets from wearing this item */
    switch(obj->otyp) {
    case RIN_GAIN_STRENGTH:
        if ((obj->owornmask & W_RING) && u_ring) {
            ABON(A_STR) -= obj->spe;
            flags.botl = 1;
        }
        break;
    case RIN_GAIN_CONSTITUTION:
        if ((obj->owornmask & W_RING) && u_ring) {
            ABON(A_CON) -= obj->spe;
            flags.botl = 1;
        }
        break;
    case RIN_ADORNMENT:
        if ((obj->owornmask & W_RING) && u_ring) {
            ABON(A_CHA) -= obj->spe;
            flags.botl = 1;
        }
        break;
    case RIN_INCREASE_ACCURACY:
        if ((obj->owornmask & W_RING) && u_ring)
            u.uhitinc -= obj->spe;
        break;
    case RIN_INCREASE_DAMAGE:
        if ((obj->owornmask & W_RING) && u_ring)
            u.udaminc -= obj->spe;
        break;
    case GAUNTLETS_OF_DEXTERITY:
        if ((obj->owornmask & W_ARMG) && (obj == uarmg)) {
            ABON(A_DEX) -= obj->spe;
            flags.botl = 1;
        }
        break;
    case HELM_OF_BRILLIANCE:
        if ((obj->owornmask & W_ARMH) && (obj == uarmh)) {
            ABON(A_INT) -= obj->spe;
            ABON(A_WIS) -= obj->spe;
            flags.botl = 1;
        }
        break;
        /* case RIN_PROTECTION:  not needed */
    }

    /* now neutralize the object itself, if it is magical */
    if (objects[obj->otyp].oc_magic
        || (obj->spe && (obj->oclass == ARMOR_CLASS ||
                         obj->oclass == WEAPON_CLASS || is_weptool(obj)))
        || obj->otyp == POT_ACID || obj->otyp == POT_SICKNESS) {
        if (obj->spe != ((obj->oclass == WAND_CLASS) ? -1 : 0) &&
            obj->otyp != WAN_CANCELLATION &&
            /* can't cancel cancellation */
            obj->otyp != MAGIC_LAMP &&
            obj->otyp != CANDELABRUM_OF_INVOCATION) {
            costly_cancel(obj);
            /* cancelled wands read -1; everything else goes to 0 */
            obj->spe = (obj->oclass == WAND_CLASS) ? -1 : 0;
        }
        switch (obj->oclass) {
        case SCROLL_CLASS:
            costly_cancel(obj);
            obj->otyp = SCR_BLANK_PAPER;
            obj->spe = 0;
            break;
        case SPBOOK_CLASS:
            if (obj->otyp != SPE_CANCELLATION &&
                obj->otyp != SPE_BOOK_OF_THE_DEAD) {
                costly_cancel(obj);
                obj->otyp = SPE_BLANK_PAPER;
            }
            break;
        case POTION_CLASS:
            costly_cancel(obj);
            if (obj->otyp == POT_SICKNESS ||
                obj->otyp == POT_SEE_INVISIBLE) {
                /* sickness is "biologically contaminated" fruit juice; cancel it
                 * and it just becomes fruit juice... whereas see invisible
                 * tastes like "enchanted" fruit juice, it similarly cancels.
                 */
                obj->otyp = POT_FRUIT_JUICE;
            } else {
                obj->otyp = POT_WATER;
                obj->odiluted = 0; /* same as any other water */
            }
            break;
        }
    }
    /* cancelling holy water costs too; plain water isn't magical above */
    if (holy) costly_cancel(obj);
    unbless(obj);
    uncurse(obj);
#ifdef INVISIBLE_OBJECTS
    if (obj->oinvis) obj->oinvis = 0;
#endif
    return;
}
/* Remove a positive enchantment or charge from obj,
 * possibly carried by you or a monster.
 * Returns TRUE if the object was drained, FALSE if it was not
 * charged/enchanted to begin with or resisted the effect.
 */
boolean
drain_item(obj)
register struct obj *obj;
{
    boolean u_ring;

    /* Is this a charged/enchanted object? */
    if (!obj || (!objects[obj->otyp].oc_charged &&
                 obj->oclass != WEAPON_CLASS &&
                 obj->oclass != ARMOR_CLASS && !is_weptool(obj)) ||
        obj->spe <= 0)
        return (FALSE);
    /* 10% chance for ordinary objects, 90% for artifacts */
    if (obj_resists(obj, 10, 90))
        return (FALSE);

    /* Charge for the cost of the object */
    costly_cancel(obj);	/* The term "cancel" is okay for now */

    /* Drain the object and any implied effects */
    obj->spe--;
    u_ring = (obj == uleft) || (obj == uright);
    /* the hero loses one point of any bonus the worn item granted */
    switch(obj->otyp) {
    case RIN_GAIN_STRENGTH:
        if ((obj->owornmask & W_RING) && u_ring) {
            ABON(A_STR)--;
            flags.botl = 1;
        }
        break;
    case RIN_GAIN_CONSTITUTION:
        if ((obj->owornmask & W_RING) && u_ring) {
            ABON(A_CON)--;
            flags.botl = 1;
        }
        break;
    case RIN_ADORNMENT:
        if ((obj->owornmask & W_RING) && u_ring) {
            ABON(A_CHA)--;
            flags.botl = 1;
        }
        break;
    case RIN_INCREASE_ACCURACY:
        if ((obj->owornmask & W_RING) && u_ring)
            u.uhitinc--;
        break;
    case RIN_INCREASE_DAMAGE:
        if ((obj->owornmask & W_RING) && u_ring)
            u.udaminc--;
        break;
    case HELM_OF_BRILLIANCE:
        if ((obj->owornmask & W_ARMH) && (obj == uarmh)) {
            ABON(A_INT)--;
            ABON(A_WIS)--;
            flags.botl = 1;
        }
        break;
    case GAUNTLETS_OF_DEXTERITY:
        if ((obj->owornmask & W_ARMG) && (obj == uarmg)) {
            ABON(A_DEX)--;
            flags.botl = 1;
        }
        break;
    case RIN_PROTECTION:
        flags.botl = 1;
        break;
    }
    if (carried(obj)) update_inventory();
    return (TRUE);
}
#endif /*OVLB*/
#ifdef OVL0
boolean
obj_resists(obj, ochance, achance)
struct obj *obj;
int ochance, achance;	/* percent chance for ordinary objects, artifacts */
{
    /* the invocation items and Rider corpses are indestructible */
    if (obj->otyp == AMULET_OF_YENDOR
        || obj->otyp == SPE_BOOK_OF_THE_DEAD
        || obj->otyp == CANDELABRUM_OF_INVOCATION
        || obj->otyp == BELL_OF_OPENING
        || (obj->otyp == CORPSE && is_rider(&mons[obj->corpsenm])))
        return TRUE;

    /* otherwise the object resists with the given percent probability */
    {
        int roll = rn2(100);
        int threshold = obj->oartifact ? achance : ochance;

        return (boolean)(roll < threshold);
    }
}
boolean
obj_shudders(obj)
struct obj *obj;
{
    /* Chance that an object is destroyed by a stray zap: the result is
     * TRUE with probability 1/zap_odds. */
    int zap_odds = 8;	/* default: half-life = 6 zaps */

    if (obj->oclass == WAND_CLASS || obj->cursed)
        zap_odds = 3;	/* half-life = 2 zaps */
    else if (obj->blessed)
        zap_odds = 12;	/* half-life = 8 zaps */

    /* adjust for "large" quantities of identical things */
    if (obj->quan > 4L)
        zap_odds /= 2;

    return (boolean)(!rn2(zap_odds));
}
#endif /*OVL0*/
#ifdef OVLB
/* Use up at least minwt number of things made of material mat.
 * There's also a chance that other stuff will be used up.  Finally,
 * there's a random factor here to keep from always using the stuff
 * at the top of the pile.
 */
STATIC_OVL void
polyuse(objhdr, mat, minwt)
struct obj *objhdr;
int mat, minwt;
{
    register struct obj *otmp, *otmp2;

    for(otmp = objhdr; minwt > 0 && otmp; otmp = otmp2) {
        otmp2 = otmp->nexthere;
        /* attached ball & chain must survive */
        if (otmp == uball || otmp == uchain) continue;
        if (obj_resists(otmp, 0, 0)) continue;	/* preserve unique objects */
#ifdef MAIL
        if (otmp->otyp == SCR_MAIL) continue;
#endif

        /* matching material is consumed most of the time
           (rn2(minwt+1) != 0); mismatched items are occasionally
           consumed too, when the roll comes up 0 */
        if (((int) objects[otmp->otyp].oc_material == mat) ==
            (rn2(minwt + 1) != 0)) {
            /* appropriately add damage to bill */
            if (costly_spot(otmp->ox, otmp->oy)) {
                if (*u.ushops)
                    addtobill(otmp, FALSE, FALSE, FALSE);
                else
                    (void)stolen_value(otmp,
                                       otmp->ox, otmp->oy, FALSE, FALSE);
            }
            if (otmp->quan < LARGEST_INT)
                minwt -= (int)otmp->quan;
            else
                minwt = 0;
            delobj(otmp);
        }
    }
}
/*
 * Polymorph some of the stuff in this pile into a monster, preferably
 * a golem of the kind okind (matched to the pile's dominant material).
 */
STATIC_OVL void
create_polymon(obj, okind)
struct obj *obj;
int okind;
{
    struct permonst *mdat = (struct permonst *)0;
    struct monst *mtmp;
    const char *material;
    int pm_index;

    /* no golems if you zap only one object -- not enough stuff */
    if(!obj || (!obj->nexthere && obj->quan == 1L)) return;

    /* some of these choices are arbitrary */
    switch(okind) {
    case IRON:
    case METAL:
    case MITHRIL:
        pm_index = PM_IRON_GOLEM;
        material = "metal ";
        break;
    case COPPER:
    case SILVER:
    case PLATINUM:
    case GEMSTONE:
    case MINERAL:
        pm_index = rn2(2) ? PM_STONE_GOLEM : PM_CLAY_GOLEM;
        material = "lithic ";
        break;
    case 0:
    case FLESH:
        /* there is no flesh type, but all food is type 0, so we use it */
        pm_index = PM_FLESH_GOLEM;
        material = "organic ";
        break;
    case WOOD:
        pm_index = PM_WOOD_GOLEM;
        material = "wood ";
        break;
    case LEATHER:
        pm_index = PM_LEATHER_GOLEM;
        material = "leather ";
        break;
    case CLOTH:
        pm_index = PM_ROPE_GOLEM;
        material = "cloth ";
        break;
    case BONE:
        pm_index = PM_SKELETON;	/* nearest thing to "bone golem" */
        material = "bony ";
        break;
    case GOLD:
        pm_index = PM_GOLD_GOLEM;
        material = "gold ";
        break;
    case GLASS:
        pm_index = PM_GLASS_GOLEM;
        material = "glassy ";
        break;
    case PAPER:
        pm_index = PM_PAPER_GOLEM;
        material = "paper ";
        break;
    default:
        /* if all else fails... */
        pm_index = PM_STRAW_GOLEM;
        material = "";
        break;
    }

    /* NOTE(review): if the chosen golem has been genocided, mdat stays
       null here; makemon() presumably substitutes something else for a
       null species -- confirm against makemon()'s contract */
    if (!(mvitals[pm_index].mvflags & G_GENOD))
        mdat = &mons[pm_index];

    mtmp = makemon(mdat, obj->ox, obj->oy, NO_MM_FLAGS);
    /* consume roughly the new monster's weight in raw material */
    polyuse(obj, okind, (int)mons[pm_index].cwt);

    if(mtmp && cansee(mtmp->mx, mtmp->my)) {
        pline("Some %sobjects meld, and %s arises from the pile!",
              material, a_monnam(mtmp));
    }
}
/* Assumes obj is on the floor. */
/* Object "shuddered" under a polymorph zap: destroy it (or part of a
 * stack), possibly scheduling a golem via the poly_zapped material.
 * Assumes obj is on the floor. */
void
do_osshock(obj)
struct obj *obj;
{
    long i;

#ifdef MAIL
    if (obj->otyp == SCR_MAIL) return;
#endif
    obj_zapped = TRUE;  /* lets the caller give "shuddering vibrations" clue */

    if(poly_zapped < 0) {
        /* some may metamorphosize: first qualifying item in the pile
           decides the golem material (Luck lowers the odds per item) */
        for (i = obj->quan; i; i--)
            if (! rn2(Luck + 45)) {
                poly_zapped = objects[obj->otyp].oc_material;
                break;
            }
    }

    /* if quan > 1 then some will survive intact */
    if (obj->quan > 1L) {
        if (obj->quan > LARGEST_INT)
            obj = splitobj(obj, (long)rnd(30000));
        else
            obj = splitobj(obj, (long)rnd((int)obj->quan - 1));
    }

    /* appropriately add damage to bill */
    if (costly_spot(obj->ox, obj->oy)) {
        if (*u.ushops)
            addtobill(obj, FALSE, FALSE, FALSE);
        else
            (void)stolen_value(obj,
                   obj->ox, obj->oy, FALSE, FALSE);
    }

    /* zap the object */
    delobj(obj);
}
/*
* Polymorph the object to the given object ID. If the ID is STRANGE_OBJECT
* then pick random object from the source's class (this is the standard
* "polymorph" case). If ID is set to a specific object, inhibit fusing
* n objects into 1. This could have been added as a flag, but currently
* it is tied to not being the standard polymorph case. The new polymorphed
* object replaces obj in its link chains. Return value is a pointer to
* the new object.
*
* This should be safe to call for an object anywhere.
*/
/* Polymorph obj into object type id (STRANGE_OBJECT = random pick from
 * the same class).  Preserves quantity, blessedness, erosion, worn
 * state, shop status, etc., swaps the new object into obj's place in
 * all chains, and returns the new object.  See the banner comment
 * above for the full contract. */
struct obj *
poly_obj(obj, id)
struct obj *obj;
int id;
{
    struct obj *otmp;
    xchar ox, oy;
    boolean can_merge = (id == STRANGE_OBJECT);
    int obj_location = obj->where;  /* remember: replace_object changes obj */

    if (obj->otyp == BOULDER && In_sokoban(&u.uz))
        change_luck(-1);    /* Sokoban guilt */
    if (id == STRANGE_OBJECT) { /* preserve symbol */
        int try_limit = 3;
        /* Try up to 3 times to make the magic-or-not status of
           the new item be the same as it was for the old one. */
        otmp = (struct obj *)0;
        do {
            if (otmp) delobj(otmp);     /* discard failed attempt */
            otmp = mkobj(obj->oclass, FALSE);
        } while (--try_limit > 0 &&
              objects[obj->otyp].oc_magic != objects[otmp->otyp].oc_magic);
    } else {
        /* literally replace obj with this new thing */
        otmp = mksobj(id, FALSE, FALSE);
    /* Actually more things use corpsenm but they polymorph differently */
#define USES_CORPSENM(typ) ((typ)==CORPSE || (typ)==STATUE || (typ)==FIGURINE)
        if (USES_CORPSENM(obj->otyp) && USES_CORPSENM(id))
            otmp->corpsenm = obj->corpsenm;
#undef USES_CORPSENM
    }

    /* preserve quantity */
    otmp->quan = obj->quan;
    /* preserve the shopkeepers (lack of) interest */
    otmp->no_charge = obj->no_charge;
    /* preserve inventory letter if in inventory */
    if (obj_location == OBJ_INVENT)
        otmp->invlet = obj->invlet;
#ifdef MAIL
    /* You can't send yourself 100 mail messages and then
     * polymorph them into useful scrolls
     */
    if (obj->otyp == SCR_MAIL) {
        otmp->otyp = SCR_MAIL;
        otmp->spe = 1;
    }
#endif

    /* avoid abusing eggs laid by you */
    if (obj->otyp == EGG && obj->spe) {
        int mnum, tryct = 100;

        /* first, turn into a generic egg */
        if (otmp->otyp == EGG)
            kill_egg(otmp);     /* strip hatch timer etc. */
        else {
            otmp->otyp = EGG;
            otmp->owt = weight(otmp);
        }
        otmp->corpsenm = NON_PM;
        otmp->spe = 0;

        /* now change it into something layed by the hero */
        while (tryct--) {
            mnum = can_be_hatched(random_monster());
            if (mnum != NON_PM && !dead_species(mnum, TRUE)) {
                otmp->spe = 1;  /* layed by hero */
                otmp->corpsenm = mnum;
                attach_egg_hatch_timeout(otmp);
                break;
            }
        }
    }

    /* keep special fields (including charges on wands) */
    if (index(charged_objs, otmp->oclass)) otmp->spe = obj->spe;
    otmp->recharged = obj->recharged;

    otmp->cursed = obj->cursed;
    otmp->blessed = obj->blessed;
    otmp->oeroded = obj->oeroded;
    otmp->oeroded2 = obj->oeroded2;
    /* drop erosion bits that make no sense for the new material */
    if (!is_flammable(otmp) && !is_rustprone(otmp)) otmp->oeroded = 0;
    if (!is_corrodeable(otmp) && !is_rottable(otmp)) otmp->oeroded2 = 0;
    if (is_damageable(otmp))
        otmp->oerodeproof = obj->oerodeproof;

    /* Keep chest/box traps and poisoned ammo if we may */
    if (obj->otrapped && Is_box(otmp)) otmp->otrapped = TRUE;

    if (obj->opoisoned && is_poisonable(otmp))
        otmp->opoisoned = TRUE;

    if (id == STRANGE_OBJECT && obj->otyp == CORPSE) {
        /* turn crocodile corpses into shoes */
        if (obj->corpsenm == PM_CROCODILE) {
            otmp->otyp = LOW_BOOTS;
            otmp->oclass = ARMOR_CLASS;
            otmp->spe = 0;
            otmp->oeroded = 0;
            otmp->oerodeproof = TRUE;
            otmp->quan = 1L;
            otmp->cursed = FALSE;
        }
    }

    /* no box contents --KAA */
    if (Has_contents(otmp)) delete_contents(otmp);

    /* 'n' merged objects may be fused into 1 object */
    if (otmp->quan > 1L && (!objects[otmp->otyp].oc_merge ||
                (can_merge && otmp->quan > (long)rn2(1000))))
        otmp->quan = 1L;

    switch (otmp->oclass) {

    case TOOL_CLASS:
        if (otmp->otyp == MAGIC_LAMP) {
            otmp->otyp = OIL_LAMP;
            otmp->age = 1500L;  /* "best" oil lamp possible */
        } else if (otmp->otyp == MAGIC_MARKER) {
            otmp->recharged = 1;    /* degraded quality */
        }
        /* don't care about the recharge count of other tools */
        break;

    case WAND_CLASS:
        /* never create wishing/polymorph wands this way */
        while (otmp->otyp == WAN_WISHING || otmp->otyp == WAN_POLYMORPH)
            otmp->otyp = rnd_class(WAN_LIGHT, WAN_LIGHTNING);
        /* altering the object tends to degrade its quality
           (analogous to spellbook `read count' handling) */
        if ((int)otmp->recharged < rn2(7))  /* recharge_limit */
            otmp->recharged++;
        break;

    case POTION_CLASS:
        while (otmp->otyp == POT_POLYMORPH)
            otmp->otyp = rnd_class(POT_GAIN_ABILITY, POT_WATER);
        break;

    case SPBOOK_CLASS:
        while (otmp->otyp == SPE_POLYMORPH)
            otmp->otyp = rnd_class(SPE_DIG, SPE_BLANK_PAPER);
        /* reduce spellbook abuse */
        otmp->spestudied = obj->spestudied + 1;
        break;

    case GEM_CLASS:
        if (otmp->quan > (long) rnd(4) &&
            objects[obj->otyp].oc_material == MINERAL &&
            objects[otmp->otyp].oc_material != MINERAL) {
            otmp->otyp = ROCK;  /* transmutation backfired */
            otmp->quan /= 2L;   /* some material has been lost */
        }
        break;
    }

    /* update the weight */
    otmp->owt = weight(otmp);

    /* for now, take off worn items being polymorphed */
    if (obj_location == OBJ_INVENT) {
        if (id == STRANGE_OBJECT)
            remove_worn_item(obj, TRUE);
        else {
            /* This is called only for stone to flesh.  It's a lot simpler
             * than it otherwise might be.  We don't need to check for
             * special effects when putting them on (no meat objects have
             * any) and only three worn masks are possible.
             */
            otmp->owornmask = obj->owornmask;
            remove_worn_item(obj, TRUE);
            setworn(otmp, otmp->owornmask);
            if (otmp->owornmask & LEFT_RING)
                uleft = otmp;
            if (otmp->owornmask & RIGHT_RING)
                uright = otmp;
            if (otmp->owornmask & W_WEP)
                uwep = otmp;
            if (otmp->owornmask & W_SWAPWEP)
                uswapwep = otmp;
            if (otmp->owornmask & W_QUIVER)
                uquiver = otmp;
            goto no_unwear;     /* mask already copied above */
        }
    }

    /* preserve the mask in case being used by something else */
    otmp->owornmask = obj->owornmask;
no_unwear:

    /* a boulder that stops being a boulder no longer blocks vision */
    if (obj_location == OBJ_FLOOR && obj->otyp == BOULDER &&
            otmp->otyp != BOULDER)
        unblock_point(obj->ox, obj->oy);

    /* ** we are now done adjusting the object ** */


    /* swap otmp for obj */
    replace_object(obj, otmp);
    if (obj_location == OBJ_INVENT) {
        /*
         * We may need to do extra adjustments for the hero if we're
         * messing with the hero's inventory.  The following calls are
         * equivalent to calling freeinv on obj and addinv on otmp,
         * while doing an in-place swap of the actual objects.
         */
        freeinv_core(obj);
        addinv_core1(otmp);
        addinv_core2(otmp);
    }

    /* shopkeeper gets angry when unpaid goods change under him */
    if ((!carried(otmp) || obj->unpaid) &&
            get_obj_location(otmp, &ox, &oy, BURIED_TOO|CONTAINED_TOO) &&
            costly_spot(ox, oy)) {
        register struct monst *shkp =
            shop_keeper(*in_rooms(ox, oy, SHOPBASE));

        if ((!obj->no_charge ||
             (Has_contents(obj) &&
                (contained_cost(obj, shkp, 0L, FALSE, FALSE) != 0L)))
           && inhishop(shkp)) {
            if(shkp->mpeaceful) {
                if(*u.ushops && *in_rooms(u.ux, u.uy, 0) ==
                        *in_rooms(shkp->mx, shkp->my, 0) &&
                        !costly_spot(u.ux, u.uy))
                    make_angry_shk(shkp, ox, oy);
                else {
                    pline("%s gets angry!", Monnam(shkp));
                    hot_pursuit(shkp);
                }
            } else Norep("%s is furious!", Monnam(shkp));
        }
    }
    delobj(obj);    /* the old object is gone for good */
    return otmp;
}
/*
* Object obj was hit by the effect of the wand/spell otmp. Return
* non-zero if the wand/spell had any effect.
*/
/* Apply wand/spell otmp's effect to floor object obj.
 * Returns non-zero if the object was affected.  Except for
 * SPE_STONE_TO_FLESH, obj is expected to be on the floor. */
int
bhito(obj, otmp)
struct obj *obj, *otmp;
{
    int res = 1;    /* affected object by default */
    xchar refresh_x, refresh_y;

    if (obj->bypass) {
        /* The bypass bit is currently only used as follows:
         *
         * POLYMORPH - When a monster being polymorphed drops something
         *             from its inventory as a result of the change.
         *             If the items fall to the floor, they are not
         *             subject to direct subsequent polymorphing
         *             themselves on that same zap. This makes it
         *             consistent with items that remain in the
         *             monster's inventory. They are not polymorphed
         *             either.
         * UNDEAD_TURNING - When an undead creature gets killed via
         *             undead turning, prevent its corpse from being
         *             immediately revived by the same effect.
         *
         * The bypass bit on all objects is reset each turn, whenever
         * flags.bypasses is set.
         *
         * We check the obj->bypass bit above AND flags.bypasses
         * as a safeguard against any stray occurrence left in an obj
         * struct someplace, although that should never happen.
         */
        if (flags.bypasses)
            return 0;
        else {
#ifdef DEBUG
            pline("%s for a moment.", Tobjnam(obj, "pulsate"));
#endif
            obj->bypass = 0;    /* clear stray bit */
        }
    }

    /*
     * Some parts of this function expect the object to be on the floor
     * obj->{ox,oy} to be valid.  The exception to this (so far) is
     * for the STONE_TO_FLESH spell.
     */
    if (!(obj->where == OBJ_FLOOR || otmp->otyp == SPE_STONE_TO_FLESH))
        impossible("bhito: obj is not floor or Stone To Flesh spell");

    if (obj == uball) {
        res = 0;    /* the hero's iron ball is immune */
    } else if (obj == uchain) {
        /* opening effects free the hero from punishment */
        if (otmp->otyp == WAN_OPENING || otmp->otyp == SPE_KNOCK) {
            unpunish();
            makeknown(otmp->otyp);
        } else
            res = 0;
    } else
    switch(otmp->otyp) {
    case WAN_POLYMORPH:
    case SPE_POLYMORPH:
        /* polymorph sources themselves are immune, plus a saving throw */
        if (obj->otyp == WAN_POLYMORPH ||
                obj->otyp == SPE_POLYMORPH ||
                obj->otyp == POT_POLYMORPH ||
                obj_resists(obj, 5, 95)) {
            res = 0;
            break;
        }
        /* KMH, conduct */
        u.uconduct.polypiles++;
        /* any saved lock context will be dangerously obsolete */
        if (Is_box(obj)) (void) boxlock(obj, otmp);

        if (obj_shudders(obj)) {
            /* object destroyed instead of polymorphed */
            if (cansee(obj->ox, obj->oy))
                makeknown(otmp->otyp);
            do_osshock(obj);
            break;
        }
        obj = poly_obj(obj, STRANGE_OBJECT);
        newsym(obj->ox,obj->oy);
        break;
    case WAN_PROBING:
        res = !obj->dknown;
        /* target object has now been "seen (up close)" */
        obj->dknown = 1;
        if (Is_container(obj) || obj->otyp == STATUE) {
            if (!obj->cobj)
                pline("%s empty.", Tobjnam(obj, "are"));
            else {
                struct obj *o;
                /* view contents (not recursively) */
                for (o = obj->cobj; o; o = o->nobj)
                    o->dknown = 1;  /* "seen", even if blind */
                (void) display_cinventory(obj);
            }
            res = 1;
        }
        if (res) makeknown(WAN_PROBING);
        break;
    case WAN_STRIKING:
    case SPE_FORCE_BOLT:
        if (obj->otyp == BOULDER)
            fracture_rock(obj);
        else if (obj->otyp == STATUE)
            (void) break_statue(obj);
        else {
            /* hero_breaks handles shop billing; breaks is for monsters */
            if (!flags.mon_moving)
                (void)hero_breaks(obj, obj->ox, obj->oy, FALSE);
            else
                (void)breaks(obj, obj->ox, obj->oy);
            res = 0;
        }
        /* BUG[?]: shouldn't this depend upon you seeing it happen? */
        makeknown(otmp->otyp);
        break;
    case WAN_CANCELLATION:
    case SPE_CANCELLATION:
        cancel_item(obj);
#ifdef TEXTCOLOR
        newsym(obj->ox,obj->oy);    /* might change color */
#endif
        break;
    case SPE_DRAIN_LIFE:
        (void) drain_item(obj);
        break;
    case WAN_TELEPORTATION:
    case SPE_TELEPORT_AWAY:
        rloco(obj);
        break;
    case WAN_MAKE_INVISIBLE:
#ifdef INVISIBLE_OBJECTS
        obj->oinvis = TRUE;
        newsym(obj->ox,obj->oy);    /* make object disappear */
#endif
        break;
    case WAN_UNDEAD_TURNING:
    case SPE_TURN_UNDEAD:
        if (obj->otyp == EGG)
            revive_egg(obj);
        else
            res = !!revive(obj);    /* revive() returns the monster or 0 */
        break;
    case WAN_OPENING:
    case SPE_KNOCK:
    case WAN_LOCKING:
    case SPE_WIZARD_LOCK:
        if(Is_box(obj))
            res = boxlock(obj, otmp);
        else
            res = 0;
        if (res /* && otmp->oclass == WAND_CLASS */)
            makeknown(otmp->otyp);
        break;
    case WAN_SLOW_MONSTER:      /* no effect on objects */
    case SPE_SLOW_MONSTER:
    case WAN_SPEED_MONSTER:
    case WAN_NOTHING:
    case SPE_HEALING:
    case SPE_EXTRA_HEALING:
        res = 0;
        break;
    case SPE_STONE_TO_FLESH:
        refresh_x = obj->ox; refresh_y = obj->oy;
        /* only stone-ish materials are affected */
        if (objects[obj->otyp].oc_material != MINERAL &&
                objects[obj->otyp].oc_material != GEMSTONE) {
            res = 0;
            break;
        }
        /* add more if stone objects are added.. */
        switch (objects[obj->otyp].oc_class) {
            case ROCK_CLASS:    /* boulders and statues */
            if (obj->otyp == BOULDER) {
                obj = poly_obj(obj, HUGE_CHUNK_OF_MEAT);
                goto smell;
            } else if (obj->otyp == STATUE) {
                xchar oox, ooy;

                (void) get_obj_location(obj, &oox, &ooy, 0);
                refresh_x = oox; refresh_y = ooy;
                if (vegetarian(&mons[obj->corpsenm])) {
                    /* Don't animate monsters that aren't flesh */
                    obj = poly_obj(obj, MEATBALL);
                    goto smell;
                }
                if (!animate_statue(obj, oox, ooy,
                            ANIMATE_SPELL, (int *)0)) {
                    struct obj *item;
                    /* makecorpse: shared tail also reached via goto
                       from the figurine case below */
makecorpse:         if (mons[obj->corpsenm].geno &
                            (G_NOCORPSE|G_UNIQ)) {
                        res = 0;
                        break;
                    }
                    /* Unlikely to get here since genociding
                     * monsters also sets the G_NOCORPSE flag.
                     * Drop the contents, poly_obj looses them.
                     */
                    while ((item = obj->cobj) != 0) {
                        obj_extract_self(item);
                        place_object(item, oox, ooy);
                    }
                    obj = poly_obj(obj, CORPSE);
                    break;
                }
            } else { /* new rock class object... */
                /* impossible? */
                res = 0;
            }
            break;
            case TOOL_CLASS:    /* figurine */
            {
                struct monst *mon;
                xchar oox, ooy;

                if (obj->otyp != FIGURINE) {
                    res = 0;
                    break;
                }
                if (vegetarian(&mons[obj->corpsenm])) {
                    /* Don't animate monsters that aren't flesh */
                    obj = poly_obj(obj, MEATBALL);
                    goto smell;
                }
                (void) get_obj_location(obj, &oox, &ooy, 0);
                refresh_x = oox; refresh_y = ooy;
                mon = makemon(&mons[obj->corpsenm],
                          oox, ooy, NO_MM_FLAGS);
                if (mon) {
                    delobj(obj);
                    if (cansee(mon->mx, mon->my))
                        pline_The("figurine animates!");
                    break;
                }
                goto makecorpse;
            }
            /* maybe add weird things to become? */
            case RING_CLASS:    /* some of the rings are stone */
            obj = poly_obj(obj, MEAT_RING);
            goto smell;
            case WAND_CLASS:    /* marble wand */
            obj = poly_obj(obj, MEAT_STICK);
            goto smell;
            case GEM_CLASS:     /* rocks & gems */
            obj = poly_obj(obj, MEATBALL);
smell:
            /* vegetarians et al. find the new meat unappetizing */
            if (herbivorous(youmonst.data) &&
                (!carnivorous(youmonst.data) ||
                 Role_if(PM_MONK) || !u.uconduct.unvegetarian))
                Norep("You smell the odor of meat.");
            else
                Norep("You smell a delicious smell.");
            break;
            case WEAPON_CLASS:  /* crysknife */
            /* fall through */
            default:
            res = 0;
            break;
        }
        newsym(refresh_x, refresh_y);
        break;
    default:
        impossible("What an interesting effect (%d)", otmp->otyp);
        break;
    }
    return res;
}
/* returns nonzero if something was hit */
/* Apply fhito (usually bhito) to every object in the pile at (tx,ty);
 * returns the number of objects that were affected.  If any object
 * scheduled a golem material in poly_zapped, create it afterwards. */
int
bhitpile(obj,fhito,tx,ty)
struct obj *obj;
int FDECL((*fhito), (OBJ_P,OBJ_P));
int tx, ty;
{
    register struct obj *curr, *nxt;
    int hits = 0;

    if (obj->otyp == WAN_STRIKING || obj->otyp == SPE_FORCE_BOLT) {
        struct trap *trap = t_at(tx, ty);

        /* We can't settle for the default calling sequence of
           bhito(otmp) -> break_statue(otmp) -> activate_statue_trap(ox,oy)
           because that last call might end up operating on our `nxt'
           (below), rather than on the current object, if it happens to
           encounter a statue which mustn't become animated. */
        if (trap && trap->ttyp == STATUE_TRAP
            && activate_statue_trap(trap, tx, ty, TRUE)
            && obj->otyp == WAN_STRIKING)
            makeknown(obj->otyp);
    }

    poly_zapped = -1;
    for (curr = level.objects[tx][ty]; curr; curr = nxt) {
        /* grab the successor first: fhito may delete or replace curr */
        nxt = curr->nexthere;
        hits += (*fhito)(curr, obj);
    }
    if (poly_zapped >= 0)
        create_polymon(level.objects[tx][ty], poly_zapped);

    return hits;
}
#endif /*OVLB*/
#ifdef OVL1
/*
* zappable - returns 1 if zap is available, 0 otherwise.
* it removes a charge from the wand if zappable.
* added by GAN 11/03/86
*/
int
zappable(wand)
register struct obj *wand;
{
    /* spe < 0: wand already wrested; spe == 0: only a 1-in-121 chance
       of wresting one final charge out of it */
    if(wand->spe < 0 || (wand->spe == 0 && rn2(121)))
        return 0;
    if(wand->spe == 0)
        You("wrest one last charge from the worn-out wand.");
    wand->spe--;    /* a successful wrest leaves spe at -1 */
    return 1;
}
/*
* zapnodir - zaps a NODIR wand/spell.
* added by GAN 11/03/86
*/
/* Perform the effect of a non-directional wand or spell; identifies
 * the item (with a small experience reward) when the effect was
 * observable and the item wasn't already known. */
void
zapnodir(obj)
register struct obj *obj;
{
    boolean known = FALSE;

    switch(obj->otyp) {
        case WAN_LIGHT:
        case SPE_LIGHT:
            litroom(TRUE,obj);
            if (!Blind) known = TRUE;
            break;
        case WAN_SECRET_DOOR_DETECTION:
        case SPE_DETECT_UNSEEN:
            /* findit() returns false when nothing was revealed */
            if(!findit()) return;
            if (!Blind) known = TRUE;
            break;
        case WAN_CREATE_MONSTER:
            /* usually 1 monster; 1-in-23 chance of 2..8 */
            known = create_critters(rn2(23) ? 1 : rn1(7,2),
                    (struct permonst *)0);
            break;
        case WAN_WISHING:
            known = TRUE;
            /* bad Luck can waste the wish entirely */
            if(Luck + rn2(5) < 0) {
                pline("Unfortunately, nothing happens.");
                break;
            }
            makewish();
            break;
        case WAN_ENLIGHTENMENT:
            known = TRUE;
            You_feel("self-knowledgeable...");
            display_nhwindow(WIN_MESSAGE, FALSE);
            enlightenment(FALSE);
            pline_The("feeling subsides.");
            exercise(A_WIS, TRUE);
            break;
    }
    if (known && !objects[obj->otyp].oc_name_known) {
        makeknown(obj->otyp);
        more_experienced(0,10);
    }
}
#endif /*OVL1*/
#ifdef OVL0
/* A cursed wand backfired: it explodes in the zapper's face,
 * damaging the hero and using the wand up. */
STATIC_OVL void
backfire(wand)
struct obj *wand;
{
    wand->in_use = TRUE;    /* in case losehp() is fatal */
    pline("%s suddenly explodes!", The(xname(wand)));
    losehp(d(wand->spe + 2, 6), "exploding wand", KILLED_BY_AN);
    useup(wand);
}
/* object classes the player may choose from for the #zap command */
static NEARDATA const char zap_syms[] = { WAND_CLASS, 0 };
/* The #zap command: pick a wand, spend a charge, and apply its effect.
 * Returns 1 if a move was used, 0 otherwise. */
int
dozap()
{
    register struct obj *obj;
    int damage;

    if(check_capacity((char *)0)) return(0);
    obj = getobj(zap_syms, "zap");
    if(!obj) return(0);

    check_unpaid(obj);

    /* zappable addition done by GAN 11/03/86 */
    if(!zappable(obj)) pline(nothing_happens);
    else if(obj->cursed && !rn2(100)) {
        backfire(obj);  /* the wand blows up in your face! */
        exercise(A_STR, FALSE);
        return(1);
    } else if(!(objects[obj->otyp].oc_dir == NODIR) && !getdir((char *)0)) {
        if (!Blind)
            pline("%s glows and fades.", The(xname(obj)));
        /* make him pay for knowing !NODIR */
    } else if(!u.dx && !u.dy && !u.dz && !(objects[obj->otyp].oc_dir == NODIR)) {
        /* directional wand aimed at yourself */
        if ((damage = zapyourself(obj, TRUE)) != 0) {
            char buf[BUFSZ];
            Sprintf(buf, "zapped %sself with a wand", uhim());
            losehp(damage, buf, NO_KILLER_PREFIX);
        }
    } else {

        /* Are we having fun yet?
         * weffects -> buzz(obj->otyp) -> zhitm (temple priest) ->
         * attack -> hitum -> known_hitum -> ghod_hitsu ->
         * buzz(AD_ELEC) -> destroy_item(WAND_CLASS) ->
         * useup -> obfree -> dealloc_obj -> free(obj)
         */
        /* current_wand lets the chain above tell us obj was freed */
        current_wand = obj;
        weffects(obj);
        obj = current_wand;
        current_wand = 0;
    }
    if (obj && obj->spe < 0) {
        pline("%s to dust.", Tobjnam(obj, "turn"));
        useup(obj);
    }
    update_inventory(); /* maybe used a charge */
    return(1);
}
/* The hero zapped obj (wand, spell, or horn) at him/herself.
 * ordinary is TRUE for a normal deliberate zap, FALSE when the item is
 * breaking/exploding.  Returns the hit-point damage the caller should
 * inflict (0 when the effect handled any damage itself). */
int
zapyourself(obj, ordinary)
struct obj *obj;
boolean ordinary;
{
    int damage = 0;
    char buf[BUFSZ];

    switch(obj->otyp) {
        case WAN_STRIKING:
            makeknown(WAN_STRIKING);
            /* FALLTHRU */
        case SPE_FORCE_BOLT:
            if(Antimagic) {
                shieldeff(u.ux, u.uy);
                pline("Boing!");
            } else {
                if (ordinary) {
                    You("bash yourself!");
                    damage = d(2,12);
                } else
                    damage = d(1 + obj->spe,6); /* exploding wand */
                exercise(A_STR, FALSE);
            }
            break;

        case WAN_LIGHTNING:
            makeknown(WAN_LIGHTNING);
            if (!Shock_resistance) {
                You("shock yourself!");
                damage = d(12,6);
                exercise(A_CON, FALSE);
            } else {
                shieldeff(u.ux, u.uy);
                You("zap yourself, but seem unharmed.");
                ugolemeffects(AD_ELEC, d(12,6));
            }
            destroy_item(WAND_CLASS, AD_ELEC);
            destroy_item(RING_CLASS, AD_ELEC);
            if (!resists_blnd(&youmonst)) {
                /* the flash blinds even when shock-resistant */
                You(are_blinded_by_the_flash);
                make_blinded((long)rnd(100),FALSE);
                if (!Blind) Your(vision_clears);
            }
            break;

        case SPE_FIREBALL:
            You("explode a fireball on top of yourself!");
            explode(u.ux, u.uy, 11, d(6,6), WAND_CLASS, EXPL_FIERY);
            break;
        case WAN_FIRE:
            makeknown(WAN_FIRE);
            /* FALLTHRU */
        case FIRE_HORN:
            if (Fire_resistance) {
                shieldeff(u.ux, u.uy);
                You_feel("rather warm.");
                ugolemeffects(AD_FIRE, d(12,6));
            } else {
                pline("You've set yourself afire!");
                damage = d(12,6);
            }
            burn_away_slime();
            (void) burnarmor(&youmonst);
            destroy_item(SCROLL_CLASS, AD_FIRE);
            destroy_item(POTION_CLASS, AD_FIRE);
            destroy_item(SPBOOK_CLASS, AD_FIRE);
            break;

        case WAN_COLD:
            makeknown(WAN_COLD);
            /* FALLTHRU */
        case SPE_CONE_OF_COLD:
        case FROST_HORN:
            if (Cold_resistance) {
                shieldeff(u.ux, u.uy);
                You_feel("a little chill.");
                ugolemeffects(AD_COLD, d(12,6));
            } else {
                You("imitate a popsicle!");
                damage = d(12,6);
            }
            destroy_item(POTION_CLASS, AD_COLD);
            break;

        case WAN_MAGIC_MISSILE:
            makeknown(WAN_MAGIC_MISSILE);
            /* FALLTHRU */
        case SPE_MAGIC_MISSILE:
            if(Antimagic) {
                shieldeff(u.ux, u.uy);
                pline_The("missiles bounce!");
            } else {
                damage = d(4,6);
                pline("Idiot!  You've shot yourself!");
            }
            break;

        case WAN_POLYMORPH:
            if (!Unchanging)
                makeknown(WAN_POLYMORPH);
            /* FALLTHRU */
        case SPE_POLYMORPH:
            if (!Unchanging)
                polyself(FALSE);
            break;

        case WAN_CANCELLATION:
        case SPE_CANCELLATION:
            (void) cancel_monst(&youmonst, obj, TRUE, FALSE, TRUE);
            break;

        case SPE_DRAIN_LIFE:
            if (!Drain_resistance) {
                losexp("life drainage");
                makeknown(obj->otyp);
            }
            damage = 0; /* No additional damage */
            break;

        case WAN_MAKE_INVISIBLE: {
            /* have to test before changing HInvis but must change
             * HInvis before doing newsym().
             */
            int msg = !Invis && !Blind && !BInvis;

            if (BInvis && uarmc->otyp == MUMMY_WRAPPING) {
                /* A mummy wrapping absorbs it and protects you */
                You_feel("rather itchy under your %s.", xname(uarmc));
                break;
            }
            if (ordinary || !rn2(10)) { /* permanent */
                HInvis |= FROMOUTSIDE;
            } else {            /* temporary */
                incr_itimeout(&HInvis, d(obj->spe, 250));
            }
            if (msg) {
                makeknown(WAN_MAKE_INVISIBLE);
                newsym(u.ux, u.uy);
                self_invis_message();
            }
            break;
        }

        case WAN_SPEED_MONSTER:
            if (!(HFast & INTRINSIC)) {
                if (!Fast)
                    You("speed up.");
                else
                    Your("quickness feels more natural.");
                makeknown(WAN_SPEED_MONSTER);
                exercise(A_DEX, TRUE);
            }
            HFast |= FROMOUTSIDE;   /* permanent intrinsic speed */
            break;

        case WAN_SLEEP:
            makeknown(WAN_SLEEP);
            /* FALLTHRU */
        case SPE_SLEEP:
            if(Sleep_resistance) {
                shieldeff(u.ux, u.uy);
                You("don't feel sleepy!");
            } else {
                pline_The("sleep ray hits you!");
                fall_asleep(-rnd(50), TRUE);
            }
            break;

        case WAN_SLOW_MONSTER:
        case SPE_SLOW_MONSTER:
            if(HFast & (TIMEOUT | INTRINSIC)) {
                u_slow_down();
                makeknown(obj->otyp);
            }
            break;

        case WAN_TELEPORTATION:
        case SPE_TELEPORT_AWAY:
            tele();
            break;

        case WAN_DEATH:
        case SPE_FINGER_OF_DEATH:
            if (nonliving(youmonst.data) || is_demon(youmonst.data)) {
                /* death magic can't kill what isn't alive */
                pline((obj->otyp == WAN_DEATH) ?
                  "The wand shoots an apparently harmless beam at you."
                  : "You seem no deader than before.");
                break;
            }
            Sprintf(buf, "shot %sself with a death ray", uhim());
            killer = buf;
            killer_format = NO_KILLER_PREFIX;
            You("irradiate yourself with pure energy!");
            You("die.");
            makeknown(obj->otyp);
            /* They might survive with an amulet of life saving */
            done(DIED);
            break;

        case WAN_UNDEAD_TURNING:
            makeknown(WAN_UNDEAD_TURNING);
            /* FALLTHRU */
        case SPE_TURN_UNDEAD:
            (void) unturn_dead(&youmonst);
            if (is_undead(youmonst.data)) {
                You_feel("frightened and %sstunned.",
                     Stunned ? "even more " : "");
                make_stunned(HStun + rnd(30), FALSE);
            } else
                You("shudder in dread.");
            break;

        case SPE_HEALING:
        case SPE_EXTRA_HEALING:
            healup(d(6, obj->otyp == SPE_EXTRA_HEALING ? 8 : 4),
                   0, FALSE, (obj->otyp == SPE_EXTRA_HEALING));
            You_feel("%sbetter.",
                obj->otyp == SPE_EXTRA_HEALING ? "much " : "");
            break;

        case WAN_LIGHT:     /* (broken wand) */
            /* assert( !ordinary ); */
            damage = d(obj->spe, 25);   /* then FALLTHRU: shared blinding */
#ifdef TOURIST
        case EXPENSIVE_CAMERA:
#endif
            damage += rnd(25);
            if (!resists_blnd(&youmonst)) {
                You(are_blinded_by_the_flash);
                make_blinded((long)damage, FALSE);
                makeknown(obj->otyp);
                if (!Blind) Your(vision_clears);
            }
            damage = 0; /* reset */
            break;

        case WAN_OPENING:
            if (Punished) makeknown(WAN_OPENING);
            /* FALLTHRU */
        case SPE_KNOCK:
            if (Punished) Your("chain quivers for a moment.");
            break;
        case WAN_DIGGING:
        case SPE_DIG:
        case SPE_DETECT_UNSEEN:
        case WAN_NOTHING:
        case WAN_LOCKING:
        case SPE_WIZARD_LOCK:
            break;  /* no self-zap effect */
        case WAN_PROBING:
            for (obj = invent; obj; obj = obj->nobj)
                obj->dknown = 1;
            /* note: `obj' reused; doesn't point at wand anymore */
            makeknown(WAN_PROBING);
            ustatusline();
            break;
        case SPE_STONE_TO_FLESH:
            {
            struct obj *otemp, *onext;
            boolean didmerge;

            if (u.umonnum == PM_STONE_GOLEM)
                (void) polymon(PM_FLESH_GOLEM);
            if (Stoned) fix_petrification();    /* saved! */
            /* but at a cost.. */
            for (otemp = invent; otemp; otemp = onext) {
                onext = otemp->nobj;    /* bhito may delete otemp */
                (void) bhito(otemp, obj);
            }
            /*
             * It is possible that we can now merge some inventory.
             * Do a higly paranoid merge.  Restart from the beginning
             * until no merges.
             */
            do {
                didmerge = FALSE;
                for (otemp = invent; !didmerge && otemp; otemp = otemp->nobj)
                    for (onext = otemp->nobj; onext; onext = onext->nobj)
                        if (merged(&otemp, &onext)) {
                            didmerge = TRUE;
                            break;
                        }
            } while (didmerge);
            }
            break;
        default: impossible("object %d used?",obj->otyp);
            break;
    }
    return(damage);
}
#ifdef STEED
/* you've zapped a wand downwards while riding
* Return TRUE if the steed was hit by the wand.
* Return FALSE if the steed was not hit by the wand.
*/
/* you've zapped a wand downwards while riding
 * Return TRUE if the steed was hit by the wand.
 * Return FALSE if the steed was not hit by the wand.
 */
STATIC_OVL boolean
zap_steed(obj)
struct obj *obj;    /* wand or spell */
{
    int steedhit = FALSE;

    switch (obj->otyp) {

       /*
        * Wands that are allowed to hit the steed
        * Carefully test the results of any that are
        * moved here from the bottom section.
        */
        case WAN_PROBING:
            probe_monster(u.usteed);
            makeknown(WAN_PROBING);
            steedhit = TRUE;
            break;
        case WAN_TELEPORTATION:
        case SPE_TELEPORT_AWAY:
            /* you go together */
            tele();
            /* identified when the hero can tell a teleport happened */
            if(Teleport_control || !couldsee(u.ux0, u.uy0) ||
                (distu(u.ux0, u.uy0) >= 16))
                makeknown(obj->otyp);
            steedhit = TRUE;
            break;

        /* Default processing via bhitm() for these */
        case SPE_CURE_SICKNESS:
        case WAN_MAKE_INVISIBLE:
        case WAN_CANCELLATION:
        case SPE_CANCELLATION:
        case WAN_POLYMORPH:
        case SPE_POLYMORPH:
        case WAN_STRIKING:
        case SPE_FORCE_BOLT:
        case WAN_SLOW_MONSTER:
        case SPE_SLOW_MONSTER:
        case WAN_SPEED_MONSTER:
        case SPE_HEALING:
        case SPE_EXTRA_HEALING:
        case SPE_DRAIN_LIFE:
        case WAN_OPENING:
        case SPE_KNOCK:
            (void) bhitm(u.usteed, obj);
            steedhit = TRUE;
            break;

        default:
            steedhit = FALSE;   /* effect passes through to the floor */
            break;
    }
    return steedhit;
}
#endif
#endif /*OVL0*/
#ifdef OVL3
/*
* cancel a monster (possibly the hero). inventory is cancelled only
* if the monster is zapping itself directly, since otherwise the
* effect is too strong. currently non-hero monsters do not zap
* themselves with cancellation.
*/
/* Cancel monster mdef (possibly the hero) via item obj.
 * youattack/youdefend describe who is involved; self_cancel means the
 * monster zapped itself, which also cancels its inventory;
 * allow_cancel_kill permits the instant death of clay golems.
 * Returns FALSE if the cancellation was resisted. */
boolean
cancel_monst(mdef, obj, youattack, allow_cancel_kill, self_cancel)
register struct monst *mdef;
register struct obj *obj;
boolean youattack, allow_cancel_kill, self_cancel;
{
    boolean youdefend = (mdef == &youmonst);
    static const char writing_vanishes[] =
                "Some writing vanishes from %s head!";
    static const char your[] = "your";  /* should be extern */

    if (youdefend ? (!youattack && Antimagic)
            : resist(mdef, obj->oclass, 0, NOTELL))
        return FALSE;       /* resisted cancellation */

    if (self_cancel) {      /* 1st cancel inventory */
        struct obj *otmp;

        for (otmp = (youdefend ? invent : mdef->minvent);
                otmp; otmp = otmp->nobj)
            cancel_item(otmp);
        if (youdefend) {
            flags.botl = 1; /* potential AC change */
            find_ac();
        }
    }

    /* now handle special cases */
    if (youdefend) {
        if (Upolyd) {
            if ((u.umonnum == PM_CLAY_GOLEM) && !Blind)
                pline(writing_vanishes, your);

            if (Unchanging)
                Your("amulet grows hot for a moment, then cools.");
            else
                rehumanize();
        }
    } else {
        mdef->mcan = TRUE;

        /* werecreatures revert to their human form when cancelled */
        if (is_were(mdef->data) && mdef->data->mlet != S_HUMAN)
            were_change(mdef);

        /* erasing the "emet" kills a clay golem outright */
        if (mdef->data == &mons[PM_CLAY_GOLEM]) {
            if (canseemon(mdef))
                pline(writing_vanishes, s_suffix(mon_nam(mdef)));

            if (allow_cancel_kill) {
                if (youattack)
                    killed(mdef);
                else
                    monkilled(mdef, "", AD_SPEL);
            }
        }
    }
    return TRUE;
}
/* you've zapped an immediate type wand up or down */
/* you've zapped an immediate type wand up or down */
STATIC_OVL boolean
zap_updown(obj)
struct obj *obj;    /* wand or spell */
{
    boolean striking = FALSE, disclose = FALSE;
    int x, y, xx, yy, ptmp;
    struct obj *otmp;
    struct engr *e;
    struct trap *ttmp;
    char buf[BUFSZ];

    /* some wands have special effects other than normal bhitpile */
    /* drawbridge might change <u.ux,u.uy> */
    x = xx = u.ux;  /* <x,y> is zap location */
    y = yy = u.uy;  /* <xx,yy> is drawbridge (portcullis) position */
    ttmp = t_at(x, y);  /* trap if there is one */

    switch (obj->otyp) {
    case WAN_PROBING:
        ptmp = 0;
        if (u.dz < 0) {
            You("probe towards the %s.", ceiling(x,y));
        } else {
            ptmp += bhitpile(obj, bhito, x, y);
            You("probe beneath the %s.", surface(x,y));
            ptmp += display_binventory(x, y, TRUE);
        }
        if (!ptmp) Your("probe reveals nothing.");
        return TRUE;    /* we've done our own bhitpile */
    case WAN_OPENING:
    case SPE_KNOCK:
        /* up or down, but at closed portcullis only */
        if (is_db_wall(x,y) && find_drawbridge(&xx, &yy)) {
            open_drawbridge(xx, yy);
            disclose = TRUE;
        } else if (u.dz > 0 && (x == xdnstair && y == ydnstair) &&
                /* can't use the stairs down to quest level 2 until
                   leader "unlocks" them; give feedback if you try */
                on_level(&u.uz, &qstart_level) && !ok_to_quest()) {
            pline_The("stairs seem to ripple momentarily.");
            disclose = TRUE;
        }
        break;
    case WAN_STRIKING:
    case SPE_FORCE_BOLT:
        striking = TRUE;
        /*FALLTHRU*/
    case WAN_LOCKING:
    case SPE_WIZARD_LOCK:
        /* down at open bridge or up or down at open portcullis */
        if ((levl[x][y].typ == DRAWBRIDGE_DOWN) ? (u.dz > 0) :
            (is_drawbridge_wall(x,y) && !is_db_wall(x,y)) &&
                find_drawbridge(&xx, &yy)) {
            /* striking destroys the bridge; locking closes it */
            if (!striking)
                close_drawbridge(xx, yy);
            else
                destroy_drawbridge(xx, yy);
            disclose = TRUE;
        } else if (striking && u.dz < 0 && rn2(3) &&
                !Is_airlevel(&u.uz) && !Is_waterlevel(&u.uz) &&
                !Underwater && !Is_qstart(&u.uz)) {
            /* similar to zap_dig() */
            pline("A rock is dislodged from the %s and falls on your %s.",
                  ceiling(x, y), body_part(HEAD));
            losehp(rnd((uarmh && is_metallic(uarmh)) ? 2 : 6),
                   "falling rock", KILLED_BY_AN);
            if ((otmp = mksobj_at(ROCK, x, y, FALSE, FALSE)) != 0) {
                (void)xname(otmp);  /* set dknown, maybe bknown */
                stackobj(otmp);
            }
            newsym(x, y);
        } else if (!striking && ttmp && ttmp->ttyp == TRAPDOOR && u.dz > 0) {
            /* locking magic seals a trap door shut */
            if (!Blind) {
                if (ttmp->tseen) {
                    pline("A trap door beneath you closes up then vanishes.");
                    disclose = TRUE;
                } else {
                    You("see a swirl of %s beneath you.",
                        is_ice(x,y) ? "frost" : "dust");
                }
            } else {
                You_hear("a twang followed by a thud.");
            }
            deltrap(ttmp);
            ttmp = (struct trap *)0;
            newsym(x, y);
        }
        break;
    case SPE_STONE_TO_FLESH:
        if (Is_airlevel(&u.uz) || Is_waterlevel(&u.uz) ||
                Underwater || (Is_qstart(&u.uz) && u.dz < 0)) {
            pline(nothing_happens);
        } else if (u.dz < 0) {  /* we should do more... */
            pline("Blood drips on your %s.", body_part(FACE));
        } else if (u.dz > 0 && !OBJ_AT(u.ux, u.uy)) {
            /*
            Print this message only if there wasn't an engraving
            affected here.  If water or ice, act like waterlevel case.
            */
            e = engr_at(u.ux, u.uy);
            if (!(e && e->engr_type == ENGRAVE)) {
                if (is_pool(u.ux, u.uy) || is_ice(u.ux, u.uy))
                    pline(nothing_happens);
                else
                    pline("Blood %ss %s your %s.",
                          is_lava(u.ux, u.uy) ? "boil" : "pool",
                          Levitation ? "beneath" : "at",
                          makeplural(body_part(FOOT)));
            }
        }
        break;
    default:
        break;
    }

    if (u.dz > 0) {
        /* zapping downward */
        (void) bhitpile(obj, bhito, x, y);

        /* subset of engraving effects; none sets `disclose' */
        if ((e = engr_at(x, y)) != 0 && e->engr_type != HEADSTONE) {
            switch (obj->otyp) {
            case WAN_POLYMORPH:
            case SPE_POLYMORPH:
                del_engr(e);
                make_engr_at(x, y, random_engraving(buf), moves, (xchar)0);
                break;
            case WAN_CANCELLATION:
            case SPE_CANCELLATION:
            case WAN_MAKE_INVISIBLE:
                del_engr(e);
                break;
            case WAN_TELEPORTATION:
            case SPE_TELEPORT_AWAY:
                rloc_engr(e);
                break;
            case SPE_STONE_TO_FLESH:
                if (e->engr_type == ENGRAVE) {
                    /* only affects things in stone */
                    pline_The(Hallucination ?
                        "floor runs like butter!" :
                        "edges on the floor get smoother.");
                    wipe_engr_at(x, y, d(2,4));
                }
                break;
            case WAN_STRIKING:
            case SPE_FORCE_BOLT:
                wipe_engr_at(x, y, d(2,4));
                break;
            default:
                break;
            }
        }
    }

    return disclose;
}
#endif /*OVL3*/
#ifdef OVLB
/* called for various wand and spell effects - <NAME> */
/* Dispatch the effect of zapping obj in the direction <u.dx,u.dy,u.dz>;
 * identifies the item afterwards when the effect disclosed what it was. */
void
weffects(obj)
register struct obj *obj;
{
    int otyp = obj->otyp;
    boolean disclose = FALSE, was_unkn = !objects[otyp].oc_name_known;

    exercise(A_WIS, TRUE);
#ifdef STEED
    /* zapping straight down while riding may hit the steed first;
       note: braces below keep the `else' attached to this `if' */
    if (u.usteed && (objects[otyp].oc_dir != NODIR) &&
            !u.dx && !u.dy && (u.dz > 0) && zap_steed(obj)) {
        disclose = TRUE;
    } else
#endif
    if (objects[otyp].oc_dir == IMMEDIATE) {
        obj_zapped = FALSE;

        if (u.uswallow) {
            (void) bhitm(u.ustuck, obj);
            /* [how about `bhitpile(u.ustuck->minvent)' effect?] */
        } else if (u.dz) {
            disclose = zap_updown(obj);
        } else {    /* zap on the horizontal plane */
            (void) bhit(u.dx,u.dy, rn1(8,6),ZAPPED_WAND, bhitm,bhito, obj);
        }

        /* give a clue if obj_zapped */
        if (obj_zapped)
            You_feel("shuddering vibrations.");

    } else if (objects[otyp].oc_dir == NODIR) {
        zapnodir(obj);

    } else {
        /* neither immediate nor directionless */

        if (otyp == WAN_DIGGING || otyp == SPE_DIG)
            zap_dig();
        else if (otyp >= SPE_MAGIC_MISSILE && otyp <= SPE_FINGER_OF_DEATH)
            /* spell rays use buzz types offset by 10 from wand rays */
            buzz(otyp - SPE_MAGIC_MISSILE + 10,
                 u.ulevel / 2 + 1,
                 u.ux, u.uy, u.dx, u.dy);
        else if (otyp >= WAN_MAGIC_MISSILE && otyp <= WAN_LIGHTNING)
            buzz(otyp - WAN_MAGIC_MISSILE,
                 (otyp == WAN_MAGIC_MISSILE) ? 2 : 6,
                 u.ux, u.uy, u.dx, u.dy);
        else
            impossible("weffects: unexpected spell or wand");
        disclose = TRUE;
    }
    if (disclose && was_unkn) {
        makeknown(otyp);
        more_experienced(0,10);
    }
    return;
}
#endif /*OVLB*/
#ifdef OVL0
/*
* Generate the to damage bonus for a spell. Based on the hero's intelligence
*/
/*
 * Generate the to-damage bonus for a spell, based on the hero's
 * intelligence.  Low intelligence is penalized even at low experience
 * levels, so it doesn't only start mattering once the hero is strong.
 */
int
spell_damage_bonus()
{
    int bonus;
    int intelligence = ACURR(A_INT);
    if (intelligence < 10) {
        /* dumb casters are always penalized */
        bonus = -3;
    } else if (u.ulevel < 5 || intelligence < 14) {
        /* low level or merely average Int: no adjustment */
        bonus = 0;
    } else if (intelligence <= 18) {
        bonus = 1;
    } else {
        /* Int 19+ (e.g. helm of brilliance) */
        bonus = 2;
    }
    return bonus;
}
/*
* Generate the to hit bonus for a spell. Based on the hero's skill in
* spell class and dexterity.
*/
/*
 * Generate the to-hit bonus for a spell, based on the hero's skill in
 * the spell's class and on dexterity.
 */
STATIC_OVL int
spell_hit_bonus(skill)
int skill;
{
    int bonus = 0;
    int dexterity = ACURR(A_DEX);
    /* skill in the spell's school sets the base bonus */
    switch (P_SKILL(spell_skilltype(skill))) {
    case P_ISRESTRICTED:
    case P_UNSKILLED:
        bonus = -4;
        break;
    case P_BASIC:
        bonus = 0;
        break;
    case P_SKILLED:
        bonus = 2;
        break;
    case P_EXPERT:
        bonus = 3;
        break;
    }
    /* dexterity adjustment */
    if (dexterity < 4)
        bonus -= 3;
    else if (dexterity < 6)
        bonus -= 2;
    else if (dexterity < 8)
        bonus -= 1;
    else if (dexterity < 14)
        bonus -= 0;     /* Will change when print stuff below removed */
    else
        /* even increment for dextrous heroes (see weapon.c abon) */
        bonus += dexterity - 14;
    return bonus;
}
/*
 * Choose the punctuation appended to a hit message, scaled by the
 * force of the blow.  force == 0 occurs e.g. with sleep ray; note that
 * large force is usual with wands, so "!!" would require information
 * about hand/weapon/wand.
 */
const char *
exclam(int force)
{
    if (force < 0)
        return "?";
    return (force <= 4) ? "." : "!";
}
void
hit(str,mtmp,force)
register const char *str;
register struct monst *mtmp;
register const char *force; /* usually either "." or "!" */
{
if((!cansee(bhitpos.x,bhitpos.y) && !canspotmon(mtmp) &&
!(u.uswallow && mtmp == u.ustuck))
|| !flags.verbose)
pline("%s %s it.", The(str), vtense(str, "hit"));
else pline("%s %s %s%s", The(str), vtense(str, "hit"),
mon_nam(mtmp), force);
}
/* print a "<missile> misses <target>" message; the target is only
   named when perceivable and verbose messages are enabled */
void
miss(str,mtmp)
register const char *str;
register struct monst *mtmp;
{
    const char *target;
    if (flags.verbose &&
        (cansee(bhitpos.x,bhitpos.y) || canspotmon(mtmp)))
        target = mon_nam(mtmp);
    else
        target = "it";
    pline("%s %s %s.", The(str), vtense(str, "miss"), target);
}
#endif /*OVL0*/
#ifdef OVL1
/*
* Called for the following distance effects:
* when a weapon is thrown (weapon == THROWN_WEAPON)
* when an object is kicked (KICKED_WEAPON)
* when an IMMEDIATE wand is zapped (ZAPPED_WAND)
* when a light beam is flashed (FLASHED_LIGHT)
* when a mirror is applied (INVIS_BEAM)
* A thrown/kicked object falls down at the end of its range or when a monster
* is hit. The variable 'bhitpos' is set to the final position of the weapon
* thrown/zapped. The ray of a wand may affect (by calling a provided
* function) several objects and monsters on its path. The return value
* is the monster hit (weapon != ZAPPED_WAND), or a null monster pointer.
*
* Check !u.uswallow before calling bhit().
* This function reveals the absence of a remembered invisible monster in
* necessary cases (throwing or kicking weapons). The presence of a real
* one is revealed for a weapon, but if not a weapon is left up to fhitm().
*/
struct monst *
bhit(ddx,ddy,range,weapon,fhitm,fhito,obj)
register int ddx,ddy,range;         /* direction and range */
int weapon;             /* see values in hack.h */
int FDECL((*fhitm), (MONST_P, OBJ_P)),      /* fns called when mon/obj hit */
    FDECL((*fhito), (OBJ_P, OBJ_P));
struct obj *obj;            /* object tossed/used */
{
    struct monst *mtmp;
    uchar typ;
    boolean shopdoor = FALSE, point_blank = TRUE;
    if (weapon == KICKED_WEAPON) {
        /* object starts one square in front of player */
        bhitpos.x = u.ux + ddx;
        bhitpos.y = u.uy + ddy;
        range--;
    } else {
        bhitpos.x = u.ux;
        bhitpos.y = u.uy;
    }
    /* beams get a persistent display; physical objects flash square by
       square; zapped wands and mirror beams display nothing */
    if (weapon == FLASHED_LIGHT) {
        tmp_at(DISP_BEAM, cmap_to_glyph(S_flashbeam));
    } else if (weapon != ZAPPED_WAND && weapon != INVIS_BEAM)
        tmp_at(DISP_FLASH, obj_to_glyph(obj));
    /* advance one square per iteration until range is used up or
       something stops the missile/beam */
    while(range-- > 0) {
        int x,y;
        bhitpos.x += ddx;
        bhitpos.y += ddy;
        x = bhitpos.x; y = bhitpos.y;
        if(!isok(x, y)) {
            /* off the map: back up to the last valid square and stop */
            bhitpos.x -= ddx;
            bhitpos.y -= ddy;
            break;
        }
        /* a shopkeeper may catch a pick-axe sent into the shop */
        if(is_pick(obj) && inside_shop(x, y) &&
           (mtmp = shkcatch(obj, x, y))) {
            tmp_at(DISP_END, 0);
            return(mtmp);
        }
        typ = levl[bhitpos.x][bhitpos.y].typ;
        /* iron bars will block anything big enough */
        if ((weapon == THROWN_WEAPON || weapon == KICKED_WEAPON) &&
                typ == IRONBARS &&
                hits_bars(&obj, x - ddx, y - ddy,
                          point_blank ? 0 : !rn2(5), 1)) {
            /* caveat: obj might now be null... */
            bhitpos.x -= ddx;
            bhitpos.y -= ddy;
            break;
        }
        /* zapped wands can raise, lower, or destroy a drawbridge */
        if (weapon == ZAPPED_WAND && find_drawbridge(&x,&y))
            switch (obj->otyp) {
            case WAN_OPENING:
            case SPE_KNOCK:
                if (is_db_wall(bhitpos.x, bhitpos.y)) {
                    if (cansee(x,y) || cansee(bhitpos.x,bhitpos.y))
                        makeknown(obj->otyp);
                    open_drawbridge(x,y);
                }
                break;
            case WAN_LOCKING:
            case SPE_WIZARD_LOCK:
                if ((cansee(x,y) || cansee(bhitpos.x, bhitpos.y))
                    && levl[x][y].typ == DRAWBRIDGE_DOWN)
                    makeknown(obj->otyp);
                close_drawbridge(x,y);
                break;
            case WAN_STRIKING:
            case SPE_FORCE_BOLT:
                if (typ != DRAWBRIDGE_UP)
                    destroy_drawbridge(x,y);
                makeknown(obj->otyp);
                break;
            }
        if ((mtmp = m_at(bhitpos.x, bhitpos.y)) != 0) {
            /* occupant may be a long worm whose head is elsewhere */
            notonhead = (bhitpos.x != mtmp->mx ||
                         bhitpos.y != mtmp->my);
            if (weapon != FLASHED_LIGHT) {
                if(weapon != ZAPPED_WAND) {
                    if(weapon != INVIS_BEAM) tmp_at(DISP_END, 0);
                    if (cansee(bhitpos.x,bhitpos.y) && !canspotmon(mtmp)) {
                        if (weapon != INVIS_BEAM) {
                            /* reveal the unseen monster's presence */
                            map_invisible(bhitpos.x, bhitpos.y);
                            return(mtmp);
                        }
                    } else
                        return(mtmp);
                }
                if (weapon != INVIS_BEAM) {
                    /* zapped wand: apply the effect and lose range */
                    (*fhitm)(mtmp, obj);
                    range -= 3;
                }
            } else {
                /* FLASHED_LIGHT hitting invisible monster
                   should pass through instead of stop so
                   we call flash_hits_mon() directly rather
                   than returning mtmp back to caller. That
                   allows the flash to keep on going. Note
                   that we use mtmp->minvis not canspotmon()
                   because it makes no difference whether
                   the hero can see the monster or not.*/
                if (mtmp->minvis) {
                    obj->ox = u.ux,  obj->oy = u.uy;
                    (void) flash_hits_mon(mtmp, obj);
                } else {
                    tmp_at(DISP_END, 0);
                    return(mtmp);   /* caller will call flash_hits_mon */
                }
            }
        } else {
            /* no monster here; probing removes a stale 'I' marker */
            if (weapon == ZAPPED_WAND && obj->otyp == WAN_PROBING &&
                glyph_is_invisible(levl[bhitpos.x][bhitpos.y].glyph)) {
                unmap_object(bhitpos.x, bhitpos.y);
                newsym(x, y);
            }
        }
        if(fhito) {
            /* apply object-hit function to any pile here */
            if(bhitpile(obj,fhito,bhitpos.x,bhitpos.y))
                range--;
        } else {
            /* kicked gold merges with piles; kicked objects may float
               away on water (ship_object) */
            if(weapon == KICKED_WEAPON &&
                  ((obj->oclass == COIN_CLASS &&
                    OBJ_AT(bhitpos.x, bhitpos.y)) ||
                   ship_object(obj, bhitpos.x, bhitpos.y,
                               costly_spot(bhitpos.x, bhitpos.y)))) {
                tmp_at(DISP_END, 0);
                return (struct monst *)0;
            }
        }
        /* zapped wands can lock/unlock/smash doors in passing */
        if(weapon == ZAPPED_WAND && (IS_DOOR(typ) || typ == SDOOR)) {
            switch (obj->otyp) {
            case WAN_OPENING:
            case WAN_LOCKING:
            case WAN_STRIKING:
            case SPE_KNOCK:
            case SPE_WIZARD_LOCK:
            case SPE_FORCE_BOLT:
                if (doorlock(obj, bhitpos.x, bhitpos.y)) {
                    if (cansee(bhitpos.x, bhitpos.y) ||
                        (obj->otyp == WAN_STRIKING))
                        makeknown(obj->otyp);
                    if (levl[bhitpos.x][bhitpos.y].doormask == D_BROKEN
                        && *in_rooms(bhitpos.x, bhitpos.y, SHOPBASE)) {
                        shopdoor = TRUE;
                        add_damage(bhitpos.x, bhitpos.y, 400L);
                    }
                }
                break;
            }
        }
        /* walls and closed doors stop everything */
        if(!ZAP_POS(typ) || closed_door(bhitpos.x, bhitpos.y)) {
            bhitpos.x -= ddx;
            bhitpos.y -= ddy;
            break;
        }
        if(weapon != ZAPPED_WAND && weapon != INVIS_BEAM) {
            /* 'I' present but no monster: erase */
            /* do this before the tmp_at() */
            if (glyph_is_invisible(levl[bhitpos.x][bhitpos.y].glyph)
                && cansee(x, y)) {
                unmap_object(bhitpos.x, bhitpos.y);
                newsym(x, y);
            }
            tmp_at(bhitpos.x, bhitpos.y);
            delay_output();
            /* kicked objects fall in pools */
            if((weapon == KICKED_WEAPON) &&
               (is_pool(bhitpos.x, bhitpos.y) ||
                is_lava(bhitpos.x, bhitpos.y)))
                break;
#ifdef SINKS
            if(IS_SINK(typ) && weapon != FLASHED_LIGHT)
                break;  /* physical objects fall onto sink */
#endif
        }
        /* limit range of ball so hero won't make an invalid move */
        if (weapon == THROWN_WEAPON && range > 0 &&
            obj->otyp == HEAVY_IRON_BALL) {
            struct obj *bobj;
            struct trap *t;
            if ((bobj = sobj_at(BOULDER, x, y)) != 0) {
                if (cansee(x,y))
                    pline("%s hits %s.",
                          The(distant_name(obj, xname)), an(xname(bobj)));
                range = 0;
            } else if (obj == uball) {
                if (!test_move(x - ddx, y - ddy, ddx, ddy, TEST_MOVE)) {
                    /* nb: it didn't hit anything directly */
                    if (cansee(x,y))
                        pline("%s jerks to an abrupt halt.",
                              The(distant_name(obj, xname))); /* lame */
                    range = 0;
                } else if (In_sokoban(&u.uz) && (t = t_at(x, y)) != 0 &&
                           (t->ttyp == PIT || t->ttyp == SPIKED_PIT ||
                            t->ttyp == HOLE || t->ttyp == TRAPDOOR)) {
                    /* hero falls into the trap, so ball stops */
                    range = 0;
                }
            }
        }
        /* thrown/kicked missile has moved away from its starting spot */
        point_blank = FALSE;    /* affects passing through iron bars */
    }
    if (weapon != ZAPPED_WAND && weapon != INVIS_BEAM) tmp_at(DISP_END, 0);
    if(shopdoor)
        pay_for_damage("destroy", FALSE);
    return (struct monst *)0;
}
/*
 * Trace the curved flight of a thrown boomerang.  The path is driven by
 * stepping through the 8-direction table xdir[]/ydir[], advancing the
 * direction index on 4 of every 5 steps so the missile arcs back toward
 * the thrower.  Returns the monster hit, &youmonst if the hero is hit
 * (or catches it), or a null pointer if it lands on the floor.
 */
struct monst *
boomhit(dx, dy)
int dx, dy;
{
    register int i, ct;
    int boom = S_boomleft;  /* showsym[] index */
    struct monst *mtmp;
    bhitpos.x = u.ux;
    bhitpos.y = u.uy;
    /* find the direction table entry matching the initial throw */
    for (i = 0; i < 8; i++) if (xdir[i] == dx && ydir[i] == dy) break;
    tmp_at(DISP_FLASH, cmap_to_glyph(boom));
    for (ct = 0; ct < 10; ct++) {
        if(i == 8) i = 0;   /* wrap around the direction table */
        /* alternate the two boomerang glyphs so it appears to spin */
        boom = (boom == S_boomleft) ? S_boomright : S_boomleft;
        tmp_at(DISP_CHANGE, cmap_to_glyph(boom));/* change glyph */
        dx = xdir[i];
        dy = ydir[i];
        bhitpos.x += dx;
        bhitpos.y += dy;
        if(MON_AT(bhitpos.x, bhitpos.y)) {
            mtmp = m_at(bhitpos.x,bhitpos.y);
            m_respond(mtmp);
            tmp_at(DISP_END, 0);
            return(mtmp);
        }
        /* walls and closed doors stop it short */
        if(!ZAP_POS(levl[bhitpos.x][bhitpos.y].typ) ||
           closed_door(bhitpos.x, bhitpos.y)) {
            bhitpos.x -= dx;
            bhitpos.y -= dy;
            break;
        }
        if(bhitpos.x == u.ux && bhitpos.y == u.uy) { /* ct == 9 */
            if(Fumbling || rn2(20) >= ACURR(A_DEX)) {
                /* we hit ourselves */
                (void) thitu(10, rnd(10), (struct obj *)0,
                             "boomerang");
                break;
            } else {    /* we catch it */
                tmp_at(DISP_END, 0);
                You("skillfully catch the boomerang.");
                return(&youmonst);
            }
        }
        tmp_at(bhitpos.x, bhitpos.y);
        delay_output();
        /* turn on 4 of every 5 steps, producing the curved path */
        if(ct % 5 != 0) i++;
#ifdef SINKS
        if(IS_SINK(levl[bhitpos.x][bhitpos.y].typ))
            break;  /* boomerang falls on sink */
#endif
    }
    tmp_at(DISP_END, 0);    /* do not leave last symbol */
    return (struct monst *)0;
}
/*
 * Apply one ray/breath/spell hit to monster `mon' and deduct hit
 * points.  `type' encodes source and element (see the comment table
 * above buzz()); `nd' is the number of damage dice.  Returns the damage
 * inflicted; MAGIC_COOKIE as the damage value is the special marker for
 * "disintegrated".  *ootmp is set to a worn shield/suit for the caller
 * to disintegrate when armor absorbed a disintegration blast.
 */
STATIC_OVL int
zhitm(mon, type, nd, ootmp) /* returns damage to mon */
register struct monst *mon;
register int type, nd;
struct obj **ootmp; /* to return worn armor for caller to disintegrate */
{
    register int tmp = 0;
    register int abstype = abs(type) % 10;
    boolean sho_shieldeff = FALSE;
    boolean spellcaster = is_hero_spell(type);  /* maybe get a bonus! */
    *ootmp = (struct obj *)0;
    switch(abstype) {
    case ZT_MAGIC_MISSILE:
        if (resists_magm(mon)) {
            sho_shieldeff = TRUE;
            break;
        }
        tmp = d(nd,6);
        if (spellcaster)
            tmp += spell_damage_bonus();
#ifdef WIZ_PATCH_DEBUG
        if (spellcaster)
            pline("Damage = %d + %d", tmp-spell_damage_bonus(),
                  spell_damage_bonus());
#endif
        break;
    case ZT_FIRE:
        if (resists_fire(mon)) {
            sho_shieldeff = TRUE;
            break;
        }
        tmp = d(nd,6);
        /* cold-resistant creatures take extra fire damage */
        if (resists_cold(mon)) tmp += 7;
        if (spellcaster)
            tmp += spell_damage_bonus();
#ifdef WIZ_PATCH_DEBUG
        if (spellcaster)
            pline("Damage = %d + %d",tmp-spell_damage_bonus(),
                  spell_damage_bonus());
#endif
        if (burnarmor(mon)) {
            /* flammable inventory may also be destroyed */
            if (!rn2(3)) (void)destroy_mitem(mon, POTION_CLASS, AD_FIRE);
            if (!rn2(3)) (void)destroy_mitem(mon, SCROLL_CLASS, AD_FIRE);
            if (!rn2(5)) (void)destroy_mitem(mon, SPBOOK_CLASS, AD_FIRE);
        }
        break;
    case ZT_COLD:
        if (resists_cold(mon)) {
            sho_shieldeff = TRUE;
            break;
        }
        tmp = d(nd,6);
        /* fire-resistant creatures take extra cold damage */
        if (resists_fire(mon)) tmp += d(nd, 3);
        if (spellcaster)
            tmp += spell_damage_bonus();
#ifdef WIZ_PATCH_DEBUG
        if (spellcaster)
            pline("Damage = %d + %d", tmp-spell_damage_bonus(),
                  spell_damage_bonus());
#endif
        if (!rn2(3)) (void)destroy_mitem(mon, POTION_CLASS, AD_COLD);
        break;
    case ZT_SLEEP:
        /* no hit-point damage; just try to put the monster to sleep */
        tmp = 0;
        (void)sleep_monst(mon, d(nd, 25),
                          type == ZT_WAND(ZT_SLEEP) ? WAND_CLASS : '\0');
        break;
    case ZT_DEATH:      /* death/disintegration */
        if(abs(type) != ZT_BREATH(ZT_DEATH)) {  /* death */
            if(mon->data == &mons[PM_DEATH]) {
                /* Death is empowered, not harmed, by death magic */
                mon->mhpmax += mon->mhpmax/2;
                if (mon->mhpmax >= MAGIC_COOKIE)
                    mon->mhpmax = MAGIC_COOKIE - 1;
                mon->mhp = mon->mhpmax;
                tmp = 0;
                break;
            }
            if (nonliving(mon->data) || is_demon(mon->data) ||
                    resists_magm(mon)) {    /* similar to player */
                sho_shieldeff = TRUE;
                break;
            }
            type = -1; /* so they don't get saving throws */
        } else {
            /* disintegration breath: armor can absorb the blast */
            struct obj *otmp2;
            if (resists_disint(mon)) {
                sho_shieldeff = TRUE;
            } else if (mon->misc_worn_check & W_ARMS) {
                /* destroy shield; victim survives */
                *ootmp = which_armor(mon, W_ARMS);
            } else if (mon->misc_worn_check & W_ARM) {
                /* destroy body armor, also cloak if present */
                *ootmp = which_armor(mon, W_ARM);
                if ((otmp2 = which_armor(mon, W_ARMC)) != 0)
                    m_useup(mon, otmp2);
            } else {
                /* no body armor, victim dies; destroy cloak
                   and shirt now in case target gets life-saved */
                tmp = MAGIC_COOKIE;
                if ((otmp2 = which_armor(mon, W_ARMC)) != 0)
                    m_useup(mon, otmp2);
#ifdef TOURIST
                if ((otmp2 = which_armor(mon, W_ARMU)) != 0)
                    m_useup(mon, otmp2);
#endif
            }
            type = -1;  /* no saving throw wanted */
            break;  /* not ordinary damage */
        }
        /* plain death ray: damage exceeding current hp kills outright */
        tmp = mon->mhp+1;
        break;
    case ZT_LIGHTNING:
        if (resists_elec(mon)) {
            sho_shieldeff = TRUE;
            tmp = 0;
            /* can still blind the monster */
        } else
            tmp = d(nd,6);
        /* NOTE(review): the spellcaster bonus is applied even when the
           monster resisted (unlike fire/cold which break out above);
           presumably intentional since the resist path must fall
           through for the blinding effect -- confirm */
        if (spellcaster)
            tmp += spell_damage_bonus();
#ifdef WIZ_PATCH_DEBUG
        if (spellcaster)
            pline("Damage = %d + %d", tmp-spell_damage_bonus(),
                  spell_damage_bonus());
#endif
        if (!resists_blnd(mon) &&
                !(type > 0 && u.uswallow && mon == u.ustuck)) {
            register unsigned rnd_tmp = rnd(50);
            mon->mcansee = 0;
            if((mon->mblinded + rnd_tmp) > 127)
                mon->mblinded = 127;
            else mon->mblinded += rnd_tmp;
        }
        if (!rn2(3)) (void)destroy_mitem(mon, WAND_CLASS, AD_ELEC);
        /* not actually possible yet */
        if (!rn2(3)) (void)destroy_mitem(mon, RING_CLASS, AD_ELEC);
        break;
    case ZT_POISON_GAS:
        if (resists_poison(mon)) {
            sho_shieldeff = TRUE;
            break;
        }
        tmp = d(nd,6);
        break;
    case ZT_ACID:
        if (resists_acid(mon)) {
            sho_shieldeff = TRUE;
            break;
        }
        tmp = d(nd,6);
        /* acid can corrode the monster's weapon and armor */
        if (!rn2(6)) erode_obj(MON_WEP(mon), TRUE, TRUE);
        if (!rn2(6)) erode_armor(mon, TRUE);
        break;
    }
    if (sho_shieldeff) shieldeff(mon->mx, mon->my);
    /* Knights with the quest artifact deal double spell damage */
    if (is_hero_spell(type) && (Role_if(PM_KNIGHT) && u.uhave.questart))
        tmp *= 2;
    /* hero-fired wands allow a saving throw for half damage */
    if (tmp > 0 && type >= 0 &&
        resist(mon, type < ZT_SPELL(0) ? WAND_CLASS : '\0', 0, NOTELL))
        tmp /= 2;
    if (tmp < 0) tmp = 0;   /* don't allow negative damage */
#ifdef WIZ_PATCH_DEBUG
    pline("zapped monster hp = %d (= %d - %d)", mon->mhp-tmp,mon->mhp,tmp);
#endif
    mon->mhp -= tmp;
    return(tmp);
}
/*
 * Apply one ray/breath hit to the hero.  `type' encodes source and
 * element (see the comment table above buzz()); `nd' is the number of
 * damage dice; `fltxt' is the attack's name for the death reason;
 * sx,sy is the square where the hit occurred (for shield effects).
 * May not return (done() is called on an unresisted death ray unless
 * life-saved).
 */
STATIC_OVL void
zhitu(type, nd, fltxt, sx, sy)
int type, nd;
const char *fltxt;
xchar sx, sy;
{
    int dam = 0;
    switch (abs(type) % 10) {
    case ZT_MAGIC_MISSILE:
        if (Antimagic) {
            shieldeff(sx, sy);
            pline_The("missiles bounce off!");
        } else {
            dam = d(nd,6);
            exercise(A_STR, FALSE);
        }
        break;
    case ZT_FIRE:
        if (Fire_resistance) {
            shieldeff(sx, sy);
            You("don't feel hot!");
            ugolemeffects(AD_FIRE, d(nd, 6));
        } else {
            dam = d(nd, 6);
        }
        /* fire cures sliming and may burn carried items */
        burn_away_slime();
        if (burnarmor(&youmonst)) { /* "body hit" */
            if (!rn2(3)) destroy_item(POTION_CLASS, AD_FIRE);
            if (!rn2(3)) destroy_item(SCROLL_CLASS, AD_FIRE);
            if (!rn2(5)) destroy_item(SPBOOK_CLASS, AD_FIRE);
        }
        break;
    case ZT_COLD:
        if (Cold_resistance) {
            shieldeff(sx, sy);
            You("don't feel cold.");
            ugolemeffects(AD_COLD, d(nd, 6));
        } else {
            dam = d(nd, 6);
        }
        if (!rn2(3)) destroy_item(POTION_CLASS, AD_COLD);
        break;
    case ZT_SLEEP:
        if (Sleep_resistance) {
            shieldeff(u.ux, u.uy);
            You("don't feel sleepy.");
        } else {
            fall_asleep(-d(nd,25), TRUE); /* sleep ray */
        }
        break;
    case ZT_DEATH:
        if (abs(type) == ZT_BREATH(ZT_DEATH)) {
            /* disintegration breath: armor is consumed layer by layer */
            if (Disint_resistance) {
                You("are not disintegrated.");
                break;
            } else if (uarms) {
                /* destroy shield; other possessions are safe */
                (void) destroy_arm(uarms);
                break;
            } else if (uarm) {
                /* destroy suit; if present, cloak goes too */
                if (uarmc) (void) destroy_arm(uarmc);
                (void) destroy_arm(uarm);
                break;
            }
            /* no shield or suit, you're dead; wipe out cloak
               and/or shirt in case of life-saving or bones */
            if (uarmc) (void) destroy_arm(uarmc);
#ifdef TOURIST
            if (uarmu) (void) destroy_arm(uarmu);
#endif
        } else if (nonliving(youmonst.data) || is_demon(youmonst.data)) {
            shieldeff(sx, sy);
            You("seem unaffected.");
            break;
        } else if (Antimagic) {
            shieldeff(sx, sy);
            You("aren't affected.");
            break;
        }
        killer_format = KILLED_BY_AN;
        killer = fltxt;
        /* when killed by disintegration breath, don't leave corpse */
        u.ugrave_arise = (type == -ZT_BREATH(ZT_DEATH)) ? -3 : NON_PM;
        done(DIED);
        return; /* lifesaved */
    case ZT_LIGHTNING:
        if (Shock_resistance) {
            shieldeff(sx, sy);
            You("aren't affected.");
            ugolemeffects(AD_ELEC, d(nd, 6));
        } else {
            dam = d(nd, 6);
            exercise(A_CON, FALSE);
        }
        if (!rn2(3)) destroy_item(WAND_CLASS, AD_ELEC);
        if (!rn2(3)) destroy_item(RING_CLASS, AD_ELEC);
        break;
    case ZT_POISON_GAS:
        poisoned("blast", A_DEX, "poisoned blast", 15);
        break;
    case ZT_ACID:
        if (Acid_resistance) {
            dam = 0;
        } else {
            pline_The("acid burns!");
            dam = d(nd,6);
            exercise(A_STR, FALSE);
        }
        /* using two weapons at once makes both of them more vulnerable */
        if (!rn2(u.twoweap ? 3 : 6)) erode_obj(uwep, TRUE, TRUE);
        if (u.twoweap && !rn2(3)) erode_obj(uswapwep, TRUE, TRUE);
        if (!rn2(6)) erode_armor(&youmonst, TRUE);
        break;
    }
    /* half spell damage applies to monster-cast spells (types -10..-19),
       but not to breath attacks (-20..-29) */
    if (Half_spell_damage && dam &&
        type < 0 && (type > -20 || type < -29)) /* !Breath */
        dam = (dam + 1) / 2;
    losehp(dam, fltxt, KILLED_BY_AN);
    return;
}
#endif /*OVL1*/
#ifdef OVLB
/*
* burn scrolls and spellbooks on floor at position x,y
* return the number of scrolls and spellbooks burned
*/
int
burn_floor_paper(x, y, give_feedback, u_caused)
int x, y;
boolean give_feedback;  /* caller needs to decide about visibility checks */
boolean u_caused;
{
    struct obj *obj, *obj2;
    long i, scrquan, delquan;
    char buf1[BUFSZ], buf2[BUFSZ];
    int cnt = 0;
    for (obj = level.objects[x][y]; obj; obj = obj2) {
        obj2 = obj->nexthere;
        if (obj->oclass == SCROLL_CLASS || obj->oclass == SPBOOK_CLASS) {
            /* fire-themed and specially protected items are immune */
            if (obj->otyp == SCR_FIRE || obj->otyp == SPE_FIREBALL ||
                    obj_resists(obj, 2, 100))
                continue;
            scrquan = obj->quan;    /* number present */
            delquan = 0;        /* number to destroy */
            /* each item in the stack has a 1 in 3 chance to burn */
            for (i = scrquan; i > 0; i--)
                if (!rn2(3)) delquan++;
            if (delquan) {
                /* save name before potential delobj();
                   buf1 holds the singular form, buf2 the plural */
                if (give_feedback) {
                    obj->quan = 1;
                    Strcpy(buf1, (x == u.ux && y == u.uy) ?
                           xname(obj) : distant_name(obj, xname));
                    obj->quan = 2;
                    Strcpy(buf2, (x == u.ux && y == u.uy) ?
                           xname(obj) : distant_name(obj, xname));
                    obj->quan = scrquan;
                }
                /* useupf(), which charges, only if hero caused damage */
                if (u_caused) useupf(obj, delquan);
                else if (delquan < scrquan) obj->quan -= delquan;
                else delobj(obj);
                cnt += delquan;
                if (give_feedback) {
                    if (delquan > 1)
                        pline("%ld %s burn.", delquan, buf2);
                    else
                        pline("%s burns.", An(buf1));
                }
            }
        }
    }
    return cnt;
}
/* will zap/spell/breath attack score a hit against armor class `ac'? */
/* will zap/spell/breath attack score a hit against armor class `ac'? */
STATIC_OVL int
zap_hit(ac, type)
int ac;
int type;   /* either hero cast spell type or 0 */
{
    int roll = rn2(20);
    int bonus = type ? spell_hit_bonus(type) : 0;
    /* small chance for naked target to avoid being hit */
    if (roll == 0)
        return rnd(10) < ac + bonus;
    /* very high armor protection does not achieve invulnerability */
    ac = AC_VALUE(ac);
    return (3 - roll) < ac + bonus;
}
/* type == 0 to 9 : you shooting a wand */
/* type == 10 to 19 : you casting a spell */
/* type == 20 to 29 : you breathing as a monster */
/* type == -10 to -19 : monster casting spell */
/* type == -20 to -29 : monster breathing at you */
/* type == -30 to -39 : monster shooting a wand */
/* called with dx = dy = 0 with vertical bolts */
/*
 * Fire a ray/bolt/breath of element `abstype' from (sx,sy) along
 * direction (dx,dy) for a random range, bouncing off walls, hitting
 * monsters and the hero, and applying terrain effects square by square
 * via zap_over_floor().  `type' encodes source and element per the
 * table above; `nd' is the number of damage dice.  Hero-cast fireballs
 * (ZT_SPELL(ZT_FIRE)) travel to the end and then explode() instead of
 * damaging along the way.
 */
void
buzz(type,nd,sx,sy,dx,dy)
register int type, nd;
register xchar sx,sy;
register int dx,dy;
{
    int range, abstype = abs(type) % 10;
    struct rm *lev;
    register xchar lsx, lsy;
    struct monst *mon;
    coord save_bhitpos;
    boolean shopdamage = FALSE;
    register const char *fltxt;
    struct obj *otmp;
    int spell_type;
    /* if its a Hero Spell then get its SPE_TYPE */
    spell_type = is_hero_spell(type) ? SPE_MAGIC_MISSILE + abstype : 0;
    fltxt = flash_types[(type <= -30) ? abstype : abs(type)];
    if(u.uswallow) {
        /* engulfed: the ray can only hit the engulfer */
        register int tmp;
        if(type < 0) return;
        tmp = zhitm(u.ustuck, type, nd, &otmp);
        if(!u.ustuck)   u.uswallow = 0;
        else    pline("%s rips into %s%s",
                      The(fltxt), mon_nam(u.ustuck), exclam(tmp));
        /* Using disintegration from the inside only makes a hole... */
        if (tmp == MAGIC_COOKIE)
            u.ustuck->mhp = 0;
        if (u.ustuck->mhp < 1)
            killed(u.ustuck);
        return;
    }
    if(type < 0) newsym(u.ux,u.uy);
    range = rn1(7,7);
    if(dx == 0 && dy == 0) range = 1;   /* vertical zap: one square only */
    save_bhitpos = bhitpos;
    tmp_at(DISP_BEAM, zapdir_to_glyph(dx, dy, abstype));
    while(range-- > 0) {
        lsx = sx; sx += dx; /* remember previous square for bounces */
        lsy = sy; sy += dy;
        if(isok(sx,sy) && (lev = &levl[sx][sy])->typ) {
            mon = m_at(sx, sy);
            if(cansee(sx,sy)) {
                /* reveal/unreveal invisible monsters before tmp_at() */
                if (mon && !canspotmon(mon))
                    map_invisible(sx, sy);
                else if (!mon && glyph_is_invisible(levl[sx][sy].glyph)) {
                    unmap_object(sx, sy);
                    newsym(sx, sy);
                }
                if(ZAP_POS(lev->typ) || cansee(lsx,lsy))
                    tmp_at(sx,sy);
                delay_output(); /* wait a little */
            }
        } else
            goto make_bounce;
        /* hit() and miss() need bhitpos to match the target */
        bhitpos.x = sx,  bhitpos.y = sy;
        /* Fireballs only damage when they explode */
        if (type != ZT_SPELL(ZT_FIRE))
            range += zap_over_floor(sx, sy, type, &shopdamage);
        if (mon) {
            if (type == ZT_SPELL(ZT_FIRE)) break;
            if (type >= 0) mon->mstrategy &= ~STRAT_WAITMASK;
#ifdef STEED
        buzzmonst:
#endif
            if (zap_hit(find_mac(mon), spell_type)) {
                if (mon_reflects(mon, (char *)0)) {
                    /* reflection reverses the ray's direction */
                    if(cansee(mon->mx,mon->my)) {
                        hit(fltxt, mon, exclam(0));
                        shieldeff(mon->mx, mon->my);
                        (void) mon_reflects(mon, "But it reflects from %s %s!");
                    }
                    dx = -dx;
                    dy = -dy;
                } else {
                    boolean mon_could_move = mon->mcanmove;
                    int tmp = zhitm(mon, type, nd, &otmp);
                    /* the Riders cannot be destroyed by disintegration */
                    if (is_rider(mon->data) && abs(type) == ZT_BREATH(ZT_DEATH)) {
                        if (canseemon(mon)) {
                            hit(fltxt, mon, ".");
                            pline("%s disintegrates.", Monnam(mon));
                            pline("%s body reintegrates before your %s!",
                                  s_suffix(Monnam(mon)),
                                  (eyecount(youmonst.data) == 1) ?
                                  body_part(EYE) : makeplural(body_part(EYE)));
                            pline("%s resurrects!", Monnam(mon));
                        }
                        mon->mhp = mon->mhpmax;
                        break; /* Out of while loop */
                    }
                    /* Death absorbs death magic and grows stronger */
                    if (mon->data == &mons[PM_DEATH] && abstype == ZT_DEATH) {
                        if (canseemon(mon)) {
                            hit(fltxt, mon, ".");
                            pline("%s absorbs the deadly %s!", Monnam(mon),
                                  type == ZT_BREATH(ZT_DEATH) ?
                                  "blast" : "ray");
                            pline("It seems even stronger than before.");
                        }
                        break; /* Out of while loop */
                    }
                    if (tmp == MAGIC_COOKIE) { /* disintegration */
                        struct obj *otmp2, *m_amulet = mlifesaver(mon);
                        if (canseemon(mon)) {
                            if (!m_amulet)
                                pline("%s is disintegrated!", Monnam(mon));
                            else
                                hit(fltxt, mon, "!");
                        }
#ifndef GOLDOBJ
                        mon->mgold = 0L;
#endif
/* note: worn amulet of life saving must be preserved in order to operate */
#define oresist_disintegration(obj) \
        (objects[obj->otyp].oc_oprop == DISINT_RES || \
         obj_resists(obj, 5, 50) || is_quest_artifact(obj) || \
         obj == m_amulet)
                        /* destroy all non-resistant inventory */
                        for (otmp = mon->minvent; otmp; otmp = otmp2) {
                            otmp2 = otmp->nobj;
                            if (!oresist_disintegration(otmp)) {
                                obj_extract_self(otmp);
                                obfree(otmp, (struct obj *)0);
                            }
                        }
                        if (type < 0)
                            monkilled(mon, (char *)0, -AD_RBRE);
                        else
                            xkilled(mon, 2);
                    } else if(mon->mhp < 1) {
                        if(type < 0)
                            monkilled(mon, fltxt, AD_RBRE);
                        else
                            killed(mon);
                    } else {
                        if (!otmp) {
                            /* normal non-fatal hit */
                            hit(fltxt, mon, exclam(tmp));
                        } else {
                            /* some armor was destroyed; no damage done */
                            if (canseemon(mon))
                                pline("%s %s is disintegrated!",
                                      s_suffix(Monnam(mon)),
                                      distant_name(otmp, xname));
                            m_useup(mon, otmp);
                        }
                        if (mon_could_move && !mon->mcanmove)   /* ZT_SLEEP */
                            slept_monst(mon);
                    }
                }
                range -= 2;
            } else {
                miss(fltxt,mon);
            }
        } else if (sx == u.ux && sy == u.uy && range >= 0) {
            /* the ray reaches the hero's square */
            nomul(0);
#ifdef STEED
            /* the steed may take the hit instead of the rider */
            if (u.usteed && !rn2(3) && !mon_reflects(u.usteed, (char *)0)) {
                mon = u.usteed;
                goto buzzmonst;
            } else
#endif
            if (zap_hit((int) u.uac, 0)) {
                range -= 2;
                pline("%s hits you!", The(fltxt));
                if (Reflecting) {
                    if (!Blind) {
                        (void) ureflects("But %s reflects from your %s!", "it");
                    } else
                        pline("For some reason you are not affected.");
                    dx = -dx;
                    dy = -dy;
                    shieldeff(sx, sy);
                } else {
                    zhitu(type, nd, fltxt, sx, sy);
                }
            } else {
                pline("%s whizzes by you!", The(fltxt));
            }
            if (abstype == ZT_LIGHTNING && !resists_blnd(&youmonst)) {
                You(are_blinded_by_the_flash);
                make_blinded((long)d(nd,50),FALSE);
                if (!Blind) Your(vision_clears);
            }
            stop_occupation();
            nomul(0);
        }
        /* NOTE(review): on a make_bounce entry via goto, `lev' still
           refers to the square from a previous iteration -- presumably
           safe because the goto path re-enters below this check, but
           verify before restructuring */
        if(!ZAP_POS(lev->typ) || (closed_door(sx, sy) && (range >= 0))) {
            int bounce;
            uchar rmn;
 make_bounce:
            if (type == ZT_SPELL(ZT_FIRE)) {
                sx = lsx;
                sy = lsy;
                break; /* fireballs explode before the wall */
            }
            bounce = 0;
            range--;
            if(range && isok(lsx, lsy) && cansee(lsx,lsy))
                pline("%s bounces!", The(fltxt));
            if(!dx || !dy || !rn2(20)) {
                /* orthogonal ray (or 1 in 20): simply reverse */
                dx = -dx;
                dy = -dy;
            } else {
                /* diagonal ray: deflect off whichever adjacent wall
                   permits it; if both do, pick one at random */
                if(isok(sx,lsy) && ZAP_POS(rmn = levl[sx][lsy].typ) &&
                   !closed_door(sx,lsy) &&
                   (IS_ROOM(rmn) || (isok(sx+dx,lsy) &&
                                     ZAP_POS(levl[sx+dx][lsy].typ))))
                    bounce = 1;
                if(isok(lsx,sy) && ZAP_POS(rmn = levl[lsx][sy].typ) &&
                   !closed_door(lsx,sy) &&
                   (IS_ROOM(rmn) || (isok(lsx,sy+dy) &&
                                     ZAP_POS(levl[lsx][sy+dy].typ))))
                    if(!bounce || rn2(2))
                        bounce = 2;
                switch(bounce) {
                case 0: dx = -dx;   /* fall into... */
                case 1: dy = -dy; break;
                case 2: dx = -dx; break;
                }
                tmp_at(DISP_CHANGE, zapdir_to_glyph(dx,dy,abstype));
            }
        }
    }
    tmp_at(DISP_END,0);
    /* fireballs explode at the point where they stopped */
    if (type == ZT_SPELL(ZT_FIRE))
        explode(sx, sy, type, d(12,6), 0, EXPL_FIERY);
    if (shopdamage)
        pay_for_damage(abstype == ZT_FIRE ? "burn away" :
                       abstype == ZT_COLD ? "shatter" :
                       abstype == ZT_DEATH ? "disintegrate" : "destroy", FALSE);
    bhitpos = save_bhitpos;
}
#endif /*OVLB*/
#ifdef OVL0
/*
 * Melt the ice at (x,y), reverting it to the pool or moat it froze
 * from, unearthing buried objects, and dropping any boulder into the
 * resulting water.  Handles the hero standing on the melting square.
 */
void
melt_ice(x, y)
xchar x, y;
{
    struct rm *lev = &levl[x][y];
    struct obj *otmp;
    if (lev->typ == DRAWBRIDGE_UP)
        lev->drawbridgemask &= ~DB_ICE; /* revert to DB_MOAT */
    else {  /* lev->typ == ICE */
#ifdef STUPID
        if (lev->icedpool == ICED_POOL) lev->typ = POOL;
        else lev->typ = MOAT;
#else
        lev->typ = (lev->icedpool == ICED_POOL ? POOL : MOAT);
#endif
        lev->icedpool = 0;
    }
    obj_ice_effects(x, y, FALSE);
    unearth_objs(x, y);
    if (Underwater) vision_recalc(1);
    newsym(x,y);
    if (cansee(x,y)) Norep("The ice crackles and melts.");
    if ((otmp = sobj_at(BOULDER, x, y)) != 0) {
        /* a boulder sitting on the ice sinks into the new pool */
        if (cansee(x,y)) pline("%s settles...", An(xname(otmp)));
        do {
            obj_extract_self(otmp); /* boulder isn't being pushed */
            if (!boulder_hits_pool(otmp, x, y, FALSE))
                impossible("melt_ice: no pool?");
            /* try again if there's another boulder and pool didn't fill */
        } while (is_pool(x,y) && (otmp = sobj_at(BOULDER, x, y)) != 0);
        newsym(x,y);
    }
    if (x == u.ux && y == u.uy)
        spoteffects(TRUE);  /* possibly drown, notice objects */
}
/* Burn floor scrolls, evaporate pools, etc... in a single square. Used
* both for normal bolts of fire, cold, etc... and for fireballs.
* Sets shopdamage to TRUE if a shop door is destroyed, and returns the
* amount by which range is reduced (the latter is just ignored by fireballs)
*/
int
zap_over_floor(x, y, type, shopdamage)
xchar x, y;
int type;
boolean *shopdamage;
{
    struct monst *mon;
    int abstype = abs(type) % 10;
    struct rm *lev = &levl[x][y];
    int rangemod = 0;
    if(abstype == ZT_FIRE) {
        struct trap *t = t_at(x, y);
        if (t && t->ttyp == WEB) {
            /* a burning web is too flimsy to notice if you can't see it */
            if (cansee(x,y)) Norep("A web bursts into flames!");
            (void) delfloortrap(t);
            if (cansee(x,y)) newsym(x,y);
        }
        if(is_ice(x, y)) {
            melt_ice(x, y);
        } else if(is_pool(x,y)) {
            const char *msgtxt = "You hear hissing gas.";
            if(lev->typ != POOL) {  /* MOAT or DRAWBRIDGE_UP */
                if (cansee(x,y)) msgtxt = "Some water evaporates.";
            } else {
                /* a plain pool boils away entirely, leaving a pit */
                register struct trap *ttmp;
                rangemod -= 3;
                lev->typ = ROOM;
                ttmp = maketrap(x, y, PIT);
                if (ttmp) ttmp->tseen = 1;
                if (cansee(x,y)) msgtxt = "The water evaporates.";
            }
            Norep(msgtxt);
            if (lev->typ == ROOM) newsym(x,y);
        } else if(IS_FOUNTAIN(lev->typ)) {
            if (cansee(x,y))
                pline("Steam billows from the fountain.");
            rangemod -= 1;
            dryup(x, y, type > 0);
        }
    }
    else if(abstype == ZT_COLD && (is_pool(x,y) || is_lava(x,y))) {
        /* freeze water or solidify lava */
        boolean lava = is_lava(x,y);
        boolean moat = (!lava && (lev->typ != POOL) &&
                        (lev->typ != WATER) &&
                        !Is_medusa_level(&u.uz) &&
                        !Is_waterlevel(&u.uz));
        if (lev->typ == WATER) {
            /* For now, don't let WATER freeze. */
            if (cansee(x,y))
                pline_The("water freezes for a moment.");
            else
                You_hear("a soft crackling.");
            rangemod -= 1000;   /* stop */
        } else {
            rangemod -= 3;
            if (lev->typ == DRAWBRIDGE_UP) {
                lev->drawbridgemask &= ~DB_UNDER;  /* clear lava */
                lev->drawbridgemask |= (lava ? DB_FLOOR : DB_ICE);
            } else {
                /* remember what kind of pool froze so melting can
                   restore it */
                if (!lava)
                    lev->icedpool =
                        (lev->typ == POOL ? ICED_POOL : ICED_MOAT);
                lev->typ = (lava ? ROOM : ICE);
            }
            bury_objs(x,y);
            if(cansee(x,y)) {
                if(moat)
                    Norep("The moat is bridged with ice!");
                else if(lava)
                    Norep("The lava cools and solidifies.");
                else
                    Norep("The water freezes.");
                newsym(x,y);
            } else if(flags.soundok && !lava)
                You_hear("a crackling sound.");
            if (x == u.ux && y == u.uy) {
                if (u.uinwater) {   /* not just `if (Underwater)' */
                    /* leave the no longer existent water */
                    u.uinwater = 0;
                    u.uundetected = 0;
                    docrt();
                    vision_full_recalc = 1;
                } else if (u.utrap && u.utraptype == TT_LAVA) {
                    if (Passes_walls) {
                        You("pass through the now-solid rock.");
                    } else {
                        u.utrap = rn1(50,20);
                        u.utraptype = TT_INFLOOR;
                        You("are firmly stuck in the cooling rock.");
                    }
                }
            } else if ((mon = m_at(x,y)) != 0) {
                /* probably ought to do some hefty damage to any
                   non-ice creature caught in freezing water;
                   at a minimum, eels are forced out of hiding */
                if (is_swimmer(mon->data) && mon->mundetected) {
                    mon->mundetected = 0;
                    newsym(x,y);
                }
            }
        }
        obj_ice_effects(x,y,TRUE);
    }
    if(closed_door(x, y)) {
        /* elemental attacks can destroy or damage doors */
        int new_doormask = -1;
        const char *see_txt = 0, *sense_txt = 0, *hear_txt = 0;
        rangemod = -1000;
        switch(abstype) {
        case ZT_FIRE:
            new_doormask = D_NODOOR;
            see_txt = "The door is consumed in flames!";
            sense_txt = "smell smoke.";
            break;
        case ZT_COLD:
            new_doormask = D_NODOOR;
            see_txt = "The door freezes and shatters!";
            sense_txt = "feel cold.";
            break;
        case ZT_DEATH:
            /* death spells/wands don't disintegrate */
            if(abs(type) != ZT_BREATH(ZT_DEATH))
                goto def_case;
            new_doormask = D_NODOOR;
            see_txt = "The door disintegrates!";
            hear_txt = "crashing wood.";
            break;
        case ZT_LIGHTNING:
            new_doormask = D_BROKEN;
            see_txt = "The door splinters!";
            hear_txt = "crackling.";
            break;
        default:
        def_case:
            if(cansee(x,y)) {
                pline_The("door absorbs %s %s!",
                          (type < 0) ? "the" : "your",
                          abs(type) < ZT_SPELL(0) ? "bolt" :
                          abs(type) < ZT_BREATH(0) ? "spell" :
                          "blast");
            } else You_feel("vibrations.");
            break;
        }
        if (new_doormask >= 0) {    /* door gets broken */
            if (*in_rooms(x, y, SHOPBASE)) {
                if (type >= 0) {
                    add_damage(x, y, 400L);
                    *shopdamage = TRUE;
                } else  /* caused by monster */
                    add_damage(x, y, 0L);
            }
            lev->doormask = new_doormask;
            unblock_point(x, y);    /* vision */
            if (cansee(x, y)) {
                pline(see_txt);
                newsym(x, y);
            } else if (sense_txt) {
                You(sense_txt);
            } else if (hear_txt) {
                if (flags.soundok) You_hear(hear_txt);
            }
            /* abort any door-picking in progress here */
            if (picking_at(x, y)) {
                stop_occupation();
                reset_pick();
            }
        }
    }
    if(OBJ_AT(x, y) && abstype == ZT_FIRE)
        if (burn_floor_paper(x, y, FALSE, type > 0) && couldsee(x, y))  {
            newsym(x,y);
            You("%s of smoke.",
                !Blind ? "see a puff" : "smell a whiff");
        }
    if ((mon = m_at(x,y)) != 0) {
        /* Cannot use wakeup() which also angers the monster */
        mon->msleeping = 0;
        if(mon->m_ap_type) seemimic(mon);
        if(type >= 0) {
            setmangry(mon);
            if(mon->ispriest && *in_rooms(mon->mx, mon->my, TEMPLE))
                ghod_hitsu(mon);
            if(mon->isshk && !*u.ushops)
                hot_pursuit(mon);
        }
    }
    return rangemod;
}
#endif /*OVL0*/
#ifdef OVL3
/* Shatter a boulder or statue into a heap of rocks; caused by a pick-axe
 * or a wand of striking.  The object is converted in place.  No messages
 * are printed here -- callers provide the feedback. */
void
fracture_rock(obj)
register struct obj *obj;
{
    /* Destroying a Sokoban boulder is cheating; the gods take note. */
    if (obj->otyp == BOULDER && In_sokoban(&u.uz) && !flags.mon_moving)
        change_luck(-1);

    /* turn the object into an anonymous pile of rocks */
    obj->otyp = ROCK;
    obj->quan = (long) rn1(60, 7);
    obj->owt = weight(obj);		/* recompute after otyp/quan change */
    obj->oclass = GEM_CLASS;
    obj->known = FALSE;
    obj->onamelth = 0;			/* discard any name */
    obj->oxlth = 0;			/* discard extra data */
    obj->oattached = OATTACHED_NOTHING;

    if (obj->where == OBJ_FLOOR) {
        /* re-place so the fresh rocks land on top of the pile */
        obj_extract_self(obj);
        place_object(obj, obj->ox, obj->oy);
        if (!does_block(obj->ox, obj->oy, &levl[obj->ox][obj->oy]))
            unblock_point(obj->ox, obj->oy);	/* vision */
        if (cansee(obj->ox, obj->oy))
            newsym(obj->ox, obj->oy);
    }
}
/* Handle a statue hit by striking / force bolt / pick-axe.
 * Returns TRUE when the statue is actually broken, FALSE when a statue
 * trap fired instead (the statue came to life). */
boolean
break_statue(obj)
register struct obj *obj;
{
    /* [obj is assumed to be on floor, so no get_obj_location() needed] */
    struct trap *trap = t_at(obj->ox, obj->oy);
    struct obj *contents;

    if (trap && trap->ttyp == STATUE_TRAP
            && activate_statue_trap(trap, obj->ox, obj->oy, TRUE))
        return FALSE;

    /* spill anything that was hidden inside the statue */
    while ((contents = obj->cobj) != 0) {
        obj_extract_self(contents);
        place_object(contents, obj->ox, obj->oy);
    }

    /* archeologists feel bad about wrecking historic statues */
    if (Role_if(PM_ARCHEOLOGIST) && !flags.mon_moving
            && (obj->spe & STATUE_HISTORIC)) {
        You_feel("guilty about damaging such a historic statue.");
        adjalign(-1);
    }
    obj->spe = 0;
    fracture_rock(obj);
    return TRUE;
}
/* Message fragments for items destroyed by elemental damage.
 * Indexed as destroy_strings[dindx*3 + k] where
 *   k == 0: singular verb phrase ("Your potion freezes and shatters!"),
 *   k == 1: plural verb phrase,
 *   k == 2: cause-of-death noun phrase passed to losehp().
 * dindx values (see destroy_item/destroy_mitem):
 *   0 potion/cold, 1 potion/fire, 2 scroll/fire,
 *   3 spellbook/fire, 4 ring/shock (no damage, hence empty k==2),
 *   5 wand/shock. */
const char * const destroy_strings[] = {	/* also used in trap.c */
"freezes and shatters", "freeze and shatter", "shattered potion",
"boils and explodes", "boil and explode", "boiling potion",
"catches fire and burns", "catch fire and burn", "burning scroll",
"catches fire and burns", "catch fire and burn", "burning book",
"turns to dust and vanishes", "turn to dust and vanish", "",
"breaks apart and explodes", "break apart and explode", "exploding wand"
};
/* Destroy some of the hero's inventory of object class `osym' as a result
 * of elemental damage `dmgtyp':
 *   AD_COLD  shatters potions (except oil),
 *   AD_FIRE  boils potions and burns scrolls/spellbooks,
 *   AD_ELEC  dusts rings and explodes wands.
 * Each item in an eligible stack is destroyed on an independent 1-in-3
 * roll.  Side effects: vapors from smashed potions may be inhaled,
 * destroyed worn rings are taken off, current_wand is cleared if it is
 * destroyed, and the hero takes damage unless the matching resistance
 * (xresist) applies. */
void
destroy_item(osym, dmgtyp)
register int osym, dmgtyp;
{
	register struct obj *obj, *obj2;
	register int dmg, xresist, skip;
	register long i, cnt, quan;
	register int dindx;
	const char *mult;

	for(obj = invent; obj; obj = obj2) {
	    obj2 = obj->nobj;	/* fetch next now; obj may be used up below */
	    if(obj->oclass != osym) continue; /* test only objs of type osym */
	    if(obj->oartifact) continue; /* don't destroy artifacts */
	    if(obj->in_use && obj->quan == 1) continue; /* not available */
	    xresist = skip = 0;
#ifdef GCC_WARN
	    dmg = dindx = 0;
	    quan = 0L;
#endif
	    /* decide whether this stack is affected, and how */
	    switch(dmgtyp) {
		case AD_COLD:
		    if(osym == POTION_CLASS && obj->otyp != POT_OIL) {
			quan = obj->quan;
			dindx = 0;	/* "freezes and shatters" */
			dmg = rnd(4);
		    } else skip++;
		    break;
		case AD_FIRE:
		    xresist = (Fire_resistance && obj->oclass != POTION_CLASS);
		    /* fire sources are immune to fire */
		    if (obj->otyp == SCR_FIRE || obj->otyp == SPE_FIREBALL)
			skip++;
		    if (obj->otyp == SPE_BOOK_OF_THE_DEAD) {
			skip++;
			if (!Blind)
			    pline("%s glows a strange %s, but remains intact.",
				The(xname(obj)), hcolor("dark red"));
		    }
		    quan = obj->quan;
		    switch(osym) {
			case POTION_CLASS:
			    dindx = 1;	/* "boils and explodes" */
			    dmg = rnd(6);
			    break;
			case SCROLL_CLASS:
			    dindx = 2;	/* "catches fire and burns" */
			    dmg = 1;
			    break;
			case SPBOOK_CLASS:
			    dindx = 3;	/* "catches fire and burns" */
			    dmg = 1;
			    break;
			default:
			    skip++;
			    break;
		    }
		    break;
		case AD_ELEC:
		    xresist = (Shock_resistance && obj->oclass != RING_CLASS);
		    quan = obj->quan;
		    switch(osym) {
			case RING_CLASS:
			    if(obj->otyp == RIN_SHOCK_RESISTANCE)
				    { skip++; break; }
			    dindx = 4;	/* "turns to dust and vanishes" */
			    dmg = 0;
			    break;
			case WAND_CLASS:
			    if(obj->otyp == WAN_LIGHTNING) { skip++; break; }
#if 0
			    if (obj == current_wand) { skip++; break; }
#endif
			    dindx = 5;	/* "breaks apart and explodes" */
			    dmg = rnd(10);
			    break;
			default:
			    skip++;
			    break;
		    }
		    break;
		default:
		    skip++;
		    break;
	    }
	    if(!skip) {
		if (obj->in_use) --quan; /* one will be used up elsewhere */
		/* each item in the stack is destroyed on a 1-in-3 roll */
		for(i = cnt = 0L; i < quan; i++)
		    if(!rn2(3)) cnt++;

		if(!cnt) continue;
		if(cnt == quan) mult = "Your";
		else mult = (cnt == 1L) ? "One of your" : "Some of your";
		pline("%s %s %s!", mult, xname(obj),
			(cnt > 1L) ? destroy_strings[dindx*3 + 1]
				  : destroy_strings[dindx*3]);
		/* inhale vapors of smashed (non-frozen) potions */
		if(osym == POTION_CLASS && dmgtyp != AD_COLD) {
		    if (!breathless(youmonst.data) || haseyes(youmonst.data))
			potionbreathe(obj);
		}
		/* un-wear a destroyed worn item before using it up */
		if (obj->owornmask) {
		    if (obj->owornmask & W_RING) /* ring being worn */
			Ring_gone(obj);
		    else
			setnotworn(obj);
		}
		if (obj == current_wand) current_wand = 0;	/* destroyed */
		for (i = 0; i < cnt; i++)
		    useup(obj);
		if(dmg) {
		    if(xresist) You("aren't hurt!");
		    else {
			const char *how = destroy_strings[dindx * 3 + 2];
			boolean one = (cnt == 1L);

			losehp(dmg, one ? how : (const char *)makeplural(how),
			       one ? KILLED_BY_AN : KILLED_BY);
			exercise(A_STR, FALSE);
		    }
		}
	    }
	}
	return;
}
/* Monster counterpart of destroy_item(): destroy some of mtmp's inventory
 * of object class `osym' due to elemental damage `dmgtyp'.  Returns a
 * count incremented once per affected stack category (callers use it as
 * a rough "anything happened" indicator); delegates to destroy_item()
 * and returns 0 when mtmp is the hero. */
int
destroy_mitem(mtmp, osym, dmgtyp)
struct monst *mtmp;
int osym, dmgtyp;
{
	struct obj *obj, *obj2;
	int skip, tmp = 0;
	long i, cnt, quan;
	int dindx;
	boolean vis;

	if (mtmp == &youmonst) {	/* this simplifies artifact_hit() */
	    destroy_item(osym, dmgtyp);
	    return 0;	/* arbitrary; value doesn't matter to artifact_hit() */
	}

	vis = canseemon(mtmp);
	for(obj = mtmp->minvent; obj; obj = obj2) {
	    obj2 = obj->nobj;	/* fetch next now; obj may be used up below */
	    if(obj->oclass != osym) continue; /* test only objs of type osym */
	    skip = 0;
	    quan = 0L;
	    dindx = 0;
	    switch(dmgtyp) {
		case AD_COLD:
		    if(osym == POTION_CLASS && obj->otyp != POT_OIL) {
			quan = obj->quan;
			dindx = 0;	/* "freezes and shatters" */
			tmp++;
		    } else skip++;
		    break;
		case AD_FIRE:
		    /* fire sources are immune to fire */
		    if (obj->otyp == SCR_FIRE || obj->otyp == SPE_FIREBALL)
			skip++;
		    if (obj->otyp == SPE_BOOK_OF_THE_DEAD) {
			skip++;
			if (vis)
			    pline("%s glows a strange %s, but remains intact.",
				The(distant_name(obj, xname)),
				hcolor("dark red"));
		    }
		    quan = obj->quan;
		    switch(osym) {
			case POTION_CLASS:
			    dindx = 1;	/* "boils and explodes" */
			    tmp++;
			    break;
			case SCROLL_CLASS:
			    dindx = 2;	/* "catches fire and burns" */
			    tmp++;
			    break;
			case SPBOOK_CLASS:
			    dindx = 3;	/* "catches fire and burns" */
			    tmp++;
			    break;
			default:
			    skip++;
			    break;
		    }
		    break;
		case AD_ELEC:
		    quan = obj->quan;
		    switch(osym) {
			case RING_CLASS:
			    if(obj->otyp == RIN_SHOCK_RESISTANCE)
				    { skip++; break; }
			    dindx = 4;	/* "turns to dust and vanishes" */
			    /* NOTE(review): tmp is not incremented here,
			     * unlike the wand case below -- verify whether
			     * that is intentional */
			    break;
			case WAND_CLASS:
			    if(obj->otyp == WAN_LIGHTNING) { skip++; break; }
			    dindx = 5;	/* "breaks apart and explodes" */
			    tmp++;
			    break;
			default:
			    skip++;
			    break;
		    }
		    break;
		default:
		    skip++;
		    break;
	    }
	    if(!skip) {
		/* each item in the stack is destroyed on a 1-in-3 roll */
		for(i = cnt = 0L; i < quan; i++)
		    if(!rn2(3)) cnt++;

		if(!cnt) continue;
		if (vis) pline("%s %s %s!",
			s_suffix(Monnam(mtmp)), xname(obj),
			(cnt > 1L) ? destroy_strings[dindx*3 + 1]
				  : destroy_strings[dindx*3]);
		for(i = 0; i < cnt; i++) m_useup(mtmp, obj);
	    }
	}
	return(tmp);
}
#endif /*OVL3*/
#ifdef OVL2
/* Magic-resistance check for monster mtmp against an effect delivered by
 * an object of class `oclass' (spell when the class isn't recognized).
 * A successful resistance halves `damage' (rounding up) and, with `tell',
 * prints feedback.  Any remaining damage is applied here, possibly
 * killing the monster.  Returns nonzero when the monster resisted. */
int
resist(mtmp, oclass, damage, tell)
struct monst *mtmp;
char oclass;
int damage, tell;
{
    int attack_lev, defense_lev, did_resist;

    /* strength of the attack, by the class of object delivering it */
    switch (oclass) {
    case WAND_CLASS:
        attack_lev = 12;
        break;
    case TOOL_CLASS:		/* instrument */
    case WEAPON_CLASS:		/* artifact */
        attack_lev = 10;
        break;
    case SCROLL_CLASS:
        attack_lev = 9;
        break;
    case POTION_CLASS:
        attack_lev = 6;
        break;
    case RING_CLASS:
        attack_lev = 5;
        break;
    default:			/* spell */
        attack_lev = u.ulevel;
        break;
    }

    /* defender's level, clamped to [1, 50]; mplayers default to hero level */
    defense_lev = (int) mtmp->m_lev;
    if (defense_lev > 50)
        defense_lev = 50;
    else if (defense_lev < 1)
        defense_lev = is_mplayer(mtmp->data) ? u.ulevel : 1;

    did_resist = rn2(100 + attack_lev - defense_lev) < mtmp->data->mr;
    if (did_resist) {
        if (tell) {
            shieldeff(mtmp->mx, mtmp->my);
            pline("%s resists!", Monnam(mtmp));
        }
        damage = (damage + 1) / 2;	/* resistance halves, rounding up */
    }

    if (damage) {
        mtmp->mhp -= damage;
        if (mtmp->mhp < 1) {
            if (m_using)
                monkilled(mtmp, "", AD_RBRE);
            else
                killed(mtmp);
        }
    }
    return did_resist;
}
/* Prompt the hero for a wish and grant it.  Up to five malformed answers
 * are allowed before a random object is forced.  Wishing for "nothing"
 * returns early without charging the wishless conduct counter. */
void
makewish()
{
    char buf[BUFSZ];
    struct obj *wishobj, nothing;
    int tries;

    nothing = zeroobj;	/* lint suppression; only its address matters */
    if (flags.verbose) You("may wish for an object.");

    for (tries = 0; ; ) {
        getlin("For what do you wish?", buf);
        if (buf[0] == '\033') buf[0] = 0;
        /*
         * readobjnam() returns &zeroobj for a successful non-object wish
         * (gold, or a denied artifact) and the distinct &nothing address
         * when "nothing" was explicitly requested.
         */
        wishobj = readobjnam(buf, &nothing, TRUE);
        if (wishobj) break;
        pline("Nothing fitting that description exists in the game.");
        if (++tries >= 5) {
            pline(thats_enough_tries);
            /* force a random object; cannot yield &nothing (null base) */
            wishobj = readobjnam((char *)0, (struct obj *)0, TRUE);
            if (!wishobj) return;	/* for safety; should never happen */
            break;
        }
    }
    if (wishobj == &nothing) {
        /* explicit wish for "nothing", presumably attempting to retain
           wishless conduct */
        return;
    }

    /* KMH, conduct */
    u.uconduct.wishes++;

    if (wishobj != &zeroobj) {
        /* The(aobjnam()) is safe since wishobj is unidentified -dlc */
        (void) hold_another_object(wishobj, u.uswallow ?
                   "Oops! %s out of your reach!" :
                   (Is_airlevel(&u.uz) ||
                    Is_waterlevel(&u.uz) ||
                    levl[u.ux][u.uy].typ < IRONBARS ||
                    levl[u.ux][u.uy].typ >= ICE) ?
                       "Oops! %s away from you!" :
                       "Oops! %s to the floor!",
               The(aobjnam(wishobj,
                       Is_airlevel(&u.uz) || u.uinwater ? "slip" : "drop")),
               (const char *)0);
        u.ublesscnt += rn1(100,50);	/* the gods take notice */
    }
}
#endif /*OVL2*/
/*zap.c*/
| 51,688 |
2,542 |
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#pragma once
#include "stdafx.h"
namespace ReliableMessaging
{
/*
There are two use cases for the KNodeWrapper template
1. As a KNode wrapper that can be used in constructing NOFAIL KNodeList and similar KTL data structures
of ItemType objects when they do not contain the required KListEntry field for allocation-free linking
2. As a KNode wrapper that can be used in constructing NOFAIL KNodeList and similar KTL data structures
of ItemType objects that do contain the required KListEntry field for allocation-free linking when that
field is already in use in a different data structure, e.g., a KHashTable
*/
template <class ItemType>
class KNodeWrapper : public KObject<KNodeWrapper<ItemType>>, public KShared<KNodeWrapper<ItemType>>
{
    K_FORCE_SHARED(KNodeWrapper<ItemType>)

public:
    // Byte offset of qlink_ within this wrapper; passed to KNodeList and
    // similar KTL constructors that need the embedded link location.
    static const ULONG LinkOffset;

    // Wraps `content' by value.
    KNodeWrapper(ItemType content)
    {
        content_ = content;
    }

    // Returns a copy of the wrapped value.
    ItemType GetContent()
    {
        return content_;
    }

private:
    ItemType content_;		// the wrapped item, held by value
    KListEntry qlink_;		// link field used for allocation-free linking
};
/*
The ResourcePool template class is used for managing pools of reusable ItemType instances.
A set of ItemType instances is pre-allocated when the ResourcePool is constructed.
The size of the initial pool is a constructor parameter.
The ItemType must derive from KShared<ItemType> and must support a static Create factory
method with the signature
static Common::ErrorCode ItemType::Create(__in KAllocator& allocator, __out KSharedPtr<ItemType>& result)
which is used to populate the initial pool of ItemType instances as well as later instances
if the initial pool runs out (currently not implementing the latter use case).
Items are initialized outside of the pool functionality, however ItemType instances must
implement a parameterless Reuse method that is called to wipe ItemType instances when they
are returned to the pool.
Pooled ItemType instances are held in a KNodeListShared structure. As such they must contain
a LIST_ENTRY element and must supply its FIELD_OFFSET to the constructor of the ResourcePool.
*/
template <class ItemType>
class ResourcePool : public KObject<ResourcePool<ItemType>>
{
public:
    // LinkOffset is the FIELD_OFFSET of the LIST_ENTRY inside ItemType,
    // required by the underlying KNodeListShared.
    ResourcePool(KAllocator &allocator, ULONG const LinkOffset) :
        pool_(LinkOffset),
        allocator_(allocator)
    {}

    // Initialize allocates real resources to the pool -- typically as part of a constructor or Open process.
    // Failure to Initialize is meant to be a cause for constructor/Open failure, hence Initialize will clear
    // the pool if the initial volume of resources cannot be allocated, before returning with failure.
    Common::ErrorCode Initialize(LONG initialPoolSize, LONG increment)
    {
        initialPoolSize_ = initialPoolSize;
        increment_ = increment;

        for (LONG i = 0; i < initialPoolSize_; i++)
        {
            KSharedPtr<ItemType> newItem = nullptr;
            Common::ErrorCode createResult = ItemType::Create(allocator_, newItem);

            if (!createResult.IsSuccess())
            {
                pool_.Reset();
                return Common::ErrorCodeValue::OutOfMemory;
            }

            pool_.AppendTail(newItem);
        }

        return Common::ErrorCodeValue::Success;
    }

    // Grow is typically called when the pool runs out of resources, depending on throttling policy.
    // Grow is best effort -- it will return OutOfMemory if it cannot allocate the full increment asked
    // for, but will not clear the pool of resources that have been successfully allocated.
    Common::ErrorCode Grow(ULONG increment)
    {
        for (ULONG i = 0; i < increment; i++)
        {
            KSharedPtr<ItemType> newItem = nullptr;
            Common::ErrorCode createResult = ItemType::Create(allocator_, newItem);

            if (!createResult.IsSuccess())
            {
                return Common::ErrorCodeValue::OutOfMemory;
            }

            K_LOCK_BLOCK(poolLock_)
            {
                pool_.AppendTail(newItem);
            }
        }

        return Common::ErrorCodeValue::Success;
    }

    // Shrink is typically called when the pool exceeds a policy maximum of resources.
    // Shrink is best effort -- it will remove and release at most the increment asked for,
    // but will stop without failure if the pool runs out before the increment is satisfied.
    void Shrink(ULONG increment)
    {
        for (ULONG i = 0; i < increment; i++)
        {
            KSharedPtr<ItemType> newItem = GetItem();

            // BUGFIX: was `newItem !- nullptr` -- a typo (`!-` for `!=`)
            // that does not compile.
            if (newItem != nullptr)
            {
                // drop the pool's reference so the item can be freed
                ItemType *itemToRelease = newItem.Detach();
                itemToRelease->Release();
            }
            else
            {
                // OK to shrink till we hit empty
                break;
            }
        }
    }

    // Removes and returns an item from the pool; null when the pool is empty.
    KSharedPtr<ItemType> GetItem()
    {
        KSharedPtr<ItemType> itemToReturn;

        K_LOCK_BLOCK(poolLock_)
        {
            itemToReturn = pool_.RemoveHead();
        }

        return itemToReturn;
    }

    // Like GetItem, but grows the pool by increment_ (set in Initialize)
    // until an item is obtained or growth fails.
    KSharedPtr<ItemType> GetItemGrowIfNeeded()
    {
        KSharedPtr<ItemType> item = GetItem();
        Common::ErrorCode growResult = Common::ErrorCodeValue::Success;

        while (item == nullptr && growResult.IsSuccess())
        {
            growResult = Grow(increment_);
            item = GetItem();
        }

        return item;
    }

    // Wipes the item via its Reuse() contract and puts it back in the pool.
    void ReturnItem(__in KSharedPtr<ItemType>& item)
    {
        item->Reuse();

        K_LOCK_BLOCK(poolLock_)
        {
            pool_.AppendTail(item);
        }
    }

private:
    KNodeListShared<ItemType> pool_;	// pooled items (guarded by poolLock_)
    LONG initialPoolSize_;
    LONG increment_;			// growth step for GetItemGrowIfNeeded
    KSpinLock poolLock_;
    KAllocator &allocator_;
};
/*
The HashTableMap template class is a wrapper for KNodeHashTable that implements the patterns we need.
It assumes that ItemType derives from KShared<ItemType> and is designed for use with KNodeHashTable.
Specifically, HashTableMap
- manages AddRef and Release on successful insertion and deletion
- always uses FALSE for ForceUpdate
- uses simple bool success/failre reporting
- will eventually transparently manage Resizing
*/
template <class KeyType, class ItemType>
class HashTableMap : public KObject<HashTableMap<KeyType, ItemType>>
{
public:
    typedef KDelegate<ULONG(const KeyType& Key)> HashFunctionType;

    HashTableMap(
        __in ULONG size,
        __in HashFunctionType hashFunction,
        __in ULONG keyOffset,
        __in ULONG linkOffset,
        __in KAllocator& allocator
        )
        : table_(size, hashFunction, keyOffset, linkOffset, allocator)
    {}

    // Looks up keyVal; on success `item' points at the stored element.
    bool Find(
        __in KeyType& keyVal,
        __out ItemType*& item)
    {
        return NT_SUCCESS(table_.Get(keyVal, item));
    }

    // Removes the item and drops the reference taken by Insert.
    bool Remove(
        __in ItemType *item)
    {
        if (!NT_SUCCESS(table_.Remove(item)))
        {
            return false;
        }

        item->Release();
        return true;
    }

    // Inserts without ForceUpdate; takes a reference on success.
    bool Insert(
        __in ItemType *newItem)
    {
        NTSTATUS status = table_.Put(newItem, FALSE);

        if (!NT_SUCCESS(status))
        {
            // the only failure status possible is collision
            ASSERT_IFNOT(status == STATUS_OBJECT_NAME_COLLISION, "Unexpected status returned from KNodeHashTable Put");
            return false;
        }

        newItem->AddRef();
        return true;
    }

private:
    KNodeHashTable<KeyType, ItemType> table_;
};
}
| 2,577 |
578 |
<filename>src/infra/Cpp20.h
#ifdef TWO_MODULES
#include <infra/Config.h>
#include <cassert>
#include <stdint.h>
#include <cfloat>
#include <climits>
//#include <cmath>
#include <ctime>
#include <cstring>
#include <cstdio>
#include <cstdlib>
#endif
| 108 |
348 |
{"nom":"Crempigny-Bonneguête","dpt":"Haute-Savoie","inscrits":215,"abs":41,"votants":174,"blancs":16,"nuls":4,"exp":154,"res":[{"panneau":"1","voix":82},{"panneau":"2","voix":72}]}
| 79 |
1,581 |
<gh_stars>1000+
//
// UIScrollView+ADKPullToRefreshView.h
// AppDevKit
//
// Created by <NAME> on 12/23/13.
// Copyright © 2013, Yahoo Inc.
// Licensed under the terms of the BSD License.
// Please see the LICENSE file in the project root for terms.
//
#import <UIKit/UIKit.h>
@class ADKPullToRefreshContentView;
@protocol ADKPullToRefreshViewProtocol <NSObject>

@required

// State callbacks invoked as the pull-to-refresh cycle changes state.
- (void)ADKPullToRefreshStopped:(UIScrollView *)scrollView;
- (void)ADKPullToRefreshTriggered:(UIScrollView *)scrollView;
- (void)ADKPullToRefreshLoading:(UIScrollView *)scrollView;

@optional

/**
 * @brief Determines the pull distance at which refresh triggers, expressed as a multiple of the handle view height. Default is 1.5.
 *
 * @return Multiple of the handle view height.
 */
- (CGFloat)ADKPullToRefreshTriggerDistanceTimes:(UIScrollView *)scrollView;

// Dragging callbacks; progress runs toward the trigger distance.
- (void)ADKPullToRefreshDragging:(UIScrollView *)scrollView;
- (void)ADKPullToRefreshView:(UIScrollView *)scrollView draggingWithProgress:(CGFloat)progress;

@end
@interface UIScrollView (ADKPullToRefreshView)

/**
 * @brief Adds a pull-to-refresh view at the top of the scroll view's content; when pull-to-refresh triggers, actionHandler is invoked.
 *
 * @param refreshHandleView A UIView that conforms to ADKPullToRefreshViewProtocol
 * @param actionHandler Block invoked when pull-to-refresh is triggered
 */
- (void)ADKAddPullToRefreshWithHandleView:(UIView <ADKPullToRefreshViewProtocol> *)refreshHandleView actionHandler:(void (^)(void))actionHandler;

/**
 * @brief Programmatically triggers pull-to-refresh; invokes actionHandler.
 */
- (void)ADKTriggerPullToRefresh;

/**
 * @brief Manages animation and the refresh state machine. See ADKPullToRefreshContentView.
 */
@property (nonatomic, readonly) ADKPullToRefreshContentView *pullToRefreshContentView;

/**
 * @brief Changes the content size to make refreshHandleView visible or not.
 */
@property (nonatomic, assign) BOOL showPullToRefresh;

/**
 * @brief The distance between the bottom of pullToRefreshContentView and the top value of contentInset. The default value is 0. This property should be set after -ADKAddPullToRefreshWithHandleView:actionHandler: is invoked.
 */
@property (nonatomic, assign) CGFloat pullToRefreshContentViewBottomMargin;

@end
// States of the pull-to-refresh life cycle (see the protocol callbacks above).
typedef NS_ENUM(NSUInteger, ADKPullToRefreshState) {
    ADKPullToRefreshStateStopped = 1,	// idle; no refresh in progress
    ADKPullToRefreshStateDragging,	// user is pulling
    ADKPullToRefreshStateTriggered,	// pulled past the trigger distance
    ADKPullToRefreshStateLoading,	// refresh action running
};
@interface ADKPullToRefreshContentView : UIView

/**
 * @brief Starts the refresh animation. Invoked when the state becomes ADKPullToRefreshStateLoading.
 */
- (void)startAnimating;

/**
 * @brief Stops the pull-to-refresh animation. Remember to call this method when your data is ready, to end the animation.
 */
- (void)stopAnimating;

/**
 * @brief Same as stopAnimating, but additionally scrolls the scroll view back to the top.
 */
- (void)stopAnimatingAndScrollToTop;

/**
 * @brief Top inset value of the original scroll view.
 */
@property (nonatomic, assign) CGFloat originalTopInset;

/**
 * @brief Bottom inset value of the original scroll view.
 */
@property (nonatomic, assign) CGFloat originalBottomInset;

/**
 * @brief Current state of pull-to-refresh.
 */
@property (nonatomic, readonly) ADKPullToRefreshState state;

/**
 * @brief Enables an automatic fade-in/fade-out effect when YES.
 */
@property (nonatomic, assign) BOOL autoFadeEffect;

/**
 * @brief Supports display status changes when using fake drag behavior. (For example: O2O new entry)
 */
@property (nonatomic, assign) BOOL detectDisplayStatusMode;

@end
| 1,134 |
701 |
<reponame>Keyboard-Slayer/brutal<filename>sources/libs/brutal/math/flow.h
#pragma once
#include <brutal/math/rect.h>
/* Layout flow direction: the axis and direction along which content is
 * laid out.  "start"/"end" below are flow-relative edges (e.g. start is
 * the left edge for LEFT_TO_RIGHT). */
typedef enum
{
    M_FLOW_LEFT_TO_RIGHT,
    M_FLOW_RIGHT_TO_LEFT,
    M_FLOW_TOP_TO_BOTTOM,
    M_FLOW_BOTTOM_TO_TOP,
} MFlow;

/* Resolve a child flow expressed relative to a parent flow. */
MFlow m_flow_relative(MFlow parent, MFlow child);

/* Vector pointing in the flow direction. */
MVec2 m_flow_to_vec(MFlow flow);

/* Flow-relative accessors over a rectangle. */
float m_flow_get_start(MFlow flow, MRect rect);
float m_flow_get_end(MFlow flow, MRect rect);
float m_flow_get_top(MFlow flow, MRect rect);
float m_flow_get_bottom(MFlow flow, MRect rect);
float m_flow_get_width(MFlow flow, MRect rect);
float m_flow_get_height(MFlow flow, MRect rect);
MVec2 m_flow_get_origin(MFlow flow, MRect rect);
float m_flow_get_hcenter(MFlow flow, MRect rect);
float m_flow_get_vcenter(MFlow flow, MRect rect);

/* Flow-relative setters: return a copy of rect (MRect is passed and
 * returned by value) with the given edge or dimension updated. */
MRect m_flow_set_start(MFlow flow, MRect rect, float value);
MRect m_flow_set_x(MFlow flow, MRect rect, float value);
MRect m_flow_set_end(MFlow flow, MRect rect, float value);
MRect m_flow_set_top(MFlow flow, MRect rect, float value);
MRect m_flow_set_y(MFlow flow, MRect rect, float value);
MRect m_flow_set_bottom(MFlow flow, MRect rect, float value);
MRect m_flow_set_origin(MFlow flow, MRect rect, MVec2 value);
MRect m_flow_set_width(MFlow flow, MRect rect, float value);
MRect m_flow_set_height(MFlow flow, MRect rect, float value);
| 556 |
307 |
<filename>code/graphics/tmapper.h<gh_stars>100-1000
/*
* Copyright (C) Volition, Inc. 1999. All rights reserved.
*
* All source code herein is the property of Volition, Inc. You may not sell
* or otherwise commercially exploit the source or things you created based on the
* source.
*
*/
#ifndef _TMAPPER_H
#define _TMAPPER_H
#include "globalincs/pstypes.h"
/*
struct vertex;
// call this to reinit the scanline function pointers.
extern void tmapper_setup();
// Used to tell the tmapper what the current lighting values are
// if the TMAP_FLAG_RAMP or TMAP_FLAG_RGB are set and the TMAP_FLAG_GOURAUD
// isn't set.
void tmapper_set_light(vertex *v, uint flags);
// DO NOT CALL grx_tmapper DIRECTLY!!!! Only use the
// gr_tmapper equivalent!!!!
extern void grx_tmapper( int nv, vertex * verts[], uint flags );
*/
#define TMAP_MAX_VERTS 25 // Max number of vertices per polygon
// Flags to pass to g3_draw_??? routines
#define TMAP_FLAG_TEXTURED (1<<0) // Uses texturing (Interpolate uv's)
#define TMAP_FLAG_CORRECT (1<<1) // Perspective correct (Interpolate sw)
#define TMAP_FLAG_RAMP (1<<2) // Use RAMP lighting (interpolate L)
#define TMAP_FLAG_RGB (1<<3) // Use RGB lighting (interpolate RGB)
#define TMAP_FLAG_GOURAUD (1<<4) // Lighting values differ on each vertex.
// If this is not set, then the texture mapper will use
// the lighting parameters in each vertex, otherwise it
// will use the ones specified in tmapper_set_??
#define TMAP_FLAG_XPARENT (1<<5) // texture could have transparency
#define TMAP_FLAG_TILED (1<<6) // This means uv's can be > 1.0
#define TMAP_FLAG_NEBULA (1<<7) // Must be used with RAMP and GOURAUD. Means l 0-1 is 0-31 palette entries
//#define TMAP_HIGHEST_FLAG_BIT 7 // The highest bit used in the TMAP_FLAGS
//#define TMAP_MAX_SCANLINES (1<<(TMAP_HIGHEST_FLAG_BIT+1))
// Add any entries that don't work for software under here:
// Make sure to disable them at top of grx_tmapper
#define TMAP_FLAG_ALPHA (1<<8) // Has an alpha component
#define TMAP_FLAG_BATCH_TRANSFORMS (1<<9) // Use batched transform data transmitted via texture/uniform buffer
// Interface specific stuff (for separate filtering, sizing, etc.), replaces old TMAP_FLAG_BITMAP_SECTION
#define TMAP_FLAG_INTERFACE (1<<10)
// flags for full nebula effect
#define TMAP_FLAG_PIXEL_FOG (1<<11) // fog the polygon based upon the average pixel colors of the backbuffer behind it
// RT Flags added to determine whats being drawn for HT&L
#define TMAP_HTL_3D_UNLIT (1<<12)
#define TMAP_HTL_2D (1<<13) // I don't think this flag is being used (Swifty)
//tristrips, for trails mostly, might find other uses eventualy
#define TMAP_FLAG_TRISTRIP (1<<14)
#define TMAP_FLAG_TRILIST (1<<15)
#define TMAP_FLAG_QUADLIST (1<<16)
#define TMAP_FLAG_QUADSTRIP (1<<17)
// use greyscale texture
#define TMAP_FLAG_BW_TEXTURE (1<<18)
// use animated Shader - Valathil
#define TMAP_ANIMATED_SHADER (1<<19)
// use soft particle shader - Swifty
#define TMAP_FLAG_SOFT_QUAD (1<<20)
// use framebuffer distortion mapping with generated distortion map - Valathil
#define TMAP_FLAG_DISTORTION_THRUSTER (1<<21)
// use framebuffer distortion mapping - Valathil
#define TMAP_FLAG_DISTORTION (1<<22)
#define TMAP_FLAG_DESATURATE (1<<23)
#define TMAP_FLAG_POINTLIST (1<<24)
#define TMAP_FLAG_LINESTRIP (1<<25)
#define TMAP_FLAG_LINES (1<<26)
#define TMAP_FLAG_VERTEX_GEN (1<<27)
#define TMAP_FLAG_EMISSIVE (1<<28)
#define TMAP_ADDRESS_WRAP 1
#define TMAP_ADDRESS_MIRROR 2
#define TMAP_ADDRESS_CLAMP 3
//WMC - moved this here so it'd be in 2d.h and 3d.h
//bitmap_2d_list,
//x and y: the 2d position of the upper left hand corner
//w and h: the width and height of the bitmap (some functions
//will override these; others will only override if given an invalid size like 0 or -1)
struct bitmap_2d_list{
	bitmap_2d_list(int X=0, int Y=0, int W=-1, int H=-1):x(X),y(Y),w(W),h(H){}
	int x;
	int y;
	int w;
	int h;
};
//texture_rect
//defines a rectangular region within a texture
//similar to the above structure, only all values are relative:
//from 0,0 in the upper left to 1,1 in the lower right
//out of range values are valid
struct texture_rect_list{
	texture_rect_list(float u0In=0.0f, float v0In=0.0f, float u1In=1.0f, float v1In=1.0f):u0(u0In),v0(v0In),u1(u1In),v1(v1In){}
	float u0;
	float v0;
	float u1;
	float v1;
};
//Pairs a screen-space rectangle with the texture-space rectangle mapped onto it.
struct bitmap_rect_list{
	bitmap_rect_list(float X, float Y, float W, float H):texture_rect(X,Y,W,H){}
	bitmap_rect_list(int X=0, int Y=0, int W=-1, int H=-1, float TX=0.0f, float TY=0.0f, float TW=1.0f, float TH=1.0f):screen_rect(X,Y,W,H),texture_rect(TX,TY,TW,TH){}
	bitmap_2d_list screen_rect;
	texture_rect_list texture_rect;
};
#endif
| 1,903 |
558 |
package skadistats.clarity.processor.sendtables;
import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
import it.unimi.dsi.fastutil.ints.IntSet;
import org.slf4j.Logger;
import skadistats.clarity.io.s2.Field;
import skadistats.clarity.io.s2.FieldType;
import skadistats.clarity.io.s2.S2DTClass;
import skadistats.clarity.io.s2.S2DecoderFactory;
import skadistats.clarity.io.s2.Serializer;
import skadistats.clarity.io.s2.SerializerId;
import skadistats.clarity.io.s2.field.ArrayField;
import skadistats.clarity.io.s2.field.VectorField;
import skadistats.clarity.io.s2.field.PointerField;
import skadistats.clarity.io.s2.field.SerializerField;
import skadistats.clarity.io.s2.field.ValueField;
import skadistats.clarity.logger.PrintfLoggerFactory;
import skadistats.clarity.model.BuildNumberRange;
import skadistats.clarity.wire.s2.proto.S2NetMessages;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import static skadistats.clarity.LogChannel.sendtables;
public class FieldGenerator {
private final Logger log = PrintfLoggerFactory.getLogger(sendtables);

// Raw flattened-serializer tables this generator decodes.
private final S2NetMessages.CSVCMsg_FlattenedSerializer protoMessage;
// Per-field decode data, parallel to protoMessage.getFields().
private final FieldData[] fieldData;
// Name symbols already validated by fieldNameFunction().
private final IntSet checkedNames;
// Patch functions whose build-number range matches this replay.
private final List<PatchFunc> patchFuncs;
// Serializers built so far, keyed by (name, version).
private final Map<SerializerId, Serializer> serializers = new HashMap<>();

/**
 * @param protoMessage flattened serializer tables from the replay
 * @param buildNumber  game build number, used to select applicable patches
 */
public FieldGenerator(S2NetMessages.CSVCMsg_FlattenedSerializer protoMessage, int buildNumber) {
    this.protoMessage = protoMessage;
    this.fieldData = new FieldData[protoMessage.getFieldsCount()];
    this.checkedNames = new IntOpenHashSet();
    this.patchFuncs = new ArrayList<>();
    for (Map.Entry<BuildNumberRange, PatchFunc> patchEntry : PATCHES.entrySet()) {
        if (patchEntry.getKey().appliesTo(buildNumber)) {
            this.patchFuncs.add(patchEntry.getValue());
        }
    }
}
/**
 * Builds the FieldData for every proto field, then builds every serializer.
 * Must run before createDTClass().
 */
public void createFields() {
    // pass 1: decode properties for each flattened field
    for (int i = 0; i < fieldData.length; i++) {
        fieldData[i] = generateFieldData(protoMessage.getFields(i));
    }
    // pass 2: serializers, which reference the field data built above
    for (int i = 0; i < protoMessage.getSerializersCount(); i++) {
        Serializer serializer = generateSerializer(protoMessage.getSerializers(i));
        serializers.put(serializer.getId(), serializer);
    }
}
/**
 * Wraps the serializer registered under (name, version 0) in an S2 DT class.
 *
 * @param name serializer/class name
 * @return the corresponding DT class
 */
public S2DTClass createDTClass(String name) {
    SerializerField field = new SerializerField(
            FieldType.forString(name),
            serializers.get(new SerializerId(name, 0))
    );
    return new S2DTClass(field);
}
/**
 * Converts one flattened proto field into FieldData: parsed field type,
 * validated name, decoder properties, and -- when present -- the id of
 * the serializer that decodes its contents.
 */
private FieldData generateFieldData(S2NetMessages.ProtoFlattenedSerializerField_t proto) {
    return new FieldData(
            FieldType.forString(sym(proto.getVarTypeSym())),
            fieldNameFunction(proto),
            // optional proto fields are mapped to nulls
            new ProtoDecoderProperties(
                    proto.hasEncodeFlags() ? proto.getEncodeFlags() : null,
                    proto.hasBitCount() ? proto.getBitCount() : null,
                    proto.hasLowValue() ? proto.getLowValue() : null,
                    proto.hasHighValue() ? proto.getHighValue() : null,
                    proto.hasVarEncoderSym() ? sym(proto.getVarEncoderSym()) : null
            ),
            proto.hasFieldSerializerNameSym() ?
                    new SerializerId(
                            sym(proto.getFieldSerializerNameSym()),
                            proto.getFieldSerializerVersion()
                    ) : null
    );
}
/**
 * Builds a Serializer from its proto description.  Field objects are
 * created lazily and cached on their FieldData, so a field shared by
 * several serializers is only constructed once.
 */
private Serializer generateSerializer(S2NetMessages.ProtoFlattenedSerializer_t proto) {
    SerializerId sid = new SerializerId(
            sym(proto.getSerializerNameSym()),
            proto.getSerializerVersion()
    );
    Field[] fields = new Field[proto.getFieldsIndexCount()];
    String[] fieldNames = new String[proto.getFieldsIndexCount()];
    for (int i = 0; i < fields.length; i++) {
        int fi = proto.getFieldsIndex(i);
        if (fieldData[fi].field == null) {
            fieldData[fi].field = createField(sid, fieldData[fi]);
        }
        fields[i] = fieldData[fi].field;
        fieldNames[i] = fieldData[fi].name;
    }
    return new Serializer(sid, fields, fieldNames);
}
/**
 * Creates the concrete Field for a FieldData entry.  Build-specific
 * patches run first (they may mutate the decoder properties); the
 * element is then wrapped in an Array/Vector container as needed.
 */
private Field createField(SerializerId sId, FieldData fd) {
    for (PatchFunc patchFunc : patchFuncs) {
        patchFunc.execute(sId, fd);
    }

    // element type: unwrap one level for container categories
    FieldType elementType;
    switch (fd.category) {
        case ARRAY:
            elementType = fd.fieldType.getElementType();
            break;
        case VECTOR:
            elementType = fd.fieldType.getGenericType();
            break;
        default:
            elementType = fd.fieldType;
    }

    // element field: serializer-backed sub-struct, or plain decoded value
    Field elementField;
    if (fd.serializerId != null) {
        if (fd.category == FieldCategory.POINTER) {
            elementField = new PointerField(
                    elementType,
                    serializers.get(fd.serializerId)
            );
        } else {
            elementField = new SerializerField(
                    elementType,
                    serializers.get(fd.serializerId)
            );
        }
    } else {
        elementField = new ValueField(
                elementType,
                S2DecoderFactory.createDecoder(fd.decoderProperties, elementType.getBaseType())
        );
    }

    // container wrapper, if any
    switch (fd.category) {
        case ARRAY:
            return new ArrayField(
                    fd.fieldType,
                    elementField,
                    fd.getArrayElementCount()
            );
        case VECTOR:
            return new VectorField(
                    fd.fieldType,
                    elementField
            );
        default:
            return elementField;
    }
}
/** Resolves a symbol-table index to its string. */
private String sym(int i) {
    return protoMessage.getSymbols(i);
}
/**
 * Resolves a field's name symbol, warning (once per symbol) when the
 * name contains a '.', which is considered invalid.
 */
private String fieldNameFunction(S2NetMessages.ProtoFlattenedSerializerField_t field) {
    final int nameSym = field.getVarNameSym();
    final String name = sym(nameSym);
    // add() returns true only the first time the symbol is seen
    if (checkedNames.add(nameSym)) {
        if (name.contains(".")) {
            log.warn("replay contains field with invalid name '%s'. Please open a github issue!", name);
        }
    }
    return name;
}
/** Structural category of a field, deciding which Field subclass wraps it. */
private enum FieldCategory {
    POINTER,	// pointer to a sub-struct (see FieldData.determineIsPointer)
    VECTOR,	// variable-length collection, or has its own serializer
    ARRAY,	// fixed-size array
    VALUE	// plain decodable value
}
/** Per-proto-field decode information plus the lazily-built Field. */
private static class FieldData {
    private final FieldType fieldType;
    private final String name;
    private final ProtoDecoderProperties decoderProperties;
    private final SerializerId serializerId;	// non-null when the field's contents have their own serializer
    private final FieldCategory category;
    private Field field;			// built lazily by createField()

    public FieldData(FieldType fieldType, String name, ProtoDecoderProperties decoderProperties, SerializerId serializerId) {
        this.fieldType = fieldType;
        this.name = name;
        this.decoderProperties = decoderProperties;
        this.serializerId = serializerId;
        // category checks are ordered: pointer > vector > array > value
        if (determineIsPointer()) {
            category = FieldCategory.POINTER;
        } else if (determineIsVector()) {
            category = FieldCategory.VECTOR;
        } else if (determineIsArray()) {
            category = FieldCategory.ARRAY;
        } else {
            category = FieldCategory.VALUE;
        }
    }

    /** Pointer if declared so, or if the base type is a known component class. */
    private boolean determineIsPointer() {
        if (fieldType.isPointer()) return true;
        switch (fieldType.getBaseType()) {
            case "CBodyComponent":
            case "CLightComponent":
            case "CPhysicsComponent":
            case "CRenderComponent":
            case "CPlayerLocalData":
                return true;
        }
        return false;
    }

    /** Vector if it carries its own serializer or is a known vector type. */
    private boolean determineIsVector() {
        if (serializerId != null) return true;
        switch (fieldType.getBaseType()) {
            case "CUtlVector":
            case "CNetworkUtlVectorBase":
                return true;
            default:
                return false;
        }
    }

    /** Array if a fixed element count is declared; char[] is treated as a string, not an array. */
    private boolean determineIsArray() {
        return fieldType.getElementCount() != null && !"char".equals(fieldType.getBaseType());
    }

    /** Fixed element count; named game constants are resolved by hand. */
    private int getArrayElementCount() {
        String elementCount = fieldType.getElementCount();
        switch (elementCount) {
            case "MAX_ITEM_STOCKS":
                return 8;
            case "MAX_ABILITY_DRAFT_ABILITIES":
                return 48;
            default:
                return Integer.parseInt(elementCount);
        }
    }
}
// Serializer whose m_angRotation uses pitch/yaw-only encoding (see the
// patch below that special-cases it).
private static final SerializerId SID_PITCH_YAW = new SerializerId("CBodyComponentBaseAnimatingOverlay", 3);

/** A build-range-gated fix-up applied to a field's decode properties. */
private interface PatchFunc {
    void execute(SerializerId serializerId, FieldData field);
}

// Patches keyed by the build-number range they apply to; LinkedHashMap
// preserves registration order, which is also application order.
private static final Map<BuildNumberRange, PatchFunc> PATCHES = new LinkedHashMap<>();
static {
PATCHES.put(new BuildNumberRange(null, 954), (serializerId, field) -> {
switch (field.name) {
case "m_flMana":
case "m_flMaxMana":
ProtoDecoderProperties up = field.decoderProperties;
if (up.highValue == 3.4028235E38f) {
up.lowValue = null;
up.highValue = 8192.0f;
}
}
});
PATCHES.put(new BuildNumberRange(null, 990), (serializerId, field) -> {
switch (field.name) {
case "dirPrimary":
case "localSound":
case "m_attachmentPointBoneSpace":
case "m_attachmentPointRagdollSpace":
case "m_flElasticity":
case "m_location":
case "m_poolOrigin":
case "m_ragPos":
case "m_vecEndPos":
case "m_vecEyeExitEndpoint":
case "m_vecGunCrosshair":
case "m_vecLadderDir":
case "m_vecPlayerMountPositionBottom":
case "m_vecPlayerMountPositionTop":
case "m_viewtarget":
case "m_WorldMaxs":
case "m_WorldMins":
case "origin":
case "vecExtraLocalOrigin":
case "vecLocalOrigin":
field.decoderProperties.encoderType = "coord";
break;
case "angExtraLocalAngles":
case "angLocalAngles":
case "m_angInitialAngles":
case "m_ragAngles":
case "m_vLightDirection":
field.decoderProperties.encoderType = "QAngle";
break;
case "m_vecLadderNormal":
field.decoderProperties.encoderType = "normal";
break;
case "m_angRotation":
field.decoderProperties.encoderType = SID_PITCH_YAW.equals(serializerId) ? "qangle_pitch_yaw" : "QAngle";
break;
}
});
PATCHES.put(new BuildNumberRange(1016, 1026), (serializerId, field) -> {
switch (field.name) {
case "m_bWorldTreeState":
case "m_ulTeamLogo":
case "m_ulTeamBaseLogo":
case "m_ulTeamBannerLogo":
case "m_iPlayerIDsInControl":
case "m_bItemWhiteList":
case "m_iPlayerSteamID":
field.decoderProperties.encoderType = "fixed64";
}
});
PATCHES.put(new BuildNumberRange(null, null), (serializerId, field) -> {
switch (field.name) {
case "m_flSimulationTime":
case "m_flAnimTime":
field.decoderProperties.encoderType = "simulationtime";
}
});
PATCHES.put(new BuildNumberRange(null, null), (serializerId, field) -> {
switch (field.name) {
case "m_flRuneTime":
ProtoDecoderProperties up = field.decoderProperties;
if (up.highValue == Float.MAX_VALUE && up.lowValue == -Float.MAX_VALUE) {
up.lowValue = null;
up.highValue = null;
}
}
});
}
}
| 6,476 |
422 |
#!/usr/bin/env python
from __future__ import division
import htpc, cherrypy, logging, xmlrpclib, base64
from cherrypy.lib.auth2 import require, member_of
from htpc.helpers import serve_template, fix_basepath, striphttp
class RTorrent(object):
    """HTPC-Manager module exposing an rTorrent client via its XML-RPC API.

    All endpoints talk to rTorrent through ``xmlrpclib.Server`` using the URL
    built by ``rpc_url`` from the module's stored settings.
    """

    def __init__(self):
        # Register this module and its settings form with HTPC-Manager.
        self.logger = logging.getLogger('modules.rtorrent')
        htpc.MODULES.append({
            'name': 'rTorrent',
            'id': 'rtorrent',
            'test': htpc.WEBDIR + 'rtorrent/ping',
            'fields': [
                {'type': 'bool', 'label': 'Enable', 'name': 'rtorrent_enable'},
                {'type': 'text', 'label': 'Menu name', 'name': 'rtorrent_menuname'},
                {'type': 'bool', 'label': 'Use SSL', 'name': 'rtorrent_ssl'},
                {'type': 'text', 'label': 'Host *', 'name': 'rtorrent_host', 'placeholder': 'localhost:80',
                 'desc': 'RPC Communication URI. Usually scgi://localhost:5000, httprpc://localhost/rutorrent or localhost:80'},
                {'type': 'text', 'label': 'RPC Path', 'name': 'rtorrent_rpcpath',
                 'placeholder': '/RPC2', 'desc': 'Change if your RPC mount is at a different path'},
                {'type': 'text', 'label': 'Username', 'name': 'rtorrent_username'},
                {'type': 'password', 'label': 'Password',
                 'name': 'rtorrent_password'},
            ]
        })

    @cherrypy.expose()
    @require()
    def index(self):
        """Render the rTorrent page template."""
        return htpc.LOOKUP.get_template('rtorrent.html').render(scriptname='rtorrent')

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def queue(self):
        """Return the torrent list in the shape the frontend expects."""
        server = xmlrpclib.Server(self.stored_rpcurl())
        torrents = server.d.multicall("main", "d.get_name=",
                "d.get_bytes_done=", "d.get_complete=", "d.get_ratio=",
                "d.get_down_rate=", "d.get_up_rate=", "d.get_size_bytes=",
                "d.get_hash=", "d.get_state=")
        results = []
        for torrent in torrents:
            total_size = torrent[6]
            # Guard against ZeroDivisionError: a freshly added magnet link can
            # report a total size of 0 before its metadata is resolved.
            progress = (torrent[1] / total_size) * 100 if total_size else 0
            results.append({
                'name': torrent[0],
                'progress': progress,
                'is_finished': torrent[2],
                'ratio': torrent[3],
                'download_payload_rate': torrent[4],
                'upload_payload_rate': torrent[5],
                'eta': '-1',  # TODO implement eta calculation
                'state': 'Started' if torrent[8] == 1 else 'Paused',
                'hash': torrent[7],
                'total_size': total_size
            })
        return {'result': results}

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def ping(self, rtorrent_host, rtorrent_rpcpath, rtorrent_username='', rtorrent_password='', rtorrent_ssl=False, **kwargs):
        """Settings test: return the client version reachable at the given URL."""
        server_url = self.rpc_url(
            rtorrent_host, rtorrent_rpcpath, rtorrent_ssl, rtorrent_username, rtorrent_password)
        self.logger.debug("Trying to contact rtorrent via %s" % server_url)
        server = xmlrpclib.Server(server_url)
        return server.system.client_version()

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def start(self, torrentId=False):
        """Start one torrent by hash, or all torrents when no hash is given."""
        self.logger.debug("Starting torrent %s" % (torrentId))
        server = xmlrpclib.Server(self.stored_rpcurl())
        if torrentId is False:
            return server.d.multicall("main", "d.start")
        return server.d.start(torrentId)

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def stop(self, torrentId=False):
        """Stop one torrent by hash, or all torrents when no hash is given."""
        self.logger.debug("Stopping torrent %s" % (torrentId))
        server = xmlrpclib.Server(self.stored_rpcurl())
        if torrentId is False:
            return server.d.multicall("main", "d.stop")
        return server.d.stop(torrentId)

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def remove(self, torrentId):
        """Erase a torrent (by hash) from the client."""
        self.logger.debug("Removing torrent %s" % (torrentId))
        server = xmlrpclib.Server(self.stored_rpcurl())
        return server.d.erase(torrentId)

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def add(self, filename=None, metainfo=None):
        """Add a torrent either from a URL/path (`filename`) or from
        base64-encoded torrent data (`metainfo`) and start it."""
        self.logger.debug("Adding torrent: %s" % filename)
        server = xmlrpclib.Server(self.stored_rpcurl())
        if metainfo:
            data = base64.b64decode(metainfo)
            res = server.load_raw_start(xmlrpclib.Binary(data))
        else:
            res = server.load_start(filename)
        # rTorrent returns 0 on success.
        return {'error': False} if res == 0 else {'error': True}

    # For torrent search
    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def to_client(self, link, torrentname, **kwargs):
        """Entry point used by the torrent search to hand a link to rTorrent."""
        self.logger.debug("Adding torrent from torrentsearch")
        try:
            return self.add(link)
        except Exception as e:
            self.logger.debug('Failed to add %s to rTorrent %s %s' % (torrentname, link, e))

    @cherrypy.expose()
    @require()
    @cherrypy.tools.json_out()
    def stats(self):
        """Return global up/down rates and their configured limits (KiB/s)."""
        server = xmlrpclib.Server(self.stored_rpcurl())
        # Batch the four throttle queries into a single round trip.
        mc = xmlrpclib.MultiCall(server)
        mc.throttle.global_down.rate()
        mc.throttle.global_up.rate()
        mc.throttle.global_down.max_rate()
        mc.throttle.global_up.max_rate()
        results = mc()
        return {
            'result': {
                'stats': {
                    'download_rate': str(results[0] if results[0] >= 1024 else 0),
                    'upload_rate': str(results[1] if results[1] >= 1024 else 0),
                    'max_download_speed': str(results[2] / 1024 if results[2] >= 1024 else -1),
                    'max_upload_speed': str(results[3] / 1024 if results[3] >= 1024 else -1)
                }
            }
        }

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def set_downspeed(self, speed):
        """Set the global download rate limit (speed given in KiB/s)."""
        speed = "%sk" % speed
        self.logger.debug('Set download speed to %s' % speed)
        server = xmlrpclib.Server(self.stored_rpcurl())
        # Return the RPC result so @json_out reports it; it was previously
        # computed and silently discarded.
        return server.set_download_rate(speed)

    @cherrypy.expose()
    @require(member_of(htpc.role_user))
    @cherrypy.tools.json_out()
    def set_upspeed(self, speed):
        """Set the global upload rate limit (speed given in KiB/s)."""
        speed = "%sk" % speed
        self.logger.debug('Set upload speed to %s' % speed)
        server = xmlrpclib.Server(self.stored_rpcurl())
        # Return the RPC result so @json_out reports it (see set_downspeed).
        return server.set_upload_rate(speed)

    def stored_rpcurl(self):
        """Build the RPC URL from the settings stored in HTPC-Manager."""
        return self.rpc_url(htpc.settings.get('rtorrent_host', ''), htpc.settings.get('rtorrent_rpcpath', ''),
                            htpc.settings.get('rtorrent_ssl'), htpc.settings.get('rtorrent_username', ''), htpc.settings.get('rtorrent_password', ''))

    def rpc_url(self, host, rpc_path, ssl, username, password):
        """Assemble ``http(s)://[user:pass@]host/rpc_path`` for xmlrpclib.

        The scheme prefix on `host` is stripped; an empty `rpc_path` falls
        back to rTorrent's conventional '/RPC2' mount.
        """
        host = striphttp(host)
        rpc_path = fix_basepath(rpc_path).rstrip('/')
        if not rpc_path:
            rpc_path = '/RPC2'
        ssl = 's' if ssl else ''
        auth_string = ""
        if username or password:
            auth_string = "%s:%s@" % (username, password)
        server_url = 'http%s://%s%s%s' % (ssl, auth_string, host, rpc_path)
        return server_url
| 3,556 |
679 |
<gh_stars>100-1000
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_chart2.hxx"
#include "ChartController.hxx"
#include "dlg_InsertAxis_Grid.hxx"
#include "dlg_InsertDataLabel.hxx"
#include "dlg_InsertLegend.hxx"
#include "dlg_InsertTrendline.hxx"
#include "dlg_InsertErrorBars.hxx"
#include "dlg_InsertTitle.hxx"
#include "dlg_ObjectProperties.hxx"
#include "ChartWindow.hxx"
#include "ChartModelHelper.hxx"
#include "AxisHelper.hxx"
#include "TitleHelper.hxx"
#include "DiagramHelper.hxx"
#include "macros.hxx"
#include "chartview/DrawModelWrapper.hxx"
#include "NumberFormatterWrapper.hxx"
#include "ViewElementListProvider.hxx"
#include "MultipleChartConverters.hxx"
#include "ControllerLockGuard.hxx"
#include "UndoGuard.hxx"
#include "ResId.hxx"
#include "Strings.hrc"
#include "ReferenceSizeProvider.hxx"
#include "ObjectIdentifier.hxx"
#include "RegressionCurveHelper.hxx"
#include "RegressionCurveItemConverter.hxx"
#include "StatisticsHelper.hxx"
#include "ErrorBarItemConverter.hxx"
#include "MultipleItemConverter.hxx"
#include "DataSeriesHelper.hxx"
#include "ObjectNameProvider.hxx"
#include "LegendHelper.hxx"
#include <com/sun/star/chart2/XRegressionCurve.hpp>
#include <com/sun/star/chart/ErrorBarStyle.hpp>
#include <svx/ActionDescriptionProvider.hxx>
//--------------------------------------
// header for define RET_OK
#include <vcl/msgbox.hxx>
// header for class OUStringBuffer
#include <rtl/ustrbuf.hxx>
// header for class Application
#include <vcl/svapp.hxx>
// header for class ::vos::OGuard
#include <vos/mutex.hxx>
using namespace ::com::sun::star;
using namespace ::com::sun::star::chart2;
using ::com::sun::star::uno::Reference;
using ::com::sun::star::uno::Sequence;
using ::rtl::OUString;
//.............................................................................
namespace
{

// Functor that adds a mean-value (average) line to a single data series.
// Kept in an anonymous namespace: it is an implementation detail of this
// translation unit only.
struct lcl_InsertMeanValueLine
{
public:
// xContext: component context needed to create the regression-curve service.
lcl_InsertMeanValueLine( const uno::Reference< uno::XComponentContext > & xContext ) :
        m_xContext( xContext )
{}

// Apply to one series; silently does nothing when the series does not
// support XRegressionCurveContainer (the UNO_QUERY fails).
void operator()( const uno::Reference< chart2::XDataSeries > & xSeries )
{
    uno::Reference< chart2::XRegressionCurveContainer > xRegCurveCnt(
        xSeries, uno::UNO_QUERY );
    if( xRegCurveCnt.is())
    {
        ::chart::RegressionCurveHelper::addMeanValueLine(
            xRegCurveCnt, m_xContext, uno::Reference< beans::XPropertySet >( xSeries, uno::UNO_QUERY ));
    }
}

private:
uno::Reference< uno::XComponentContext > m_xContext;
};

} // anonymous namespace
//.............................................................................
namespace chart
{
//.............................................................................
// Shows the "Insert Axes" dialog and applies the chosen axis visibility to
// the diagram as one undoable action.
void ChartController::executeDispatch_InsertAxes()
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_AXES ))),
m_xUndoManager );
try
{
// pre-fill the dialog with the current axis existence/possibility state
InsertAxisOrGridDialogData aDialogInput;
uno::Reference< XDiagram > xDiagram = ChartModelHelper::findDiagram(getModel());
AxisHelper::getAxisOrGridExcistence( aDialogInput.aExistenceList, xDiagram, sal_True );
AxisHelper::getAxisOrGridPossibilities( aDialogInput.aPossibilityList, xDiagram, sal_True );
// the solar mutex must be held while a VCL dialog is executed
::vos::OGuard aGuard( Application::GetSolarMutex());
SchAxisDlg aDlg( m_pChartWindow, aDialogInput );
if( aDlg.Execute() == RET_OK )
{
// lock controllers till end of block
ControllerLockGuard aCLGuard( getModel() );
InsertAxisOrGridDialogData aDialogOutput;
aDlg.getResult( aDialogOutput );
::std::auto_ptr< ReferenceSizeProvider > mpRefSizeProvider(
impl_createReferenceSizeProvider());
bool bChanged = AxisHelper::changeVisibilityOfAxes( xDiagram
, aDialogInput.aExistenceList, aDialogOutput.aExistenceList, m_xCC
, mpRefSizeProvider.get() );
// record an undo step only if something actually changed
if( bChanged )
aUndoGuard.commit();
}
}
catch( uno::RuntimeException& e)
{
ASSERT_EXCEPTION( e );
}
}
// Shows the "Insert Grids" dialog and applies the chosen grid visibility to
// the diagram as one undoable action (same flow as executeDispatch_InsertAxes,
// but with bAxis==sal_False so the helpers operate on grids).
void ChartController::executeDispatch_InsertGrid()
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_GRIDS ))),
m_xUndoManager );
try
{
InsertAxisOrGridDialogData aDialogInput;
uno::Reference< XDiagram > xDiagram = ChartModelHelper::findDiagram(getModel());
AxisHelper::getAxisOrGridExcistence( aDialogInput.aExistenceList, xDiagram, sal_False );
AxisHelper::getAxisOrGridPossibilities( aDialogInput.aPossibilityList, xDiagram, sal_False );
// the solar mutex must be held while a VCL dialog is executed
::vos::OGuard aGuard( Application::GetSolarMutex());
SchGridDlg aDlg( m_pChartWindow, aDialogInput );//aItemSet, b3D, bNet, bSecondaryX, bSecondaryY );
if( aDlg.Execute() == RET_OK )
{
// lock controllers till end of block
ControllerLockGuard aCLGuard( getModel() );
InsertAxisOrGridDialogData aDialogOutput;
aDlg.getResult( aDialogOutput );
bool bChanged = AxisHelper::changeVisibilityOfGrids( xDiagram
, aDialogInput.aExistenceList, aDialogOutput.aExistenceList, m_xCC );
// record an undo step only if something actually changed
if( bChanged )
aUndoGuard.commit();
}
}
catch( uno::RuntimeException& e)
{
ASSERT_EXCEPTION( e );
}
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Shows the titles dialog and writes only the differences back to the model,
// wrapped in one undoable action.
void ChartController::executeDispatch_InsertTitles()
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_TITLES ))),
m_xUndoManager );
try
{
TitleDialogData aDialogInput;
aDialogInput.readFromModel( getModel() );
// the solar mutex must be held while a VCL dialog is executed
::vos::OGuard aGuard( Application::GetSolarMutex());
SchTitleDlg aDlg( m_pChartWindow, aDialogInput );
if( aDlg.Execute() == RET_OK )
{
// lock controllers till end of block
ControllerLockGuard aCLGuard( getModel() );
TitleDialogData aDialogOutput( impl_createReferenceSizeProvider());
aDlg.getResult( aDialogOutput );
// only differences to the dialog input are written to the model
bool bChanged = aDialogOutput.writeDifferenceToModel( getModel(), m_xCC, &aDialogInput );
if( bChanged )
aUndoGuard.commit();
}
}
catch( uno::RuntimeException& e)
{
ASSERT_EXCEPTION( e );
}
}
// Hides the chart legend as one undoable action.
void ChartController::executeDispatch_DeleteLegend()
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::DELETE, String( SchResId( STR_OBJECT_LEGEND ))),
m_xUndoManager );
LegendHelper::hideLegend( getModel() );
aUndoGuard.commit();
}
// Shows the chart legend as one undoable action.
void ChartController::executeDispatch_InsertLegend()
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_LEGEND ))),
m_xUndoManager );
// return value is unused here; showLegend does the actual model change
Reference< chart2::XLegend > xLegend = LegendHelper::showLegend( getModel(), m_xCC );
aUndoGuard.commit();
}
// Opens the legend position dialog and lets it write its result to the model
// as one undoable action.
void ChartController::executeDispatch_OpenLegendDialog()
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_LEGEND ))),
m_xUndoManager );
try
{
//prepare and open dialog
// the solar mutex must be held while a VCL dialog is executed
::vos::OGuard aGuard( Application::GetSolarMutex());
SchLegendDlg aDlg( m_pChartWindow, m_xCC );
aDlg.init( getModel() );
if( aDlg.Execute() == RET_OK )
{
// lock controllers till end of block
ControllerLockGuard aCLGuard( getModel() );
bool bChanged = aDlg.writeToModel( getModel() );
if( bChanged )
aUndoGuard.commit();
}
}
catch( uno::RuntimeException& e)
{
ASSERT_EXCEPTION( e );
}
}
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Menu entry "Insert Data Labels": if a single series is selected, labels are
// inserted for that series only (and its properties dialog is opened);
// otherwise the all-series data-label dialog is shown.
void ChartController::executeDispatch_InsertMenu_DataLabels()
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_DATALABELS ))),
m_xUndoManager );
//if a series is selected insert labels for that series only:
uno::Reference< chart2::XDataSeries > xSeries(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel()), uno::UNO_QUERY );
if( xSeries.is() )
{
// add labels
DataSeriesHelper::insertDataLabelsToSeriesAndAllPoints( xSeries );
// build the CID of the series' data-labels object and open its dialog
rtl::OUString aChildParticle( ObjectIdentifier::getStringForType( OBJECTTYPE_DATA_LABELS ) );
aChildParticle+=(C2U("="));
rtl::OUString aObjectCID = ObjectIdentifier::createClassifiedIdentifierForParticles(
ObjectIdentifier::getSeriesParticleFromCID(m_aSelection.getSelectedCID()), aChildParticle );
bool bSuccess = ChartController::executeDlg_ObjectProperties_withoutUndoGuard( aObjectCID, true );
if( bSuccess )
aUndoGuard.commit();
return;
}
try
{
// no single series selected: operate on the labels of all series at once
wrapper::AllDataLabelItemConverter aItemConverter(
getModel(),
m_pDrawModelWrapper->GetItemPool(),
m_pDrawModelWrapper->getSdrModel(),
uno::Reference< lang::XMultiServiceFactory >( getModel(), uno::UNO_QUERY ));
SfxItemSet aItemSet = aItemConverter.CreateEmptyItemSet();
aItemConverter.FillItemSet( aItemSet );
//prepare and open dialog
::vos::OGuard aGuard( Application::GetSolarMutex());
//get number formatter
uno::Reference< util::XNumberFormatsSupplier > xNumberFormatsSupplier( getModel(), uno::UNO_QUERY );
NumberFormatterWrapper aNumberFormatterWrapper( xNumberFormatsSupplier );
SvNumberFormatter* pNumberFormatter = aNumberFormatterWrapper.getSvNumberFormatter();
DataLabelsDialog aDlg( m_pChartWindow, aItemSet, pNumberFormatter);
if( aDlg.Execute() == RET_OK )
{
SfxItemSet aOutItemSet = aItemConverter.CreateEmptyItemSet();
aDlg.FillItemSet( aOutItemSet );
// lock controllers till end of block
ControllerLockGuard aCLGuard( getModel() );
bool bChanged = aItemConverter.ApplyItemSet( aOutItemSet );//model should be changed now
if( bChanged )
aUndoGuard.commit();
}
}
catch( uno::RuntimeException& e)
{
ASSERT_EXCEPTION( e );
}
}
// Menu entry "Insert Y Error Bars": with a selected series, delegates to
// executeDispatch_InsertYErrorBars; otherwise edits error bars of all series.
void ChartController::executeDispatch_InsertMenu_YErrorBars()
{
//if a series is selected insert error bars for that series only:
uno::Reference< chart2::XDataSeries > xSeries(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
if( xSeries.is())
{
executeDispatch_InsertYErrorBars();
return;
}
//if no series is selected insert error bars for all series
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, ObjectNameProvider::getName_ObjectForAllSeries( OBJECTTYPE_DATA_ERRORS ) ),
m_xUndoManager );
try
{
wrapper::AllSeriesStatisticsConverter aItemConverter(
getModel(), m_pDrawModelWrapper->GetItemPool() );
SfxItemSet aItemSet = aItemConverter.CreateEmptyItemSet();
aItemConverter.FillItemSet( aItemSet );
//prepare and open dialog
::vos::OGuard aGuard( Application::GetSolarMutex());
InsertErrorBarsDialog aDlg(
m_pChartWindow, aItemSet,
uno::Reference< chart2::XChartDocument >( getModel(), uno::UNO_QUERY ));
// empty CID: decimal precision is derived from the primary value axis
aDlg.SetAxisMinorStepWidthForErrorBarDecimals(
InsertErrorBarsDialog::getAxisMinorStepWidthForErrorBarDecimals( getModel(), m_xChartView, rtl::OUString() ) );
if( aDlg.Execute() == RET_OK )
{
SfxItemSet aOutItemSet = aItemConverter.CreateEmptyItemSet();
aDlg.FillItemSet( aOutItemSet );
// lock controllers till end of block
ControllerLockGuard aCLGuard( getModel() );
bool bChanged = aItemConverter.ApplyItemSet( aOutItemSet );//model should be changed now
if( bChanged )
aUndoGuard.commit();
}
}
catch( uno::RuntimeException& e)
{
ASSERT_EXCEPTION( e );
}
}
// Adds a mean-value line to the currently selected data series as one
// undoable action.
void ChartController::executeDispatch_InsertMeanValue()
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_AVERAGE_LINE ))),
m_xUndoManager );
// reuse the file-local functor that also serves the all-series case
lcl_InsertMeanValueLine( m_xCC ).operator()(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ));
aUndoGuard.commit();
}
// Menu entry "Insert Mean Value Lines": for the selected series only, or for
// every series of the diagram when no series is selected.
void ChartController::executeDispatch_InsertMenu_MeanValues()
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_AVERAGE_LINE ))),
m_xUndoManager );
uno::Reference< chart2::XDataSeries > xSeries(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
if( xSeries.is() )
{
//if a series is selected insert mean value only for that series:
lcl_InsertMeanValueLine( m_xCC ).operator()(xSeries);
}
else
{
// apply the same functor to every series of the diagram
::std::vector< uno::Reference< chart2::XDataSeries > > aSeries(
DiagramHelper::getDataSeriesFromDiagram( ChartModelHelper::findDiagram( getModel() )));
::std::for_each( aSeries.begin(), aSeries.end(), lcl_InsertMeanValueLine( m_xCC ));
}
aUndoGuard.commit();
}
// Menu entry "Insert Trend Lines": with a selected series, delegates to
// executeDispatch_InsertTrendline; otherwise edits trend lines of all series.
void ChartController::executeDispatch_InsertMenu_Trendlines()
{
//if a series is selected insert only for that series:
uno::Reference< chart2::XDataSeries > xSeries(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
if( xSeries.is())
{
executeDispatch_InsertTrendline();
return;
}
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, ObjectNameProvider::getName_ObjectForAllSeries( OBJECTTYPE_DATA_CURVE ) ),
m_xUndoManager );
try
{
wrapper::AllSeriesStatisticsConverter aItemConverter(
getModel(), m_pDrawModelWrapper->GetItemPool() );
SfxItemSet aItemSet = aItemConverter.CreateEmptyItemSet();
aItemConverter.FillItemSet( aItemSet );
//prepare and open dialog
::vos::OGuard aGuard( Application::GetSolarMutex());
InsertTrendlineDialog aDlg( m_pChartWindow, aItemSet );
aDlg.adjustSize();
if( aDlg.Execute() == RET_OK )
{
SfxItemSet aOutItemSet = aItemConverter.CreateEmptyItemSet();
aDlg.FillItemSet( aOutItemSet );
// lock controllers till end of block
ControllerLockGuard aCLGuard( getModel() );
bool bChanged = aItemConverter.ApplyItemSet( aOutItemSet );//model should be changed now
if( bChanged )
aUndoGuard.commit();
}
}
catch( uno::RuntimeException& e)
{
ASSERT_EXCEPTION( e );
}
}
// Adds a linear regression curve to the selected series and immediately opens
// its properties dialog. Uses UndoLiveUpdateGuard so dialog changes are shown
// live and rolled back if the dialog is cancelled.
void ChartController::executeDispatch_InsertTrendline()
{
uno::Reference< chart2::XRegressionCurveContainer > xRegCurveCnt(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel()), uno::UNO_QUERY );
if( xRegCurveCnt.is())
{
UndoLiveUpdateGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_CURVE ))),
m_xUndoManager );
// add a linear curve
RegressionCurveHelper::addRegressionCurve(
RegressionCurveHelper::REGRESSION_TYPE_LINEAR, xRegCurveCnt, m_xCC );
// get an appropriate item converter
uno::Reference< chart2::XRegressionCurve > xCurve(
RegressionCurveHelper::getFirstCurveNotMeanValueLine( xRegCurveCnt ));
uno::Reference< beans::XPropertySet > xCurveProp( xCurve, uno::UNO_QUERY );
if( !xCurveProp.is())
return;
wrapper::RegressionCurveItemConverter aItemConverter(
xCurveProp, xRegCurveCnt, m_pDrawModelWrapper->getSdrModel().GetItemPool(),
m_pDrawModelWrapper->getSdrModel(),
uno::Reference< lang::XMultiServiceFactory >( getModel(), uno::UNO_QUERY ));
// open dialog
SfxItemSet aItemSet = aItemConverter.CreateEmptyItemSet();
aItemConverter.FillItemSet( aItemSet );
ObjectPropertiesDialogParameter aDialogParameter = ObjectPropertiesDialogParameter(
ObjectIdentifier::createDataCurveCID(
ObjectIdentifier::getSeriesParticleFromCID( m_aSelection.getSelectedCID()),
RegressionCurveHelper::getRegressionCurveIndex( xRegCurveCnt, xCurve ), false ));
aDialogParameter.init( getModel() );
ViewElementListProvider aViewElementListProvider( m_pDrawModelWrapper.get());
::vos::OGuard aGuard( Application::GetSolarMutex());
SchAttribTabDlg aDlg( m_pChartWindow, &aItemSet, &aDialogParameter, &aViewElementListProvider,
uno::Reference< util::XNumberFormatsSupplier >( getModel(), uno::UNO_QUERY ));
// note: when a user pressed "OK" but didn't change any settings in the
// dialog, the SfxTabDialog returns "Cancel"
if( aDlg.Execute() == RET_OK || aDlg.DialogWasClosedWithOK())
{
const SfxItemSet* pOutItemSet = aDlg.GetOutputItemSet();
if( pOutItemSet )
{
ControllerLockGuard aCLGuard( getModel() );
aItemConverter.ApplyItemSet( *pOutItemSet );
}
aUndoGuard.commit();
}
}
}
// Adds standard-deviation error bars to the selected series and immediately
// opens their properties dialog (live-updating undo, see InsertTrendline).
void ChartController::executeDispatch_InsertYErrorBars()
{
uno::Reference< chart2::XDataSeries > xSeries(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
if( xSeries.is())
{
UndoLiveUpdateGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_ERROR_BARS ))),
m_xUndoManager );
// add error bars with standard deviation
uno::Reference< beans::XPropertySet > xErrorBarProp(
StatisticsHelper::addErrorBars( xSeries, m_xCC, ::com::sun::star::chart::ErrorBarStyle::STANDARD_DEVIATION ));
// get an appropriate item converter
wrapper::ErrorBarItemConverter aItemConverter(
getModel(), xErrorBarProp, m_pDrawModelWrapper->getSdrModel().GetItemPool(),
m_pDrawModelWrapper->getSdrModel(),
uno::Reference< lang::XMultiServiceFactory >( getModel(), uno::UNO_QUERY ));
// open dialog
SfxItemSet aItemSet = aItemConverter.CreateEmptyItemSet();
aItemConverter.FillItemSet( aItemSet );
ObjectPropertiesDialogParameter aDialogParameter = ObjectPropertiesDialogParameter(
ObjectIdentifier::createClassifiedIdentifierWithParent(
OBJECTTYPE_DATA_ERRORS, ::rtl::OUString(), m_aSelection.getSelectedCID()));
aDialogParameter.init( getModel() );
ViewElementListProvider aViewElementListProvider( m_pDrawModelWrapper.get());
::vos::OGuard aGuard( Application::GetSolarMutex());
SchAttribTabDlg aDlg( m_pChartWindow, &aItemSet, &aDialogParameter, &aViewElementListProvider,
uno::Reference< util::XNumberFormatsSupplier >( getModel(), uno::UNO_QUERY ));
aDlg.SetAxisMinorStepWidthForErrorBarDecimals(
InsertErrorBarsDialog::getAxisMinorStepWidthForErrorBarDecimals( getModel(), m_xChartView, m_aSelection.getSelectedCID()));
// note: when a user pressed "OK" but didn't change any settings in the
// dialog, the SfxTabDialog returns "Cancel"
if( aDlg.Execute() == RET_OK || aDlg.DialogWasClosedWithOK())
{
const SfxItemSet* pOutItemSet = aDlg.GetOutputItemSet();
if( pOutItemSet )
{
ControllerLockGuard aCLGuard( getModel() );
aItemConverter.ApplyItemSet( *pOutItemSet );
}
aUndoGuard.commit();
}
}
}
// Shows the equation of the selected regression curve; optionally also shows
// the correlation coefficient (R²) when bInsertR2 is true. If the selection
// is a series rather than a curve, its first real trend line is used.
void ChartController::executeDispatch_InsertTrendlineEquation( bool bInsertR2 )
{
uno::Reference< chart2::XRegressionCurve > xRegCurve(
ObjectIdentifier::getObjectPropertySet( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
if( !xRegCurve.is() )
{
// fall back: selection is the series; take its first non-mean-value curve
uno::Reference< chart2::XRegressionCurveContainer > xRegCurveCnt(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
xRegCurve.set( RegressionCurveHelper::getFirstCurveNotMeanValueLine( xRegCurveCnt ) );
}
if( xRegCurve.is())
{
uno::Reference< beans::XPropertySet > xEqProp( xRegCurve->getEquationProperties());
if( xEqProp.is())
{
// using assignment for broken gcc 3.3
UndoGuard aUndoGuard = UndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_CURVE_EQUATION ))),
m_xUndoManager );
xEqProp->setPropertyValue( C2U("ShowEquation"), uno::makeAny( true ));
xEqProp->setPropertyValue( C2U("ShowCorrelationCoefficient"), uno::makeAny( bInsertR2 ));
aUndoGuard.commit();
}
}
}
// Shows the correlation coefficient (R²) of the selected equation object as
// one undoable action.
void ChartController::executeDispatch_InsertR2Value()
{
uno::Reference< beans::XPropertySet > xEqProp(
ObjectIdentifier::getObjectPropertySet( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
if( xEqProp.is())
{
// assignment form, cf. note in executeDispatch_InsertTrendlineEquation
UndoGuard aUndoGuard = UndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_CURVE_EQUATION ))),
m_xUndoManager );
xEqProp->setPropertyValue( C2U("ShowCorrelationCoefficient"), uno::makeAny( true ));
aUndoGuard.commit();
}
}
// Hides the correlation coefficient (R²) of the selected equation object as
// one undoable action. Counterpart of executeDispatch_InsertR2Value.
void ChartController::executeDispatch_DeleteR2Value()
{
    uno::Reference< beans::XPropertySet > xEqProp(
        ObjectIdentifier::getObjectPropertySet( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
    if( xEqProp.is())
    {
        // Fix: describe the undo action as DELETE. The original used
        // ActionDescriptionProvider::INSERT here, which produced a misleading
        // "Insert ..." undo-stack entry for a delete operation (compare
        // executeDispatch_DeleteMeanValue and the other Delete dispatches).
        UndoGuard aUndoGuard = UndoGuard(
            ActionDescriptionProvider::createDescription(
                ActionDescriptionProvider::DELETE, String( SchResId( STR_OBJECT_CURVE_EQUATION ))),
            m_xUndoManager );
        xEqProp->setPropertyValue( C2U("ShowCorrelationCoefficient"), uno::makeAny( false ));
        aUndoGuard.commit();
    }
}
// Removes the mean-value line of the selected data series as one undoable
// action.
void ChartController::executeDispatch_DeleteMeanValue()
{
uno::Reference< chart2::XRegressionCurveContainer > xRegCurveCnt(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
if( xRegCurveCnt.is())
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::DELETE, String( SchResId( STR_OBJECT_AVERAGE_LINE ))),
m_xUndoManager );
RegressionCurveHelper::removeMeanValueLine( xRegCurveCnt );
aUndoGuard.commit();
}
}
// Removes all trend lines (regression curves except the mean-value line) of
// the selected data series as one undoable action.
void ChartController::executeDispatch_DeleteTrendline()
{
uno::Reference< chart2::XRegressionCurveContainer > xRegCurveCnt(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
if( xRegCurveCnt.is())
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::DELETE, String( SchResId( STR_OBJECT_CURVE ))),
m_xUndoManager );
RegressionCurveHelper::removeAllExceptMeanValueLine( xRegCurveCnt );
aUndoGuard.commit();
}
}
// Hides the equations of all regression curves of the selected data series as
// one undoable action.
void ChartController::executeDispatch_DeleteTrendlineEquation()
{
uno::Reference< chart2::XRegressionCurveContainer > xRegCurveCnt(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
if( xRegCurveCnt.is())
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::DELETE, String( SchResId( STR_OBJECT_CURVE_EQUATION ))),
m_xUndoManager );
RegressionCurveHelper::removeEquations( xRegCurveCnt );
aUndoGuard.commit();
}
}
// Removes the error bars of the selected data series as one undoable action.
// Counterpart of executeDispatch_InsertYErrorBars.
void ChartController::executeDispatch_DeleteYErrorBars()
{
    uno::Reference< chart2::XDataSeries > xDataSeries(
        ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ));
    if( xDataSeries.is())
    {
        // Fix: label the undo action with the error-bar string. The original
        // used STR_OBJECT_CURVE (trend curve), which produced a wrong
        // undo-stack entry; STR_OBJECT_ERROR_BARS matches the insert path
        // (see executeDispatch_InsertYErrorBars).
        UndoGuard aUndoGuard(
            ActionDescriptionProvider::createDescription(
                ActionDescriptionProvider::DELETE, String( SchResId( STR_OBJECT_ERROR_BARS ))),
            m_xUndoManager );
        StatisticsHelper::removeErrorBars( xDataSeries );
        aUndoGuard.commit();
    }
}
// Adds data labels to the selected series and all of its points as one
// undoable action.
void ChartController::executeDispatch_InsertDataLabels()
{
uno::Reference< chart2::XDataSeries > xSeries(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
if( xSeries.is() )
{
UndoGuard aUndoGuard = UndoGuard( ActionDescriptionProvider::createDescription( ActionDescriptionProvider::INSERT,
String( SchResId( STR_OBJECT_DATALABELS ))),
m_xUndoManager );
DataSeriesHelper::insertDataLabelsToSeriesAndAllPoints( xSeries );
aUndoGuard.commit();
}
}
// Adds a data label to the single selected data point as one undoable action.
void ChartController::executeDispatch_InsertDataLabel()
{
UndoGuard aUndoGuard = UndoGuard( ActionDescriptionProvider::createDescription( ActionDescriptionProvider::INSERT,
String( SchResId( STR_OBJECT_LABEL ))),
m_xUndoManager );
DataSeriesHelper::insertDataLabelToPoint( ObjectIdentifier::getObjectPropertySet( m_aSelection.getSelectedCID(), getModel() ) );
aUndoGuard.commit();
}
// Removes the data labels of the selected series and all of its points as one
// undoable action.
void ChartController::executeDispatch_DeleteDataLabels()
{
uno::Reference< chart2::XDataSeries > xSeries(
ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
if( xSeries.is() )
{
UndoGuard aUndoGuard( ActionDescriptionProvider::createDescription( ActionDescriptionProvider::DELETE,
String( SchResId( STR_OBJECT_DATALABELS ))),
m_xUndoManager );
DataSeriesHelper::deleteDataLabelsFromSeriesAndAllPoints( xSeries );
aUndoGuard.commit();
}
}
// Deletes the data label of the single selected data point, as an undoable
// action.
void ChartController::executeDispatch_DeleteDataLabel()
{
    UndoGuard aUndoGuard( ActionDescriptionProvider::createDescription( ActionDescriptionProvider::DELETE,
        String( SchResId( STR_OBJECT_LABEL ))),
        m_xUndoManager );
    DataSeriesHelper::deleteDataLabelsFromPoint( ObjectIdentifier::getObjectPropertySet( m_aSelection.getSelectedCID(), getModel() ) );
    aUndoGuard.commit();
}
// Resets the individual formatting of every data point of the selected
// series back to the series defaults, as an undoable action.
void ChartController::executeDispatch_ResetAllDataPoints()
{
    UndoGuard aUndoGuard( ActionDescriptionProvider::createDescription( ActionDescriptionProvider::FORMAT,
        String( SchResId( STR_OBJECT_DATAPOINTS ))),
        m_xUndoManager );
    uno::Reference< chart2::XDataSeries > xSeries( ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
    if( xSeries.is() )
        xSeries->resetAllDataPoints();
    // Note: commits even when no series was found (records an empty action).
    aUndoGuard.commit();
}
// Resets the individual formatting of the single selected data point back
// to the series defaults, as an undoable action.
void ChartController::executeDispatch_ResetDataPoint()
{
    UndoGuard aUndoGuard( ActionDescriptionProvider::createDescription( ActionDescriptionProvider::FORMAT,
        String( SchResId( STR_OBJECT_DATAPOINT ))),
        m_xUndoManager );
    uno::Reference< chart2::XDataSeries > xSeries( ObjectIdentifier::getDataSeriesForCID( m_aSelection.getSelectedCID(), getModel() ), uno::UNO_QUERY );
    if( xSeries.is() )
    {
        // The point index is encoded in the selected object's CID.
        sal_Int32 nPointIndex = ObjectIdentifier::getIndexFromParticleOrCID( m_aSelection.getSelectedCID() );
        xSeries->resetDataPoint( nPointIndex );
    }
    aUndoGuard.commit();
}
void ChartController::executeDispatch_InsertAxisTitle()
{
try
{
uno::Reference< XTitle > xTitle;
{
UndoGuard aUndoGuard(
ActionDescriptionProvider::createDescription(
ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_TITLE ))),
m_xUndoManager );
Reference< XAxis > xAxis = ObjectIdentifier::getAxisForCID( m_aSelection.getSelectedCID(), getModel() );
sal_Int32 nDimensionIndex = -1;
sal_Int32 nCooSysIndex = -1;
sal_Int32 nAxisIndex = -1;
AxisHelper::getIndicesForAxis( xAxis, ChartModelHelper::findDiagram(getModel()), nCooSysIndex, nDimensionIndex, nAxisIndex );
TitleHelper::eTitleType eTitleType = TitleHelper::X_AXIS_TITLE;
if( nDimensionIndex==0 )
eTitleType = nAxisIndex==0 ? TitleHelper::X_AXIS_TITLE : TitleHelper::SECONDARY_X_AXIS_TITLE;
else if( nDimensionIndex==1 )
eTitleType = nAxisIndex==0 ? TitleHelper::Y_AXIS_TITLE : TitleHelper::SECONDARY_Y_AXIS_TITLE;
else
eTitleType = TitleHelper::Z_AXIS_TITLE;
::std::auto_ptr< ReferenceSizeProvider > apRefSizeProvider( impl_createReferenceSizeProvider());
xTitle = TitleHelper::createTitle( eTitleType, ObjectNameProvider::getTitleNameByType(eTitleType), getModel(), m_xCC, apRefSizeProvider.get() );
aUndoGuard.commit();
}
/*
if( xTitle.is() )
{
OUString aTitleCID = ObjectIdentifier::createClassifiedIdentifierForObject( xTitle, getModel() );
select( uno::makeAny(aTitleCID) );
executeDispatch_EditText();
}
*/
}
catch( uno::RuntimeException& e)
{
ASSERT_EXCEPTION( e );
}
}
// Makes the currently selected axis visible, as an undoable action.  The
// undo entry is only committed when an axis was actually found.
void ChartController::executeDispatch_InsertAxis()
{
    UndoGuard aUndoGuard(
        ActionDescriptionProvider::createDescription(
            ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_AXIS ))),
        m_xUndoManager );
    try
    {
        Reference< XAxis > xAxis = ObjectIdentifier::getAxisForCID( m_aSelection.getSelectedCID(), getModel() );
        if( xAxis.is() )
        {
            AxisHelper::makeAxisVisible( xAxis );
            aUndoGuard.commit();
        }
    }
    catch( uno::RuntimeException& e)
    {
        ASSERT_EXCEPTION( e );
    }
}
// Hides the currently selected axis, as an undoable action.  "Delete" only
// toggles visibility; the axis object itself is kept.
void ChartController::executeDispatch_DeleteAxis()
{
    UndoGuard aUndoGuard(
        ActionDescriptionProvider::createDescription(
            ActionDescriptionProvider::DELETE, String( SchResId( STR_OBJECT_AXIS ))),
        m_xUndoManager );
    try
    {
        Reference< XAxis > xAxis = ObjectIdentifier::getAxisForCID( m_aSelection.getSelectedCID(), getModel() );
        if( xAxis.is() )
        {
            AxisHelper::makeAxisInvisible( xAxis );
            aUndoGuard.commit();
        }
    }
    catch( uno::RuntimeException& e)
    {
        ASSERT_EXCEPTION( e );
    }
}
// Shows the major grid of the currently selected axis, as an undoable
// action.
void ChartController::executeDispatch_InsertMajorGrid()
{
    UndoGuard aUndoGuard(
        ActionDescriptionProvider::createDescription(
            ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_GRID ))),
        m_xUndoManager );
    try
    {
        Reference< XAxis > xAxis = ObjectIdentifier::getAxisForCID( m_aSelection.getSelectedCID(), getModel() );
        if( xAxis.is() )
        {
            AxisHelper::makeGridVisible( xAxis->getGridProperties() );
            aUndoGuard.commit();
        }
    }
    catch( uno::RuntimeException& e)
    {
        ASSERT_EXCEPTION( e );
    }
}
// Hides the major grid of the currently selected axis, as an undoable
// action.
void ChartController::executeDispatch_DeleteMajorGrid()
{
    UndoGuard aUndoGuard(
        ActionDescriptionProvider::createDescription(
            ActionDescriptionProvider::DELETE, String( SchResId( STR_OBJECT_GRID ))),
        m_xUndoManager );
    try
    {
        Reference< XAxis > xAxis = ObjectIdentifier::getAxisForCID( m_aSelection.getSelectedCID(), getModel() );
        if( xAxis.is() )
        {
            AxisHelper::makeGridInvisible( xAxis->getGridProperties() );
            aUndoGuard.commit();
        }
    }
    catch( uno::RuntimeException& e)
    {
        ASSERT_EXCEPTION( e );
    }
}
// Shows every minor (sub-)grid of the currently selected axis, as one
// undoable action.
void ChartController::executeDispatch_InsertMinorGrid()
{
    UndoGuard aUndoGuard(
        ActionDescriptionProvider::createDescription(
            ActionDescriptionProvider::INSERT, String( SchResId( STR_OBJECT_GRID ))),
        m_xUndoManager );
    try
    {
        Reference< XAxis > xAxis = ObjectIdentifier::getAxisForCID( m_aSelection.getSelectedCID(), getModel() );
        if( xAxis.is() )
        {
            // An axis can carry several sub-grids (one per minor interval set).
            Sequence< Reference< beans::XPropertySet > > aSubGrids( xAxis->getSubGridProperties() );
            for( sal_Int32 nN=0; nN<aSubGrids.getLength(); nN++)
                AxisHelper::makeGridVisible( aSubGrids[nN] );
            aUndoGuard.commit();
        }
    }
    catch( uno::RuntimeException& e)
    {
        ASSERT_EXCEPTION( e );
    }
}
// Hides every minor (sub-)grid of the currently selected axis, as one
// undoable action.
void ChartController::executeDispatch_DeleteMinorGrid()
{
    UndoGuard aUndoGuard(
        ActionDescriptionProvider::createDescription(
            ActionDescriptionProvider::DELETE, String( SchResId( STR_OBJECT_GRID ))),
        m_xUndoManager );
    try
    {
        Reference< XAxis > xAxis = ObjectIdentifier::getAxisForCID( m_aSelection.getSelectedCID(), getModel() );
        if( xAxis.is() )
        {
            Sequence< Reference< beans::XPropertySet > > aSubGrids( xAxis->getSubGridProperties() );
            for( sal_Int32 nN=0; nN<aSubGrids.getLength(); nN++)
                AxisHelper::makeGridInvisible( aSubGrids[nN] );
            aUndoGuard.commit();
        }
    }
    catch( uno::RuntimeException& e)
    {
        ASSERT_EXCEPTION( e );
    }
}
//.............................................................................
} //namespace chart
//.............................................................................
| 14,734 |
45,293 |
<reponame>Mu-L/kotlin
/* ----------------------------------------------------------------------------
Copyright (c) 2018-2020 Microsoft Research, <NAME>
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"licenses/third_party/mimalloc_LICENSE.txt" at the root of this distribution.
-----------------------------------------------------------------------------*/
#pragma once
#ifndef MIMALLOC_OVERRIDE_H
#define MIMALLOC_OVERRIDE_H

/* ----------------------------------------------------------------------------
This header can be used to statically redirect malloc/free and new/delete
to the mimalloc variants. This can be useful if one can include this file on
each source file in a project (but be careful when using external code to
not accidentally mix pointers from different allocators).
-----------------------------------------------------------------------------*/
#include <mimalloc.h>

/* NOTE: these are plain textual macros, so they rewrite *any* token with a
   matching name after this point (including struct members or local
   identifiers called e.g. `free`). Include this header with care. */

// Standard C allocation
#define malloc(n)               mi_malloc(n)
#define calloc(n,c)             mi_calloc(n,c)
#define realloc(p,n)            mi_realloc(p,n)
#define free(p)                 mi_free(p)

#define strdup(s)               mi_strdup(s)
#define strndup(s,n)            mi_strndup(s,n)
#define realpath(f,n)           mi_realpath(f,n)

// Microsoft extensions
#define _expand(p,n)            mi_expand(p,n)
#define _msize(p)               mi_usable_size(p)
#define _recalloc(p,n,c)        mi_recalloc(p,n,c)

#define _strdup(s)              mi_strdup(s)
#define _strndup(s,n)           mi_strndup(s,n)
#define _wcsdup(s)              (wchar_t*)mi_wcsdup((const unsigned short*)(s))
#define _mbsdup(s)              mi_mbsdup(s)
#define _dupenv_s(b,n,v)        mi_dupenv_s(b,n,v)
#define _wdupenv_s(b,n,v)       mi_wdupenv_s((unsigned short*)(b),n,(const unsigned short*)(v))

// Various Posix and Unix variants
#define reallocf(p,n)           mi_reallocf(p,n)
#define malloc_size(p)          mi_usable_size(p)
#define malloc_usable_size(p)   mi_usable_size(p)
#define cfree(p)                mi_free(p)

#define valloc(n)               mi_valloc(n)
#define pvalloc(n)              mi_pvalloc(n)
#define reallocarray(p,s,n)     mi_reallocarray(p,s,n)
#define memalign(a,n)           mi_memalign(a,n)
#define aligned_alloc(a,n)      mi_aligned_alloc(a,n)
#define posix_memalign(p,a,n)   mi_posix_memalign(p,a,n)
#define _posix_memalign(p,a,n)  mi_posix_memalign(p,a,n)

// Microsoft aligned variants
#define _aligned_malloc(n,a)                  mi_malloc_aligned(n,a)
#define _aligned_realloc(p,n,a)               mi_realloc_aligned(p,n,a)
#define _aligned_recalloc(p,s,n,a)            mi_aligned_recalloc(p,s,n,a)
#define _aligned_msize(p,a,o)                 mi_usable_size(p)
#define _aligned_free(p)                      mi_free(p)
#define _aligned_offset_malloc(n,a,o)         mi_malloc_aligned_at(n,a,o)
#define _aligned_offset_realloc(p,n,a,o)      mi_realloc_aligned_at(p,n,a,o)
#define _aligned_offset_recalloc(p,s,n,a,o)   mi_recalloc_aligned_at(p,s,n,a,o)

#endif // MIMALLOC_OVERRIDE_H
| 1,424 |
3,094 |
"""
Implementation of SVM using cvxopt package. Implementation uses
soft margin and I've defined linear, polynomial and gaussian kernels.
To understand the theory (which is a bit challenging) I recommend reading the following:
http://cs229.stanford.edu/notes/cs229-notes3.pdf
https://www.youtube.com/playlist?list=PLoROMvodv4rMiGQp3WXShtMGgzqpfVfbU (Lectures 6,7 by Andrew Ng)
To understand how to reformulate the optimization problem we obtain
to get the input to cvxopt QP solver this blogpost can be useful:
https://xavierbourretsicotte.github.io/SVM_implementation.html
Programmed by <NAME> <aladdin.persson at hotmail dot com>
* 2020-04-26 Initial coding
"""
import numpy as np
import cvxopt
from utils import create_dataset, plot_contour
def linear(x, z):
    """Linear kernel: pairwise inner products between rows of x and rows of z."""
    return x @ z.T
def polynomial(x, z, p=5):
    """Polynomial kernel (1 + <x, z>)^p over all row pairs of x and z."""
    gram = x @ z.T
    return (1 + gram) ** p
def gaussian(x, z, sigma=0.1):
    """RBF kernel exp(-||x - z||^2 / (2 sigma^2)), one value per row of (x - z)."""
    dist = np.linalg.norm(x - z, axis=1)
    return np.exp(-(dist ** 2) / (2 * (sigma ** 2)))
class SVM:
    """Soft-margin kernel SVM.

    Trains by solving the dual quadratic program with cvxopt and predicts
    using the resulting support vectors.
    """

    def __init__(self, kernel=gaussian, C=1):
        self.kernel = kernel  # kernel function k(x, z)
        self.C = C  # soft-margin penalty: box constraint 0 <= alpha_i <= C

    def fit(self, X, y):
        """Solve the dual QP for training data X (m x n) and labels y in {-1, +1}."""
        self.y = y
        self.X = X
        m, n = X.shape

        # Gram matrix K[i, j] = k(x_i, x_j), built one row at a time.
        self.K = np.zeros((m, m))
        for i in range(m):
            self.K[i, :] = self.kernel(X[i, np.newaxis], self.X)

        # Reformulate the dual as cvxopt's standard QP:
        #   min (1/2) a'Pa + q'a   s.t.   Ga <= h,  Aa = b
        P = cvxopt.matrix(np.outer(y, y) * self.K)
        q = cvxopt.matrix(-np.ones((m, 1)))
        G = cvxopt.matrix(np.vstack((np.eye(m) * -1, np.eye(m))))
        h = cvxopt.matrix(np.hstack((np.zeros(m), np.ones(m) * self.C)))
        A = cvxopt.matrix(y, (1, m), "d")
        b = cvxopt.matrix(np.zeros(1))

        cvxopt.solvers.options["show_progress"] = False
        sol = cvxopt.solvers.qp(P, q, G, h, A, b)
        self.alphas = np.array(sol["x"])

    def predict(self, X):
        """Return predicted labels (+1/-1) for the rows of X."""
        y_predict = np.zeros((X.shape[0]))
        sv = self.get_parameters(self.alphas)
        for i in range(X.shape[0]):
            # Decision value: sum over support vectors of alpha * y * k(x, sv).
            y_predict[i] = np.sum(
                self.alphas[sv]
                * self.y[sv, np.newaxis]
                * self.kernel(X[i], self.X[sv])[:, np.newaxis]
            )
        return np.sign(y_predict + self.b)

    def get_parameters(self, alphas):
        """Identify support vectors and compute the weight vector and bias.

        Returns a boolean mask over the training points marking support
        vectors (0 < alpha < C, up to a numerical threshold).
        """
        threshold = 1e-5
        sv = ((alphas > threshold) * (alphas < self.C)).flatten()
        # Bug fix: use self.X (the training data stored by fit); the original
        # referenced the module-level global `X`, which only worked because
        # the demo script happened to train on a global of that name.
        self.w = np.dot(self.X[sv].T, alphas[sv] * self.y[sv, np.newaxis])
        # NOTE(review): K[sv, sv] picks only diagonal entries; the textbook
        # bias uses full support-vector rows (K[sv][:, sv]) -- confirm intent.
        self.b = np.mean(
            self.y[sv, np.newaxis]
            - self.alphas[sv] * self.y[sv, np.newaxis] * self.K[sv, sv][:, np.newaxis]
        )
        return sv
if __name__ == "__main__":
    # Reproducible demo on a small synthetic 2-D dataset.
    np.random.seed(1)
    X, y = create_dataset(N=50)  # project helper from utils

    svm = SVM(kernel=gaussian)
    svm.fit(X, y)
    y_pred = svm.predict(X)
    plot_contour(X, y, svm)  # project helper: plots the decision boundary
    # Training-set accuracy (not a generalization estimate).
    print(f"Accuracy: {sum(y==y_pred)/y.shape[0]}")
| 1,421 |
782 |
<reponame>gatordevin/BoofCV
/*
* Copyright (c) 2020, <NAME>. All Rights Reserved.
*
* This file is part of BoofCV (http://boofcv.org).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package boofcv.abst.disparity;
import boofcv.struct.image.ImageBase;
import boofcv.struct.image.ImageGray;
import boofcv.struct.image.ImageType;
/**
* Provides do nothing implementations of all the functions for easy mocking
*
* @author <NAME>
*/
/**
 * Convenience base class that provides do-nothing / default implementations
 * of every {@link StereoDisparity} method, so tests can mock only the parts
 * they care about.
 */
public abstract class StereoDisparityAbstract<T extends ImageBase<T>, D extends ImageGray<D>>
		implements StereoDisparity<T, D> {

	ImageType<T> imageType;
	Class<D> disparityType;

	protected StereoDisparityAbstract( ImageType<T> imageType, Class<D> disparityType ) {
		this.imageType = imageType;
		this.disparityType = disparityType;
	}

	protected StereoDisparityAbstract() {}

	@Override
	public void process( T imageLeft, T imageRight ) {
		// intentionally a no-op
	}

	@Override
	public D getDisparity() {
		return null;
	}

	@Override
	public int getDisparityMin() {
		return 0;
	}

	@Override
	public int getDisparityRange() {
		return 0;
	}

	@Override
	public int getInvalidValue() {
		return 0;
	}

	@Override
	public int getBorderX() {
		return 0;
	}

	@Override
	public int getBorderY() {
		return 0;
	}

	@Override
	public ImageType<T> getInputType() {
		return imageType;
	}

	@Override
	public Class<D> getDisparityType() {
		return disparityType;
	}
}
| 546 |
478 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "pch.h"
#include "AdaptiveContainerStyleDefinition.h"
#include "AdaptiveContainerStylesDefinition.h"
#include "AdaptiveContainerStylesDefinition.g.cpp"
namespace winrt::AdaptiveCards::Rendering::Uwp::implementation
{
    // Wraps the shared-model ContainerStylesDefinition, projecting each of its
    // palettes into an AdaptiveContainerStyleDefinition WinRT object.
    AdaptiveContainerStylesDefinition::AdaptiveContainerStylesDefinition(::AdaptiveCards::ContainerStylesDefinition const& stylesDefinition) :
        Default{winrt::make<implementation::AdaptiveContainerStyleDefinition>(stylesDefinition.defaultPalette)},
        Emphasis{winrt::make<implementation::AdaptiveContainerStyleDefinition>(stylesDefinition.emphasisPalette)},
        Good{winrt::make<implementation::AdaptiveContainerStyleDefinition>(stylesDefinition.goodPalette)},
        Attention{winrt::make<implementation::AdaptiveContainerStyleDefinition>(stylesDefinition.attentionPalette)},
        Warning{winrt::make<implementation::AdaptiveContainerStyleDefinition>(stylesDefinition.warningPalette)},
        Accent{winrt::make<implementation::AdaptiveContainerStyleDefinition>(stylesDefinition.accentPalette)}
    {
    }
}
| 334 |
2,053 |
package scouter.agent.counter.task;
import scouter.agent.Configure;
import scouter.agent.Logger;
import scouter.agent.counter.CounterBasket;
import scouter.agent.counter.anotation.Counter;
import scouter.agent.counter.jmx.LazyPlatformMBeanServer;
import scouter.lang.TimeTypeEnum;
import scouter.lang.pack.PerfCounterPack;
import scouter.lang.value.FloatValue;
import scouter.util.StringEnumer;
import scouter.util.StringSet;
import scouter.util.StringUtil;
/**
 * Periodic counter task that reads user-configured JMX attributes and
 * publishes them as real-time performance counters.
 *
 * <p>Each configured entry has the form {@code counterName|mbeanName|attribute}.
 * If the MBean server ever throws, the task logs once and disables itself.
 */
public class CustomJmx {
	Configure conf = Configure.getInstance();
	LazyPlatformMBeanServer mBeanServer;
	boolean mBeanServerEnable = true;

	@Counter
	public void extractJmx(CounterBasket pw) {
		// Disabled by configuration, or permanently disabled after a failure.
		if (!(conf.counter_custom_jmx_enabled && mBeanServerEnable)) {
			return;
		}
		StringSet jmxKeys = conf.getCustomJmxSet();
		if (jmxKeys.size() == 0) {
			return;
		}
		// The MBean server connection is created lazily on first use.
		if (mBeanServer == null) {
			mBeanServer = LazyPlatformMBeanServer.create();
		}
		try {
			if (mBeanServer.checkInit()) {
				StringEnumer keyIter = jmxKeys.keys();
				PerfCounterPack perfPack = pw.getPack(TimeTypeEnum.REALTIME);
				while (keyIter.hasMoreElements()) {
					String entry = keyIter.nextString();
					// entry = "counterName|mbeanName|attributeName"
					String[] parts = StringUtil.split(entry, "|");
					if (parts.length == 3) {
						float metric = mBeanServer.getValue(parts[1], parts[2]);
						// Negative values signal "not available"; skip them.
						if (metric >= 0) {
							perfPack.put(parts[0], new FloatValue(metric));
						}
					}
				}
			}
		} catch (Exception e) {
			Logger.println("SC-555", e.getMessage(), e);
			// Never retry after a failure: the server is assumed broken.
			mBeanServerEnable = false;
		}
	}
}
| 865 |
333 |
from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationTestCallAssembler
import sys
# On Windows, this test crashes obscurely, but only if compiled with
# Boehm, not if run with no GC at all. So for now we'll assume it is
# really a Boehm bug, or maybe a Boehm-on-Windows-specific issue, and
# skip.
if sys.platform == 'win32':
    import py
    # Skipping at module level aborts collection of this whole test file.
    py.test.skip("crashes on Windows (Boehm issue?)")


class TestTranslationCallAssemblerAarch64(TranslationTestCallAssembler):
    # All test cases are inherited from TranslationTestCallAssembler;
    # this subclass only selects the AArch64 backend via the test layout.
    pass
| 157 |
1,006 |
/****************************************************************************
* libs/libc/pwd/lib_getpwbuf.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <pwd.h>
#include "pwd/lib_pwd.h"
#include "libc.h"
/****************************************************************************
* Private Data
****************************************************************************/
static FAR char *g_buf;
static FAR struct passwd *g_pwd;
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: getpwbuf
*
* Description:
* libc/grp internal helper function for getpwgid and getpwnam to allocate
* and setup a passwd structure once a matching entry has been found.
*
* Input Parameters:
* uid - Value to set the passwd structure's pw_uid field to.
* gid - Value to set the passwd structure's pw_gid field to.
* name - Value to set the passwd structure's pw_name field to.
* dir - Value to set the passwd structure's pw_dir field to.
* shell - Value to set the passwd structure's pw_shell field to.
*
* Returned Value:
* A pointer to a statically allocated passwd structure, or NULL if an
* error occurs, in which case errno is set appropriately.
*
****************************************************************************/
FAR struct passwd *getpwbuf(uid_t uid, gid_t gid, FAR const char *name,
                            FAR const char *dir, FAR const char *shell)
{
  FAR struct passwd *result;
  FAR char *newbuf;
  size_t buflen;
  int err;

  /* The returned structure reuses the static g_buf/g_pwd buffers, so this
   * function is not re-entrant (like the classic getpwnam interface).
   */

  buflen = strlen(name) + 1 + strlen(dir) + 1 + strlen(shell) + 1;

  /* Grow (or initially allocate) the static string buffer.  On failure
   * g_buf still points at its previous allocation, which the error path
   * below releases.
   */

  newbuf = (FAR char *)lib_realloc(g_buf, buflen);

  if (!newbuf)
    {
      err = ENOMEM;
      goto error;
    }

  g_buf = newbuf;

  if (!g_pwd)
    {
      g_pwd = (FAR struct passwd *)lib_malloc(sizeof(struct passwd));
    }

  if (!g_pwd)
    {
      err = ENOMEM;
      goto error;
    }

  /* Delegate the actual field copying/pointer fixup to the re-entrant
   * helper, using the static buffers as its workspace.
   */

  err = getpwbuf_r(uid, gid, name, dir, shell,
                   g_pwd, g_buf, buflen, &result);
  if (err)
    {
      goto error;
    }

  return result;

error:

  /* Release both static buffers so the next call starts from scratch */

  lib_free(g_pwd);
  lib_free(g_buf);

  g_pwd = NULL;
  g_buf = NULL;

  set_errno(err);
  return NULL;
}
| 1,033 |
743 |
package pl.allegro.tech.hermes.domain.filtering.chain;
import pl.allegro.tech.hermes.domain.filtering.FilterableMessage;
import pl.allegro.tech.hermes.domain.filtering.MessageFilter;
import java.util.ArrayList;
import java.util.List;
/**
 * An ordered chain of {@link MessageFilter}s.  A message passes the chain
 * only if every filter accepts it; the first rejection (or exception)
 * short-circuits evaluation.
 */
public final class FilterChain {
    private final List<MessageFilter> messageFilters;

    FilterChain(final List<MessageFilter> messageFilters) {
        // Defensive copy: the chain must stay stable even if the caller
        // later mutates its list.
        this.messageFilters = new ArrayList<>(messageFilters);
    }

    /**
     * Applies each filter in order.  Returns a logical failure for the first
     * filter that rejects the message, an exceptional failure for the first
     * filter that throws, and {@link FilterResult#PASS} otherwise.
     */
    public FilterResult apply(final FilterableMessage message) {
        for (final MessageFilter currentFilter : messageFilters) {
            try {
                if (!currentFilter.test(message)) {
                    return FilterResult.failed(currentFilter.getType(), "logical");
                }
            } catch (Exception ex) {
                return FilterResult.failed(currentFilter.getType(), ex);
            }
        }
        return FilterResult.PASS;
    }
}
| 368 |
14,668 |
<reponame>chromium/chromium<filename>content/browser/aggregation_service/aggregatable_report.h
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CONTENT_BROWSER_AGGREGATION_SERVICE_AGGREGATABLE_REPORT_H_
#define CONTENT_BROWSER_AGGREGATION_SERVICE_AGGREGATABLE_REPORT_H_
#include <stddef.h>
#include <stdint.h>
#include <ostream>
#include <string>
#include <vector>
#include "base/time/time.h"
#include "base/values.h"
#include "content/browser/aggregation_service/public_key.h"
#include "content/common/content_export.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "url/origin.h"
namespace content {
class AggregatableReportRequest;
// The underlying private information which will be sent to the processing
// origins for aggregation. Each payload encodes a single contribution to a
// histogram bucket. This will be encrypted and won't be readable by the
// reporting endpoint.
struct CONTENT_EXPORT AggregationServicePayloadContents {
  // TODO(alexmt): Add kDistinctCount option.
  enum class Operation {
    kHistogram = 0,
    kMaxValue = kHistogram,  // mirrors the highest entry (Chromium enum style)
  };

  enum class ProcessingType {
    kTwoParty = 0,
    kSingleServer = 1,
    kMaxValue = kSingleServer,
  };

  AggregationServicePayloadContents(Operation operation,
                                    int bucket,
                                    int value,
                                    ProcessingType processing_type,
                                    url::Origin reporting_origin);

  Operation operation;
  int bucket;  // histogram bucket this contribution targets
  int value;   // contribution added to the bucket
  ProcessingType processing_type;
  url::Origin reporting_origin;
};
// Represents the information that will be provided to both the reporting
// endpoint and the processing origin(s), i.e. stored in the encrypted payload
// and in the plaintext report.
struct CONTENT_EXPORT AggregatableReportSharedInfo {
  AggregatableReportSharedInfo(base::Time scheduled_report_time,
                               std::string privacy_budget_key);

  base::Time scheduled_report_time;  // when the report is due to be sent
  std::string privacy_budget_key;    // opaque key used server-side to budget
};
// An AggregatableReport contains all the information needed for sending the
// report to its reporting endpoint. All nested information has already been
// serialized and encrypted as necessary.
class CONTENT_EXPORT AggregatableReport {
 public:
  // This is used to encapsulate the data that is specific to a single
  // processing origin.
  struct CONTENT_EXPORT AggregationServicePayload {
    AggregationServicePayload(url::Origin origin,
                              std::vector<uint8_t> payload,
                              std::string key_id);
    AggregationServicePayload(AggregationServicePayload&& other);
    AggregationServicePayload& operator=(AggregationServicePayload&& other);
    ~AggregationServicePayload();

    url::Origin origin;

    // This payload is constructed using the data in the
    // AggregationServicePayloadContents and then encrypted with one of
    // `origin`'s public keys. For the kTwoParty processing type, the plaintext
    // of the encrypted payload is a serialized CBOR map structured as follows:
    // {
    //   "version": "<API version>",
    //   "operation": "<chosen operation as string>",
    //   "privacy_budget_key": "<field for server to do privacy budgeting>",
    //   "scheduled_report_time": <timestamp in msec>,
    //   "reporting_origin": "https://reporter.example",
    //   "dpf_key": <binary serialization of the DPF key>,
    // }
    // For the kSingleServer processing type, the "dpf_key" field is replaced
    // with:
    //   "data": [{ "bucket": <bucket>, "value": <value> }]
    // If two processing origins are provided, one payload (chosen randomly)
    // would contain that data and the other would instead contain:
    //   "data": []
    std::vector<uint8_t> payload;

    // Indicates the chosen encryption key.
    std::string key_id;
  };

  // Used to allow mocking `CreateFromRequestAndPublicKeys()` in tests.
  class CONTENT_EXPORT Provider {
   public:
    virtual ~Provider();

    // Processes and serializes the information in `report_request` and
    // encrypts using the `public_keys` as necessary. The order of
    // `public_keys` should correspond to `report_request.processing_origins`,
    // which should be sorted. Returns `absl::nullopt` if an error occurred
    // during construction.
    virtual absl::optional<AggregatableReport> CreateFromRequestAndPublicKeys(
        AggregatableReportRequest report_request,
        std::vector<PublicKey> public_keys) const;

    // Sets whether to disable encryption of the payload(s). Should only be
    // used by the AggregationServiceTool.
    static void SetDisableEncryptionForTestingTool(bool should_disable);

   private:
    static bool g_disable_encryption_for_testing_tool_;
  };

  // log_2 of the number of buckets
  static constexpr size_t kBucketDomainBitLength = 32;

  // log_2 of the value output space
  static constexpr size_t kValueDomainBitLength = 64;

  // Used as the authenticated information (i.e. context info). This value must
  // not be reused for new protocols or versions of this protocol unless the
  // ciphertexts are intended to be compatible. This ensures that, even if
  // public keys are reused, the same ciphertext cannot be (i.e. no cross-
  // protocol attacks).
  static constexpr char kDomainSeparationValue[] = "aggregation_service";

  AggregatableReport(std::vector<AggregationServicePayload> payloads,
                     AggregatableReportSharedInfo shared_info);

  // Move-only.
  AggregatableReport(AggregatableReport&& other);
  AggregatableReport& operator=(AggregatableReport&& other);

  ~AggregatableReport();

  const std::vector<AggregationServicePayload>& payloads() const {
    return payloads_;
  }
  const AggregatableReportSharedInfo& shared_info() const {
    return shared_info_;
  }

  // Returns the JSON representation of this report of the form
  // {
  //   "scheduled_report_time": "<timestamp in msec>",
  //   "privacy_budget_key": "<field for server to do privacy budgeting>",
  //   "version": "<api version>",
  //   "aggregation_service_payloads": [
  //     {
  //       "origin": "https://helper1.example",
  //       "payload": "<base64 encoded encrypted data>",
  //       "key_id": "<string identifying public key used>"
  //     },
  //     {
  //       "origin": "https://helper2.example",
  //       "payload": "<base64 encoded encrypted data>",
  //       "key_id": "<string identifying public key used>"
  //     }
  //   ]
  // }
  // Note that APIs may wish to add additional key-value pairs to this returned
  // value. `this` is required to be an rvalue to avoid unnecessary copies;
  // this method should only need to be called once.
  base::Value::DictStorage GetAsJson() &&;

  // TODO(crbug.com/1247409): Expose static method to validate that a
  // base::Value appears to represent a valid report.

  // Returns whether `number` is a valid number of processing origins for the
  // `processing_type`.
  static bool IsNumberOfProcessingOriginsValid(
      size_t number,
      AggregationServicePayloadContents::ProcessingType processing_type);

 private:
  // This vector should have an entry for each processing origin specified in
  // the original AggregatableReportRequest.
  std::vector<AggregationServicePayload> payloads_;

  AggregatableReportSharedInfo shared_info_;
};
// Represents a request for an AggregatableReport. Contains all the data
// necessary to construct the report except for the PublicKey for each
// processing origin.
class CONTENT_EXPORT AggregatableReportRequest {
 public:
  // Returns `absl::nullopt` if `payload_contents` has a negative bucket or
  // value. Also returns `absl::nullopt` if `processing_origins.size()` is not
  // valid for the `payload_contents.processing_type` (see
  // `IsNumberOfProcessingOriginsValid` above).
  static absl::optional<AggregatableReportRequest> Create(
      std::vector<url::Origin> processing_origins,
      AggregationServicePayloadContents payload_contents,
      AggregatableReportSharedInfo shared_info);

  // Move-only.
  AggregatableReportRequest(AggregatableReportRequest&& other);
  AggregatableReportRequest& operator=(AggregatableReportRequest&& other);

  ~AggregatableReportRequest();

  const std::vector<url::Origin>& processing_origins() const {
    return processing_origins_;
  }
  const AggregationServicePayloadContents& payload_contents() const {
    return payload_contents_;
  }
  const AggregatableReportSharedInfo& shared_info() const {
    return shared_info_;
  }

 private:
  // To avoid unnecessary copies, allow the provider to directly access
  // members of the AggregatableReportRequest being consumed.
  friend class AggregatableReport::Provider;

  // Private: construction must go through Create() so the arguments are
  // validated first.
  AggregatableReportRequest(std::vector<url::Origin> processing_origins,
                            AggregationServicePayloadContents payload_contents,
                            AggregatableReportSharedInfo shared_info);

  std::vector<url::Origin> processing_origins_;
  AggregationServicePayloadContents payload_contents_;
  AggregatableReportSharedInfo shared_info_;
};
} // namespace content
#endif // CONTENT_BROWSER_AGGREGATION_SERVICE_AGGREGATABLE_REPORT_H_
| 3,027 |
598 |
//
// This file is subject to the terms and conditions defined in
// file 'LICENSE.md', which is part of this source code package.
//
@import CoreLocation;
@import MapKit;
// Returns YES when the two regions overlap.  (Implementation not visible
// here; presumably a span/bounding-box intersection test -- verify in the .m.)
BOOL MKCoordinateRegionIntersectsRegion(MKCoordinateRegion region1, MKCoordinateRegion region2);
// Returns YES when `coordinate` lies inside `region`.
BOOL MKCoordinateRegionContainsCoordinate(MKCoordinateRegion region, CLLocationCoordinate2D coordinate);
// Distance in meters between the two coordinates (likely via CLLocation's
// distanceFromLocation: -- confirm against the implementation).
CLLocationDistance CLMetersBetweenCoordinates(CLLocationCoordinate2D c1, CLLocationCoordinate2D c2);
| 137 |
771 |
<reponame>revolunet/tiny-helpers
{
"name": "flexulator",
"desc": "Understand how the flexbox spacing formula works right in the browser",
"url": "https://www.flexulator.com",
"tags": [
"CSS"
],
"maintainers": [
"telagraphic"
],
"addedAt": "2020-02-28"
}
| 112 |
311 |
<reponame>thunlp/THUTag<gh_stars>100-1000
package org.thunlp.tagsuggest.train;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Properties;
import java.util.Set;
import java.util.logging.Logger;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.thunlp.io.JsonUtil;
import org.thunlp.io.RecordReader;
import org.thunlp.misc.Flags;
import org.thunlp.misc.StringUtil;
import org.thunlp.tagsuggest.common.ConfigIO;
import org.thunlp.tagsuggest.common.FeatureExtractor;
import org.thunlp.tagsuggest.common.KeywordPost;
import org.thunlp.tagsuggest.common.ModelTrainer;
import org.thunlp.tagsuggest.common.Post;
import org.thunlp.tagsuggest.common.TagFilter;
import org.thunlp.tagsuggest.common.WordFeatureExtractor;
import org.thunlp.text.Lexicon;
import org.thunlp.text.Lexicon.Word;
import org.thunlp.tool.GenericTool;
/**
 * Builds the Lucene index used by the ExpandRank tag suggester.
 *
 * <p>Training first builds word/tag lexicons over the input, then writes one
 * Lucene document per accepted post into {@code <model>/docs}.
 */
public class TrainExpandRank implements GenericTool, ModelTrainer {
  private static Logger LOG = Logger.getAnonymousLogger();
  private WordFeatureExtractor extractor;
  private TagFilter tagFilter = null;
  private Properties config = null;
  private Lexicon wordlex = null;
  private Lexicon taglex = null;
  private String fold = "";
  private int minTagFreq = 1;
  // Scratch buffer reused by makeContentDoc(); makes this class not
  // thread-safe.
  private Set<String> filtered = new HashSet<String>();

  /** Command-line entry point: --input, --output and --config are required. */
  @Override
  public void run(String[] args) throws Exception {
    Flags flags = new Flags();
    flags.add("input");
    flags.add("output");
    flags.add("config");
    flags.parseAndCheck(args);

    Properties config = ConfigIO.configFromString(flags.getString("config"));
    train(flags.getString("input"), flags.getString("output"), config);
  }

  @Override
  public void train(String input, String modelPath, Properties config)
      throws IOException {
    this.config = config;
    this.fold = config.getProperty("fold", "");
    minTagFreq = Integer.parseInt(config.getProperty("mintagfreq", "1"));

    // Build the word/tag lexicons over the whole input first; both the
    // feature extractor and the tag filter depend on them.
    wordlex = new Lexicon();
    taglex = new Lexicon();
    WordFeatureExtractor.buildLexicons(
        input, wordlex, taglex, config);
    WordFeatureExtractor e = new WordFeatureExtractor(config);
    e.setTagLexicon(taglex);
    e.setWordLexicon(wordlex);
    extractor = e;
    tagFilter = new TagFilter(config, taglex);

    buildIndexes(input, new File(modelPath));
  }

  /**
   * Writes one Lucene document per accepted post into {@code modelDir/docs}.
   * Posts from blacklisted users, users outside a non-empty whitelist, and
   * posts belonging to the held-out fold are skipped.
   */
  public void buildIndexes(String input, File modelDir)
      throws IOException {
    if (!modelDir.exists()) {
      modelDir.mkdir();
    }

    Set<String> whitelist = new HashSet<String>();
    Set<String> blacklist = new HashSet<String>();
    if (config.getProperty("whitelist", "").length() > 0) {
      whitelist.addAll(
          Arrays.asList(config.getProperty("whitelist", "").split(",")));
    }
    if (config.getProperty("blacklist", "").length() > 0) {
      blacklist.addAll(
          Arrays.asList(config.getProperty("blacklist", "").split(",")));
    }

    WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer();
    JsonUtil J = new JsonUtil();
    IndexWriter docsIndex =
        new IndexWriter(new File(modelDir, "docs"), analyzer);
    try {
      RecordReader reader = new RecordReader(input);
      try {
        while (reader.next()) {
          KeywordPost p = J.fromJson(reader.value(), KeywordPost.class);
          if (blacklist.contains(p.getUserId())) {
            continue;
          }
          if (whitelist.size() > 0 && !whitelist.contains(p.getUserId())) {
            continue;
          }
          if (fold.length() > 0 && p.getExtras().equals(fold)) {
            continue;
          }
          Document contentDoc = makeContentDoc(p);
          docsIndex.addDocument(contentDoc);
          if (reader.numRead() % 5000 == 0) {
            LOG.info("Added " + reader.numRead() + " documents.");
          }
        }
      } finally {
        // Bug fix: the reader used to leak when indexing threw.
        reader.close();
      }
      LOG.info("Optimizing docs index...");
      docsIndex.optimize();
    } finally {
      // Bug fix: always release the index writer (and its lock), even when
      // an exception aborts indexing.
      docsIndex.close();
    }
  }

  /**
   * Converts a post into a Lucene document with doc_id, content (extracted
   * keyword features), filtered tags, and user_id fields.
   */
  public Document makeContentDoc(KeywordPost p) {
    String[] words = extractor.extractKeyword(p, true, true, true);
    String docString = StringUtil.join(words, " ");
    Document d = new Document();
    d.add(new Field("doc_id", p.getId(),
        Field.Store.YES, Field.Index.UN_TOKENIZED));
    d.add(new Field("content", docString,
        Field.Store.YES, Field.Index.TOKENIZED));
    tagFilter.filter(p.getTags(), filtered);
    d.add(new Field("tags", StringUtil.join(filtered, " "),
        Field.Store.YES, Field.Index.TOKENIZED));
    d.add(new Field("user_id", p.getUserId(),
        Field.Store.YES, Field.Index.UN_TOKENIZED));
    return d;
  }
}
| 1,804 |
348 |
<filename>docs/data/leg-t2/034/03404091.json
{"nom":"<NAME>","circ":"4ème circonscription","dpt":"Hérault","inscrits":76,"abs":34,"votants":42,"blancs":3,"nuls":0,"exp":39,"res":[{"nuance":"REM","nom":"<NAME>","voix":32},{"nuance":"FN","nom":"<NAME>","voix":7}]}
| 111 |
30,023 |
<gh_stars>1000+
"""Tests for the IFTTT component."""
| 20 |
348 |
<gh_stars>100-1000
{"nom":"Aube","circ":"3ème circonscription","dpt":"Moselle","inscrits":188,"abs":82,"votants":106,"blancs":8,"nuls":3,"exp":95,"res":[{"nuance":"LR","nom":"<NAME>","voix":70},{"nuance":"REM","nom":"<NAME>","voix":25}]}
| 98 |
5,169 |
{
"name": "Alau.me",
"version": "2.1.1",
"summary": "Add referral tracking to your app on the App Store. Create unique download links to track clicks and actual installs.",
"description": "[Alau.me](http://alau.me) is an iOS App Referral Tracking API. It allows you to create short referral links pointing to your app on the App Store and then track the resulting app installs. Kind of like bit.ly, except tailored specifically to apps. But unlike bit.ly, [alau.me](http://alau.me) lets you track not just how many people clicked the link, but how many people went on to download and use your app. \n\nYou can create short links for a specific ad campaign, or you can create a unique link for each of your users, and track user-to-user referrals. Alau.me scales to millions of users and is used by hundreds of apps.\n",
"homepage": "https://github.com/LumenSpark/Alau.me",
"license": {
"type": "Commercial",
"text": "https://alau.me/home/terms"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/LumenSpark/Alau.me.git",
"tag": "2.1.1"
},
"platforms": {
"ios": "7.0"
},
"requires_arc": false,
"source_files": "Alau.me.framework/Headers/*.h",
"frameworks": "Security",
"preserve_paths": "Alau.me.framework",
"xcconfig": {
"FRAMEWORK_SEARCH_PATHS": "$(PODS_ROOT)/Alau.me",
"OTHER_LDFLAGS": "-framework Alau.me"
}
}
| 490 |
335 |
{
"word": "Septuagenarian",
"definitions": [
"A person who is between 70 and 79 years old."
],
"parts-of-speech": "Noun"
}
| 65 |
333 |
/************************************************************/
/***  C header subsection: operations on LowLevelTypes    ***/

#include <string.h>

/* used by rpython.rlib.rstack */
#define OP_STACK_CURRENT(r)  r = (Signed)&r

/* Raw heap allocation: 'zero' selects calloc() (cleared memory) over
   malloc().  The malloc counter is only bumped on success; 'result' may
   legitimately be NULL and callers must check it. */
#define OP_RAW_MALLOC(size, zero, result)  {    \
        if (zero)                               \
            result = calloc(size, 1);           \
        else                                    \
            result = malloc(size);              \
        if (result != NULL) {                   \
            COUNT_MALLOC;                       \
        }                                       \
    }

#define OP_RAW_FREE(p, r) free(p); COUNT_FREE;

#define OP_RAW_MEMCLEAR(p, size, r) memset((void*)p, 0, size)
#define OP_RAW_MEMSET(p, byte, size, r) memset((void*)p, byte, size)

#define OP_RAW_MALLOC_USAGE(size, r) r = size

/* MSVC spells alloca() with a leading underscore */
#if defined(MS_WINDOWS) && !defined(__MINGW32__)
#define alloca  _alloca
#endif

/* note the argument order: destination is 'y', source is 'x' */
#define OP_RAW_MEMCOPY(x,y,size,r) memcpy(y,x,size);
#define OP_RAW_MEMMOVE(x,y,size,r) memmove(y,x,size);
/************************************************************/

#define OP_FREE(p)	OP_RAW_FREE(p, do_not_use)

/* Optional malloc/free accounting, enabled by defining COUNT_OP_MALLOCS.
   When disabled, every counter hook compiles away to nothing. */
#ifndef COUNT_OP_MALLOCS

#define COUNT_MALLOC   /* nothing */
#define COUNT_FREE     /* nothing */
#define pypy_malloc_counters_results()  /* nothing */

#else /* COUNT_OP_MALLOCS */

static int count_mallocs=0, count_frees=0;

#define COUNT_MALLOC   count_mallocs++
#define COUNT_FREE     count_frees++

#define pypy_malloc_counters_results()  \
    printf("MALLOC COUNTERS: %d %d\n", count_mallocs, count_frees)

#endif /* COUNT_OP_MALLOCS */
/*** tracking raw mallocs and frees for debugging ***/

#ifndef RPY_ASSERT

/* Release build: allocation tracking compiles away completely. */
#  define OP_TRACK_ALLOC_START(addr, r)   /* nothing */
#  define OP_TRACK_ALLOC_STOP(addr, r)    /* nothing */
#  define pypy_debug_alloc_results()      /* nothing */

#else   /* RPY_ASSERT */

/* Debug build: record each raw allocation together with the name of the
   allocating function, and check it is eventually stopped/freed. */
#  define OP_TRACK_ALLOC_START(addr, r)  pypy_debug_alloc_start(addr, \
                                                                __FUNCTION__)
#  define OP_TRACK_ALLOC_STOP(addr, r)   pypy_debug_alloc_stop(addr)

RPY_EXTERN void pypy_debug_alloc_start(void*, const char*);
RPY_EXTERN void pypy_debug_alloc_stop(void*);
RPY_EXTERN void pypy_debug_alloc_results(void);

#endif /* RPY_ASSERT */
/* for Boehm GC */

#ifdef PYPY_USING_BOEHM_GC

/* Allocator variant is selected by token-pasting the (is_atomic, is_varsize)
   flags; "atomic" means the block contains no pointers the GC must trace. */
#define BOEHM_MALLOC_0_0   GC_MALLOC
#define BOEHM_MALLOC_1_0   GC_MALLOC_ATOMIC
#define BOEHM_MALLOC_0_1   GC_MALLOC
#define BOEHM_MALLOC_1_1   GC_MALLOC_ATOMIC
/* #define BOEHM_MALLOC_0_1   GC_MALLOC_IGNORE_OFF_PAGE */
/* #define BOEHM_MALLOC_1_1   GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE */

#define OP_BOEHM_ZERO_MALLOC(size, r, restype, is_atomic, is_varsize)  {      \
	r = (restype) BOEHM_MALLOC_ ## is_atomic ## _ ## is_varsize (size);   \
	if (r && is_atomic)  /* the non-atomic versions return cleared memory */ \
		memset((void*) r, 0, size);                                   \
  }

#define OP_BOEHM_DISAPPEARING_LINK(link, obj, r)                           \
	if (GC_base(obj) == NULL)                                          \
		; /* 'obj' is probably a prebuilt object - it makes no */  \
		  /* sense to register it then, and it crashes Boehm in */ \
		  /* quite obscure ways */                                 \
	else                                                               \
		GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj)

RPY_EXTERN int boehm_gc_finalizer_lock;
RPY_EXTERN void boehm_gc_startup_code(void);
RPY_EXTERN void boehm_gc_finalizer_notifier(void);

/* finalizer queues: one queue per tag index, with a trigger callback each */
struct boehm_fq_s;
RPY_EXTERN struct boehm_fq_s *boehm_fq_queues[];
RPY_EXTERN void (*boehm_fq_trigger[])(void);
RPY_EXTERN void boehm_fq_register(struct boehm_fq_s **, void *);
RPY_EXTERN void *boehm_fq_next_dead(struct boehm_fq_s **);

/* re-enabling finalizers also fires the notifier to run any pending ones */
#define OP_GC__DISABLE_FINALIZERS(r)  boehm_gc_finalizer_lock++
#define OP_GC__ENABLE_FINALIZERS(r)   (boehm_gc_finalizer_lock--,  \
                                       boehm_gc_finalizer_notifier())
#define OP_GC__DISABLE(r)  /* nothing */
#define OP_GC__ENABLE(r)  /* nothing */

#define OP_BOEHM_FQ_REGISTER(tagindex, obj, r)   \
    boehm_fq_register(boehm_fq_queues + tagindex, obj)
#define OP_BOEHM_FQ_NEXT_DEAD(tagindex, r)       \
    r = boehm_fq_next_dead(boehm_fq_queues + tagindex)

#endif /* PYPY_USING_BOEHM_GC */
#ifdef PYPY_USING_NO_GC_AT_ALL
/* Translation with no GC at all: allocate cleared memory, never free it,
   and turn every GC hook into a no-op. */
#define OP_BOEHM_ZERO_MALLOC(size, r, restype, is_atomic, is_varsize)  \
  r = (restype) calloc(1, size);
#define OP_BOEHM_DISAPPEARING_LINK(link, obj, r)  /* nothing */
#define OP_GC__DISABLE_FINALIZERS(r)  /* nothing */
#define OP_GC__ENABLE_FINALIZERS(r)  /* nothing */
#define OP_GC__DISABLE(r)  /* nothing */
#define OP_GC__ENABLE(r)  /* nothing */
#define GC_REGISTER_FINALIZER(a, b, c, d, e)  /* nothing */
#define GC_gcollect()  /* nothing */
#define GC_set_max_heap_size(a)  /* nothing */
#define OP_GC_FQ_REGISTER(tag, obj, r)   /* nothing */
#define OP_GC_FQ_NEXT_DEAD(tag, r)   (r = NULL)
#endif

/* Boehm and no-GC builds normally have no per-object GC header. */
#if (defined(PYPY_USING_BOEHM_GC) || defined(PYPY_USING_NO_GC_AT_ALL)) && !defined(PYPY_BOEHM_WITH_HEADER)
# define RPY_SIZE_OF_GCHEADER  0
#else
# define RPY_SIZE_OF_GCHEADER  sizeof(struct pypy_header0)
#endif

/************************************************************/
/* weakref support */

#define OP_CAST_PTR_TO_WEAKREFPTR(x, r)  r = x
#define OP_CAST_WEAKREFPTR_TO_PTR(x, r)  r = x

/************************************************************/
/* dummy version of these operations, e.g. with Boehm */

#define OP_GC_GET_RPY_ROOTS(r)           r = 0
#define OP_GC_GET_RPY_REFERENTS(x, r)    r = 0
#define OP_GC_GET_RPY_MEMORY_USAGE(x, r) r = -1
#define OP_GC_GET_RPY_TYPE_INDEX(x, r)   r = -1
#define OP_GC_IS_RPY_INSTANCE(x, r)      r = 0
#define OP_GC_DUMP_RPY_HEAP(fd, r)       r = 0
#define OP_GC_SET_EXTRA_THRESHOLD(x, r)  /* nothing */
#define OP_GC_IGNORE_FINALIZER(x, r)     /* nothing */

/****************************/
/* misc stuff               */
/****************************/

/* Force the compiler to consider 'v' still live at this point, so the
   value cannot be optimized away before this program location. */
#ifndef _MSC_VER
#  define pypy_asm_keepalive(v)  asm volatile ("/* keepalive %0 */" : : \
                                               "g" (v))
#else
#  ifndef _WIN64
#    define pypy_asm_keepalive(v)    __asm { }
#  else
     /* is there something cheaper? */
#    define pypy_asm_keepalive(v)    _ReadWriteBarrier();
#  endif
#endif
| 2,992 |
5,821 |
<gh_stars>1000+
from metaflow_test import MetaflowTest, ExpectationFailed, steps
class MergeArtifactsTest(MetaflowTest):
    """Exercises FlowSpec.merge_artifacts() at a join step.

    Covered behaviors:
      * an artifact set once in 'start' and never modified passes down,
      * an artifact rewritten to the same value on every branch merges,
      * artifacts set to branch-unique values raise until resolved
        manually or excluded,
      * exclude= and include= are mutually exclusive,
      * calling merge_artifacts() outside a join raises.

    NOTE: assert_equals/assert_exception are helpers injected into the
    step namespace by the metaflow_test harness.
    """
    PRIORITY = 1

    @steps(0, ['start'])
    def start(self):
        # Artifacts with different merge characteristics (see class docstring).
        self.non_modified_passdown = 'a'    # never touched after start
        self.modified_to_same_value = 'b'   # rewritten identically on all branches
        self.manual_merge_required = 'c'    # will diverge across branches
        self.ignore_me = 'd'                # diverges too, but excluded at merge
    @steps(2, ['linear'])
    def modify_things(self):
        # Set to different things
        from metaflow.current import current
        # current.task_id is unique per task, so these two artifacts now
        # hold conflicting values on the branches that get joined later.
        self.manual_merge_required = current.task_id
        self.ignore_me = current.task_id
        # Same value on every branch -> merge_artifacts resolves it silently.
        self.modified_to_same_value = 'e'
        assert_equals(self.non_modified_passdown, 'a')
    @steps(0, ['join'], required=True)
    def merge_things(self, inputs):
        from metaflow.current import current
        from metaflow.exception import UnhandledInMergeArtifactsException, MetaflowException
        # Test to make sure non-merged values are reported
        assert_exception(lambda: self.merge_artifacts(inputs), UnhandledInMergeArtifactsException)
        # Test to make sure nothing is set if failed merge_artifacts
        assert(not hasattr(self, 'non_modified_passdown'))
        assert(not hasattr(self, 'manual_merge_required'))
        # Test to make sure that only one of exclude/include is used
        assert_exception(lambda: self.merge_artifacts(
            inputs,
            exclude=['ignore_me'],
            include=['non_modified_passdown']), MetaflowException)
        # Test to make sure nothing is set if failed merge_artifacts
        assert(not hasattr(self, 'non_modified_passdown'))
        assert(not hasattr(self, 'manual_merge_required'))
        # Test actual merge (ignores set values and excluded names, merges common and non modified)
        self.manual_merge_required = current.task_id
        self.merge_artifacts(inputs, exclude=['ignore_me'])
        # Ensure that everything we expect is passed down
        assert_equals(self.non_modified_passdown, 'a')
        assert_equals(self.modified_to_same_value, 'e')
        assert_equals(self.manual_merge_required, current.task_id)
        assert(not hasattr(self, 'ignore_me'))
    @steps(0, ['end'])
    def end(self):
        from metaflow.exception import MetaflowException
        # This is not a join so test exception for calling in non-join
        assert_exception(lambda: self.merge_artifacts([]), MetaflowException)
        # Check that all values made it through
        assert_equals(self.non_modified_passdown, 'a')
        assert_equals(self.modified_to_same_value, 'e')
        assert(hasattr(self, 'manual_merge_required'))
    @steps(3, ['all'])
    def step_all(self):
        # Every remaining step only needs the pass-down artifact intact.
        assert_equals(self.non_modified_passdown, 'a')
1,350 |
<gh_stars>1000+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.cdn.models;
import com.azure.core.annotation.Immutable;
import com.azure.core.util.logging.ClientLogger;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
/** The JSON object that contains the properties of the origin. */
@Immutable
public final class OriginProperties extends OriginUpdatePropertiesParameters {
    // NOTE: generated by AutoRest (see file header); keep edits minimal so
    // regeneration does not produce spurious diffs.
    @JsonIgnore private final ClientLogger logger = new ClientLogger(OriginProperties.class);

    /*
     * Resource status of the origin.
     * WRITE_ONLY access: populated from service responses, never serialized
     * back in requests.
     */
    @JsonProperty(value = "resourceState", access = JsonProperty.Access.WRITE_ONLY)
    private OriginResourceState resourceState;

    /*
     * Provisioning status of the origin.
     */
    @JsonProperty(value = "provisioningState", access = JsonProperty.Access.WRITE_ONLY)
    private String provisioningState;

    /*
     * The approval status for the connection to the Private Link
     */
    @JsonProperty(value = "privateEndpointStatus", access = JsonProperty.Access.WRITE_ONLY)
    private PrivateEndpointStatus privateEndpointStatus;

    /**
     * Get the resourceState property: Resource status of the origin.
     *
     * @return the resourceState value.
     */
    public OriginResourceState resourceState() {
        return this.resourceState;
    }

    /**
     * Get the provisioningState property: Provisioning status of the origin.
     *
     * @return the provisioningState value.
     */
    public String provisioningState() {
        return this.provisioningState;
    }

    /**
     * Get the privateEndpointStatus property: The approval status for the connection to the Private Link.
     *
     * @return the privateEndpointStatus value.
     */
    public PrivateEndpointStatus privateEndpointStatus() {
        return this.privateEndpointStatus;
    }

    /* The with* overrides below only narrow the return type to
     * OriginProperties so calls can be fluently chained. */

    /** {@inheritDoc} */
    @Override
    public OriginProperties withHostname(String hostname) {
        super.withHostname(hostname);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public OriginProperties withHttpPort(Integer httpPort) {
        super.withHttpPort(httpPort);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public OriginProperties withHttpsPort(Integer httpsPort) {
        super.withHttpsPort(httpsPort);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public OriginProperties withOriginHostHeader(String originHostHeader) {
        super.withOriginHostHeader(originHostHeader);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public OriginProperties withPriority(Integer priority) {
        super.withPriority(priority);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public OriginProperties withWeight(Integer weight) {
        super.withWeight(weight);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public OriginProperties withEnabled(Boolean enabled) {
        super.withEnabled(enabled);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public OriginProperties withPrivateLinkAlias(String privateLinkAlias) {
        super.withPrivateLinkAlias(privateLinkAlias);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public OriginProperties withPrivateLinkResourceId(String privateLinkResourceId) {
        super.withPrivateLinkResourceId(privateLinkResourceId);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public OriginProperties withPrivateLinkLocation(String privateLinkLocation) {
        super.withPrivateLinkLocation(privateLinkLocation);
        return this;
    }

    /** {@inheritDoc} */
    @Override
    public OriginProperties withPrivateLinkApprovalMessage(String privateLinkApprovalMessage) {
        super.withPrivateLinkApprovalMessage(privateLinkApprovalMessage);
        return this;
    }

    /**
     * Validates the instance.
     *
     * @throws IllegalArgumentException thrown if the instance is not valid.
     */
    @Override
    public void validate() {
        super.validate();
    }
}
| 1,518 |
390 |
{"dependencies":{},"fastboot":{"appName":"fastboot-app","config":{"fastboot-app":{"APP":{"autoboot":false,"name":"fastboot-app","version":"0.0.0+fcd8aedb"},"EmberENV":{"EXTEND_PROTOTYPES":{"Date":false},"FEATURES":{},"_APPLICATION_TEMPLATE_WRAPPER":false,"_DEFAULT_ASYNC_OBSERVERS":true,"_JQUERY_INTEGRATION":false,"_TEMPLATE_ONLY_GLIMMER_COMPONENTS":true},"environment":"development","exportApplicationGlobal":true,"locationType":"auto","modulePrefix":"fastboot-app","rootURL":"/"}},"manifest":{"appFiles":["assets/fastboot-app.js","assets/fastboot-app-fastboot.js"],"htmlFile":"index.html","vendorFiles":["assets/vendor.js","assets/auto-import-fastboot.js"]},"moduleWhitelist":[],"schemaVersion":3}}
| 235 |
1,240 |
<gh_stars>1000+
// RadarScroller.h
// XXP
//
// Created by <NAME> on Mon Feb 17 2003.
#import <Foundation/Foundation.h>
/* NSScroller subclass that keeps a set of named, colored marks and
   (presumably) draws them on the scroller track at positions scaled by
   the current max height -- drawing happens in the implementation file. */
@interface RadarScroller : NSScroller {
    NSMutableDictionary *I_marks;   // keyed by the mark identifier strings
    float I_maxHeight;              // scale reference for min/max locations
}
// NOTE(review): parameter is declared int while I_maxHeight is a float --
// presumably fine for pixel heights, but confirm the intended type.
- (void)setMaxHeight:(int)maxHeight;
// Adds or replaces the mark for aIdentifier covering the given location range.
- (void)setMarkFor:(NSString *)aIdentifier withColor:(NSColor *)aColor
    forMinLocation:(float)aMinLocation andMaxLocation:(float)aMaxLocation;
- (void)removeMarkFor:(NSString *)aIdentifier;
@end
| 174 |
5,730 |
package org.ansj.recognition.impl;
import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.NlpAnalysis;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.List;
/**
 * Part-of-speech ("nature") recognition tests.
 *
 * @author Ansj
 *
 */
public class NatureRecognitionTest {

    @Test
    public void test() {
        // Tag an NLP-segmented sentence with part-of-speech information.
        System.out.println(NlpAnalysis.parse("结婚的和尚未结婚的孙建是一个好人").recognition(new NatureRecognition()));
    }

    @Test
    public void natureGuess() {
        // Guess the nature of words directly (company, person and place names).
        System.out.println(NatureRecognition.guessNature("北京英富森股份有限公司").nature);
        System.out.println(NatureRecognition.guessNature("尹科").nature);
        System.out.println(NatureRecognition.guessNature("保福寺桥").nature);
        System.out.println(NatureRecognition.guessNature("爱丽丝布衣诺夫").nature);
    }

    @Test
    public void recognitionList() {
        // recognition(List) should echo the input term text unchanged.
        String string = "结婚的和尚未结婚的孙建是一个好人";
        List<String> words = new ArrayList<>();
        words.add(string);
        List<Term> terms = new NatureRecognition().recognition(words);
        System.out.println(terms.get(0).toString());
        Assert.assertEquals(terms.get(0).toString(), string);
    }

    @Test
    public void recognitionListAndInt() {
        // Same as above, using the overload that takes a start offset.
        String string = "结婚的和尚未结婚的孙建是一个好人";
        List<String> words = new ArrayList<>();
        words.add(string);
        List<Term> terms = new NatureRecognition().recognition(words, 0);
        System.out.println(terms.get(0).toString());
        Assert.assertEquals(terms.get(0).toString(), string);
    }
}
| 632 |
445 |
/////////////////////////////////////////////////////////////////////////
// $Id: sse_move.cc,v 1.95 2008/08/16 12:19:30 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2003 <NAME>
// Written by <NAME> [sshwarts at sourceforge net]
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
#if BX_SUPPORT_SSE
/* Dump MXCSR and the full XMM register file to the debug log. */
void BX_CPU_C::print_state_SSE(void)
{
  BX_DEBUG(("MXCSR: 0x%08x\n", BX_MXCSR_REGISTER));
  for(unsigned n=0;n<BX_XMM_REGISTERS;n++) {
    BxPackedXmmRegister xmm = BX_READ_XMM_REG(n);
    /* printed high dword first, as 128-bit hex split at bit 64 */
    BX_DEBUG(("XMM%02u: %08x%08x:%08x%08x\n", n,
       xmm.xmm32u(3), xmm.xmm32u(2), xmm.xmm32u(1), xmm.xmm32u(0)));
  }
}
#endif
#if BX_SUPPORT_FPU
/* Compress the 16-bit x87 tag word (2 bits per stack slot) into the 8-bit
   FXSAVE tag byte: bit n is set iff slot n is not empty (tag != 11b). */
Bit8u BX_CPU_C::pack_FPU_TW(Bit16u twd)
{
  Bit8u tag_byte = 0;

  for (unsigned n = 0; n < 8; n++) {
    if (((twd >> (2*n)) & 0x3) != 0x3)
      tag_byte |= (1 << n);
  }

  return tag_byte;
}
/* Expand the 8-bit FXSAVE tag byte back into the 16-bit x87 tag word,
   reconstructing each non-empty slot's tag from the register contents. */
Bit16u BX_CPU_C::unpack_FPU_TW(Bit16u tag_byte)
{
  Bit32u twd = 0;

  /* FTW
   *
   * Note that the original format for FTW can be recreated from the stored
   * FTW valid bits and the stored 80-bit FP data (assuming the stored data
   * was not the contents of MMX registers) using the following table:

     | Exponent | Exponent | Fraction | J,M bits | FTW valid | x87 FTW |
     |   all 1s |   all 0s |   all 0s |          |           |         |
     -------------------------------------------------------------------
     |        0 |        0 |        0 |    0x    |     1     |   S 10  |
     |        0 |        0 |        0 |    1x    |     1     |   V 00  |
     -------------------------------------------------------------------
     |        0 |        0 |        1 |    00    |     1     |   S 10  |
     |        0 |        0 |        1 |    10    |     1     |   V 00  |
     -------------------------------------------------------------------
     |        0 |        1 |        0 |    0x    |     1     |   S 10  |
     |        0 |        1 |        0 |    1x    |     1     |   S 10  |
     -------------------------------------------------------------------
     |        0 |        1 |        1 |    00    |     1     |   Z 01  |
     |        0 |        1 |        1 |    10    |     1     |   S 10  |
     -------------------------------------------------------------------
     |        1 |        0 |        0 |    0x    |     1     |   S 10  |
     |        1 |        0 |        0 |    1x    |     1     |   S 10  |
     -------------------------------------------------------------------
     |        1 |        0 |        1 |    00    |     1     |   S 10  |
     |        1 |        0 |        1 |    10    |     1     |   S 10  |
     -------------------------------------------------------------------
     |      all combinations above             |     0     |   E 11  |

   *
   * The J-bit is defined to be the 1-bit binary integer to the left of
   * the decimal place in the significand.
   *
   * The M-bit is defined to be the most significant bit of the fractional
   * portion of the significand (i.e., the bit immediately to the right of
   * the decimal place). When the M-bit is the most significant bit of the
   * fractional portion of the significand, it must be 0 if the fraction
   * is all 0's.
   */

  /* walk from slot 7 down to 0, shifting the result left two bits each
     iteration and testing the corresponding valid bit of tag_byte */
  for(int index = 7;index >= 0; index--, twd <<= 2, tag_byte <<= 1)
  {
    if(tag_byte & 0x80) {
       const floatx80 &fpu_reg = BX_FPU_REG(index);
       twd |= FPU_tagof(fpu_reg);
    }
    else {
       twd |= FPU_Tag_Empty;
    }
  }

  return (twd >> 2);
}
#endif
/* ************************************ */
/* SSE: SAVE/RESTORE FPU/MMX/SSEx STATE */
/* ************************************ */
/* 0F AE Grp15 010 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LDMXCSR(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
  BX_CPU_THIS_PTR prepareSSE();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  Bit32u new_mxcsr = read_virtual_dword(i->seg(), eaddr);

  /* attempting to set any reserved MXCSR bit raises #GP(0) */
  if(new_mxcsr & ~MXCSR_MASK)
      exception(BX_GP_EXCEPTION, 0, 0);

  BX_MXCSR_REGISTER = new_mxcsr;
#else
  BX_INFO(("LDMXCSR: required SSE, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 0F AE Grp15 011 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::STMXCSR(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
  BX_CPU_THIS_PTR prepareSSE();

  /* reserved MXCSR bits are masked off before the store */
  Bit32u mxcsr = BX_MXCSR_REGISTER & MXCSR_MASK;
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  write_virtual_dword(i->seg(), eaddr, mxcsr);
#else
  BX_INFO(("STMXCSR: required SSE, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 0F AE Grp15 000 */
/* Save the x87/MMX/SSE state into the 512-byte FXSAVE area at the
   effective address.  Layout follows the FXSAVE memory image: bytes 0-31
   hold FCW/FSW/tag/FOP, FIP/FCS, FDP/FDS and MXCSR/MXCSR_MASK, bytes
   32-159 the eight 80-bit FP registers (16 bytes apart), bytes 160+ the
   XMM registers. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::FXSAVE(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  unsigned index;
  BxPackedXmmRegister xmm;

  BX_DEBUG(("FXSAVE: save FPU/MMX/SSE state"));

#if BX_SUPPORT_MMX
  /* #NM on TS (lazy FPU switch), #UD when FPU emulation is forced */
  if(BX_CPU_THIS_PTR cr0.get_TS())
    exception(BX_NM_EXCEPTION, 0, 0);

  if(BX_CPU_THIS_PTR cr0.get_EM())
    exception(BX_UD_EXCEPTION, 0, 0);
#endif

  xmm.xmm16u(0) = BX_CPU_THIS_PTR the_i387.get_control_word();
  xmm.xmm16u(1) = BX_CPU_THIS_PTR the_i387.get_status_word();
  xmm.xmm16u(2) = pack_FPU_TW(BX_CPU_THIS_PTR the_i387.get_tag_word());

  /* x87 FPU Opcode (16 bits) */
  /* The lower 11 bits contain the FPU opcode, upper 5 bits are reserved */
  xmm.xmm16u(3) = BX_CPU_THIS_PTR the_i387.foo;

  /*
   * x87 FPU IP Offset (32/64 bits)
   * The contents of this field differ depending on the current
   * addressing mode (16/32/64 bit) when the FXSAVE instruction was executed:
   *   + 64-bit mode - 64-bit IP offset
   *   + 32-bit mode - 32-bit IP offset
   *   + 16-bit mode - low 16 bits are IP offset; high 16 bits are reserved.
   * x87 CS FPU IP Selector
   *   + 16 bit, in 16/32 bit mode only
   */
#if BX_SUPPORT_X86_64
  if (i->os64L()) /* 64 bit operand size mode */
  {
    xmm.xmm64u(1) = (BX_CPU_THIS_PTR the_i387.fip);
  }
  else
#endif
  {
    xmm.xmm32u(2) = (Bit32u)(BX_CPU_THIS_PTR the_i387.fip);
    xmm.xmm32u(3) = (BX_CPU_THIS_PTR the_i387.fcs);
  }

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  /* first 16 bytes must be 16-byte aligned */
  write_virtual_dqword_aligned(i->seg(), eaddr, (Bit8u *) &xmm);

  /*
   * x87 FPU Instruction Operand (Data) Pointer Offset (32/64 bits)
   * The contents of this field differ depending on the current
   * addressing mode (16/32 bit) when the FXSAVE instruction was executed:
   *   + 64-bit mode - 64-bit offset
   *   + 32-bit mode - 32-bit offset
   *   + 16-bit mode - low 16 bits are offset; high 16 bits are reserved.
   * x87 DS FPU Instruction Operand (Data) Pointer Selector
   *   + 16 bit, in 16/32 bit mode only
   */
#if BX_SUPPORT_X86_64
  if (i->os64L()) /* 64 bit operand size mode */
  {
    xmm.xmm64u(0) = (BX_CPU_THIS_PTR the_i387.fdp);
  }
  else
#endif
  {
    xmm.xmm32u(0) = (Bit32u)(BX_CPU_THIS_PTR the_i387.fdp);
    xmm.xmm32u(1) = (BX_CPU_THIS_PTR the_i387.fds);
  }

#if BX_SUPPORT_SSE >= 1
  xmm.xmm32u(2) = BX_MXCSR_REGISTER;
  xmm.xmm32u(3) = MXCSR_MASK;
#else
  xmm.xmm32u(2) = 0;
  xmm.xmm32u(3) = 0;
#endif

  write_virtual_dqword(i->seg(), eaddr + 16, (Bit8u *) &xmm);

  /* store i387 register file */
  for(index=0; index < 8; index++)
  {
    const floatx80 &fp = BX_FPU_REG(index);

    /* each 80-bit register occupies the low 10 bytes of a 16-byte slot */
    xmm.xmm64u(0) = fp.fraction;
    xmm.xmm64u(1) = 0;
    xmm.xmm16u(4) = fp.exp;

    write_virtual_dqword(i->seg(), eaddr+index*16+32, (Bit8u *) &xmm);
  }

#if BX_SUPPORT_X86_64
  /* fast-FXSAVE (EFER.FFXSR): kernel code in long mode skips XMM state */
  if (BX_CPU_THIS_PTR efer.get_FFXSR() && CPL == 0 && Is64BitMode())
    return; // skip saving of the XMM state
#endif

#if BX_SUPPORT_SSE >= 1
  /* store XMM register file */
  for(index=0; index < BX_XMM_REGISTERS; index++)
  {
    // save XMM8-XMM15 only in 64-bit mode
    if (index < 8 || Is64BitMode()) {
      write_virtual_dqword(i->seg(),
          eaddr+index*16+160, (Bit8u *) &(BX_CPU_THIS_PTR xmm[index]));
    }
  }
#endif

  /* do not touch reserved fields */
#else
  BX_INFO(("FXSAVE: required P6 support, use --enable-cpu-level=6 option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 0F AE Grp15 001 */
/* Restore the x87/MMX/SSE state from the 512-byte FXSAVE area at the
   effective address (inverse of FXSAVE above). */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::FXRSTOR(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  BxPackedXmmRegister xmm;
  unsigned index;

  BX_DEBUG(("FXRSTOR: restore FPU/MMX/SSE state"));

#if BX_SUPPORT_MMX
  /* #NM on TS (lazy FPU switch), #UD when FPU emulation is forced */
  if(BX_CPU_THIS_PTR cr0.get_TS())
    exception(BX_NM_EXCEPTION, 0, 0);

  if(BX_CPU_THIS_PTR cr0.get_EM())
    exception(BX_UD_EXCEPTION, 0, 0);
#endif

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  /* bytes 0..15: FCW, FSW, packed tag byte, FOP, FIP/FCS */
  read_virtual_dqword_aligned(i->seg(), eaddr, (Bit8u *) &xmm);

  BX_CPU_THIS_PTR the_i387.cwd = xmm.xmm16u(0);
  BX_CPU_THIS_PTR the_i387.swd = xmm.xmm16u(1);
  /* top-of-stack lives in FSW bits 13..11 */
  BX_CPU_THIS_PTR the_i387.tos = (xmm.xmm16u(1) >> 11) & 0x07;

  /* Restore x87 FPU Opcode */
  /* The lower 11 bits contain the FPU opcode, upper 5 bits are reserved */
  BX_CPU_THIS_PTR the_i387.foo = xmm.xmm16u(3) & 0x7FF;

  /* Restore x87 FPU IP */
#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    BX_CPU_THIS_PTR the_i387.fip = xmm.xmm64u(1);
    BX_CPU_THIS_PTR the_i387.fcs = 0;
  }
  else
#endif
  {
    BX_CPU_THIS_PTR the_i387.fip = xmm.xmm32u(2);
    BX_CPU_THIS_PTR the_i387.fcs = xmm.xmm16u(6);
  }

  /* tag byte is expanded into the full tag word only at the end, once the
     register contents have been loaded (see unpack_FPU_TW) */
  Bit32u tag_byte = xmm.xmmubyte(4);

  /* Restore x87 FPU DP */
  read_virtual_dqword(i->seg(), eaddr + 16, (Bit8u *) &xmm);

#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    BX_CPU_THIS_PTR the_i387.fdp = xmm.xmm64u(0);
    BX_CPU_THIS_PTR the_i387.fds = 0;
  }
  else
#endif
  {
    BX_CPU_THIS_PTR the_i387.fdp = xmm.xmm32u(0);
    BX_CPU_THIS_PTR the_i387.fds = xmm.xmm16u(2);
  }

#if BX_SUPPORT_SSE >= 1
  /* If the OSFXSR bit in CR4 is not set, the FXRSTOR instruction does
     not restore the states of the XMM and MXCSR registers. */
  if(BX_CPU_THIS_PTR cr4.get_OSFXSR())
  {
    Bit32u new_mxcsr = xmm.xmm32u(2);
    /* reserved MXCSR bits set in memory raise #GP(0) */
    if(new_mxcsr & ~MXCSR_MASK)
       exception(BX_GP_EXCEPTION, 0, 0);

    BX_MXCSR_REGISTER = new_mxcsr;
  }
#endif

  /* load i387 register file */
  for(index=0; index < 8; index++)
  {
    floatx80 reg;
    reg.fraction = read_virtual_qword(i->seg(), eaddr+index*16+32);
    reg.exp = read_virtual_word (i->seg(), eaddr+index*16+40);
    BX_FPU_REG(index) = reg;
  }

  BX_CPU_THIS_PTR the_i387.twd = unpack_FPU_TW(tag_byte);

#if BX_SUPPORT_X86_64
  /* fast-FXRSTOR (EFER.FFXSR): kernel code in long mode skips XMM state */
  if (BX_CPU_THIS_PTR efer.get_FFXSR() && CPL == 0 && Is64BitMode())
    return; // skip restore of the XMM state
#endif

#if BX_SUPPORT_SSE >= 1
  /* If the OSFXSR bit in CR4 is not set, the FXRSTOR instruction does
     not restore the states of the XMM and MXCSR registers. */
  if(BX_CPU_THIS_PTR cr4.get_OSFXSR())
  {
    /* load XMM register file */
    for(index=0; index < BX_XMM_REGISTERS; index++)
    {
      // restore XMM8-XMM15 only in 64-bit mode
      if (index < 8 || Is64BitMode()) {
        read_virtual_dqword(i->seg(),
            eaddr+index*16+160, (Bit8u *) &(BX_CPU_THIS_PTR xmm[index]));
      }
    }
  }
#endif
#else
  BX_INFO(("FXRSTOR: required P6 support, use --enable-cpu-level=6 option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* *************************** */
/* SSE: MEMORY MOVE OPERATIONS */
/* *************************** */
/* All these opcodes never generate SIMD floating point exeptions */
/* MOVUPS: 0F 10 */
/* MOVUPD: 66 0F 10 */
/* MOVDQU: F3 0F 6F */
/* Unaligned 128-bit load into an XMM register (no alignment check). */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVUPS_VpsWps(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
  BX_CPU_THIS_PTR prepareSSE();

  BxPackedXmmRegister val;

  if (! i->modC0()) {
    /* source is a memory reference */
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    read_virtual_dqword(i->seg(), eaddr, (Bit8u *) &val);
  }
  else {
    /* source is another XMM register */
    val = BX_READ_XMM_REG(i->rm());
  }

  BX_WRITE_XMM_REG(i->nnn(), val);
#else
  BX_INFO(("MOVUPS_VpsWps: required SSE, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* MOVUPS: 0F 11 */
/* MOVUPD: 66 0F 11 */
/* MOVDQU: F3 0F 7F */
/* Unaligned 128-bit store from an XMM register (no alignment check). */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVUPS_WpsVps(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
  BX_CPU_THIS_PTR prepareSSE();

  BxPackedXmmRegister val = BX_READ_XMM_REG(i->nnn());

  if (! i->modC0()) {
    /* destination is a memory reference */
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    write_virtual_dqword(i->seg(), eaddr, (Bit8u *) &val);
  }
  else {
    /* destination is another XMM register */
    BX_WRITE_XMM_REG(i->rm(), val);
  }
#else
  BX_INFO(("MOVUPS_WpsVps: required SSE, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* MOVAPS: 0F 28 */
/* MOVAPD: 66 0F 28 */
/* MOVDQA: F3 0F 6F */
/* Aligned 128-bit load into an XMM register; a misaligned memory
   operand faults inside the aligned read helper. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVAPS_VpsWps(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
  BX_CPU_THIS_PTR prepareSSE();

  BxPackedXmmRegister val;

  if (! i->modC0()) {
    /* source is a memory reference */
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    read_virtual_dqword_aligned(i->seg(), eaddr, (Bit8u *) &val);
  }
  else {
    /* source is another XMM register */
    val = BX_READ_XMM_REG(i->rm());
  }

  BX_WRITE_XMM_REG(i->nnn(), val);
#else
  BX_INFO(("MOVAPS_VpsWps: required SSE, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* MOVAPS: 0F 29 */
/* MOVAPD: 66 0F 29 */
/* MOVDQA: F3 0F 7F */
/* Aligned 128-bit store from an XMM register; a misaligned memory
   operand faults inside the aligned write helper. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVAPS_WpsVps(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
  BX_CPU_THIS_PTR prepareSSE();

  BxPackedXmmRegister val = BX_READ_XMM_REG(i->nnn());

  if (! i->modC0()) {
    /* destination is a memory reference */
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    write_virtual_dqword_aligned(i->seg(), eaddr, (Bit8u *) &val);
  }
  else {
    /* destination is another XMM register */
    BX_WRITE_XMM_REG(i->rm(), val);
  }
#else
  BX_INFO(("MOVAPS_WpsVps: required SSE, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* F3 0F 10 */
/* Scalar single-precision load: merge semantics from a register source,
   zero-extend semantics from a memory source. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSS_VssWss(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
  BX_CPU_THIS_PTR prepareSSE();

  BxPackedXmmRegister op = BX_READ_XMM_REG(i->nnn());

  /* op2 is a register or memory reference */
  if (i->modC0())
  {
    /* If the source operand is an XMM register, the high-order
       96 bits of the destination XMM register are not modified. */
    op.xmm32u(0) = BX_READ_XMM_REG_LO_DWORD(i->rm());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

    /* If the source operand is a memory location, the high-order
       96 bits of the destination XMM register are cleared to 0s */
    op.xmm32u(0) = read_virtual_dword(i->seg(), eaddr);
    op.xmm32u(1) = 0;
    op.xmm64u(1) = 0;  /* bits 127..64; bits 63..32 cleared just above */
  }

  /* now write result back to destination */
  BX_WRITE_XMM_REG(i->nnn(), op);
#else
  BX_INFO(("MOVSS_VssWss: required SSE, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* F3 0F 11 */
/* MOVSS store form: move the low dword of the source XMM register to an
   XMM register (merging) or to memory. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSS_WssVss(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
BX_CPU_THIS_PTR prepareSSE();
Bit32u val32 = BX_READ_XMM_REG_LO_DWORD(i->nnn());
/* destination is a register or memory reference */
if (i->modC0())
{
/* If the destination operand is an XMM register, its high-order
96 bits are not modified. */
BX_WRITE_XMM_REG_LO_DWORD(i->rm(), val32);
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
write_virtual_dword(i->seg(), eaddr, val32);
}
#else
BX_INFO(("MOVSS_WssVss: required SSE, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* F2 0F 10 */
/* MOVSD load form: move a scalar double-precision value into the low qword
   of the destination XMM register. Register source merges; memory source
   zero-extends to 128 bits. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSD_VsdWsd(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister op = BX_READ_XMM_REG(i->nnn());
/* op2 is a register or memory reference */
if (i->modC0())
{
/* If the source operand is an XMM register, the high-order
64 bits of the destination XMM register are not modified. */
op.xmm64u(0) = BX_READ_XMM_REG_LO_QWORD(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* If the source operand is a memory location, the high-order
64 bits of the destination XMM register are cleared to 0s */
op.xmm64u(0) = read_virtual_qword(i->seg(), eaddr);
op.xmm64u(1) = 0;
}
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), op);
#else
BX_INFO(("MOVSD_VsdWsd: required SSE2, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* F2 0F 11 */
/* MOVSD store form: move the low qword of the source XMM register to an
   XMM register (merging) or to memory. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSD_WsdVsd(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
BX_CPU_THIS_PTR prepareSSE();
Bit64u val64 = BX_READ_XMM_REG_LO_QWORD(i->nnn());
/* destination is a register or memory reference */
if (i->modC0())
{
/* If the destination operand is an XMM register, its high-order
64 bits are not modified. */
BX_WRITE_XMM_REG_LO_QWORD(i->rm(), val64);
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
write_virtual_qword(i->seg(), eaddr, val64);
}
#else
BX_INFO(("MOVSD_WsdVsd: required SSE2, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* MOVLPS: 0F 12 */
/* MOVLPD: 66 0F 12 */
/* Load 64 bits into the low qword of the destination XMM register.
   With mod=11 this encoding is MOVHLPS: the HIGH qword of the source
   register is moved to the low qword of the destination. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVLPS_VpsMq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
BX_CPU_THIS_PTR prepareSSE();
Bit64u val64;
if (i->modC0()) /* MOVHLPS xmm1, xmm2 opcode */
{
val64 = BX_READ_XMM_REG_HI_QWORD(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
val64 = read_virtual_qword(i->seg(), eaddr);
}
/* now write result back to destination; high qword is left unchanged */
BX_WRITE_XMM_REG_LO_QWORD(i->nnn(), val64);
#else
BX_INFO(("MOVLPS_VpsMq: required SSE, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* F2 0F 12 */
/* MOVDDUP: read 64 bits from the source (register low qword or memory) and
   duplicate them into both qwords of the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVDDUP_VpdWq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 3
  BX_CPU_THIS_PTR prepareSSE();

  BxPackedXmmRegister result;
  Bit64u src;

  /* fetch the 64-bit source operand */
  if (! i->modC0()) {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    src = read_virtual_qword(i->seg(), eaddr);
  }
  else {
    src = BX_READ_XMM_REG_LO_QWORD(i->rm());
  }

  /* broadcast the qword to both halves of the destination */
  result.xmm64u(0) = result.xmm64u(1) = src;

  /* now write result back to destination */
  BX_WRITE_XMM_REG(i->nnn(), result);
#else
  BX_INFO(("MOVDDUP_VpdWq: required SSE3, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* F3 0F 12 */
/* MOVSLDUP: duplicate the even-indexed (low) dwords of the source:
   result = { src[0], src[0], src[2], src[2] }. */
/* NOTE(review): this handler calls readVirtualDQwordAligned while the MOVAPS
   handlers above use read_virtual_dqword_aligned -- confirm both spellings
   exist in this version of the memory-access API. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSLDUP_VpsWps(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 3
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister op, result;
/* op is a register or memory reference */
if (i->modC0()) {
op = BX_READ_XMM_REG(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
readVirtualDQwordAligned(i->seg(), eaddr, (Bit8u *) &op);
}
result.xmm32u(0) = op.xmm32u(0);
result.xmm32u(1) = op.xmm32u(0);
result.xmm32u(2) = op.xmm32u(2);
result.xmm32u(3) = op.xmm32u(2);
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), result);
#else
BX_INFO(("MOVSLDUP_VpsWps: required SSE3, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* F3 0F 16 */
/* MOVSHDUP: duplicate the odd-indexed (high) dwords of the source:
   result = { src[1], src[1], src[3], src[3] }. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSHDUP_VpsWps(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 3
  BX_CPU_THIS_PTR prepareSSE();
  BxPackedXmmRegister op, result;

  /* op is a register or memory reference */
  if (i->modC0()) {
    op = BX_READ_XMM_REG(i->rm());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    readVirtualDQwordAligned(i->seg(), eaddr, (Bit8u *) &op);
  }

  result.xmm32u(0) = op.xmm32u(1);
  result.xmm32u(1) = op.xmm32u(1);
  result.xmm32u(2) = op.xmm32u(3);
  result.xmm32u(3) = op.xmm32u(3);

  /* now write result back to destination */
  BX_WRITE_XMM_REG(i->nnn(), result);
#else
  /* BUGFIX: the log message previously said "MOVHLDUP_VpsWps", which is not
     the name of this handler (or of any instruction). */
  BX_INFO(("MOVSHDUP_VpsWps: required SSE3, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* MOVLPS: 0F 13 */
/* MOVLPD: 66 0F 13 */
/* Store the low qword of the source XMM register to memory. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVLPS_MqVps(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
BX_CPU_THIS_PTR prepareSSE();
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
write_virtual_qword(i->seg(), eaddr, BX_XMM_REG_LO_QWORD(i->nnn()));
#else
BX_INFO(("MOVLPS_MqVps: required SSE, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* MOVHPS: 0F 16 */
/* MOVHPD: 66 0F 16 */
/* Load 64 bits into the high qword of the destination XMM register.
   With mod=11 this encoding is MOVLHPS: the LOW qword of the source
   register is moved to the high qword of the destination. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVHPS_VpsMq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
BX_CPU_THIS_PTR prepareSSE();
Bit64u val64;
if (i->modC0()) /* MOVLHPS xmm1, xmm2 opcode */
{
val64 = BX_READ_XMM_REG_LO_QWORD(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
val64 = read_virtual_qword(i->seg(), eaddr);
}
/* now write result back to destination; low qword is left unchanged */
BX_WRITE_XMM_REG_HI_QWORD(i->nnn(), val64);
#else
BX_INFO(("MOVHPS_VpsMq: required SSE, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* MOVHPS: 0F 17 */
/* MOVHPD: 66 0F 17 */
/* Store the high qword of the source XMM register to memory. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVHPS_MqVps(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
BX_CPU_THIS_PTR prepareSSE();
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
write_virtual_qword(i->seg(), eaddr, BX_XMM_REG_HI_QWORD(i->nnn()));
#else
BX_INFO(("MOVHPS_MqVps: required SSE, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* F2 0F F0 */
/* LDDQU: 128-bit load from memory into an XMM register using the unaligned
   read path (no alignment check, unlike the MOVAPS/MOVDQA loads above). */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LDDQU_VdqMdq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 3
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister op;
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
read_virtual_dqword(i->seg(), eaddr, (Bit8u *) &op);
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), op);
#else
BX_INFO(("LDDQU_VdqMdq: required SSE3, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F F7 */
/* MASKMOVDQU: byte-masked store of an XMM register to [rDI]. Byte j of the
   source is stored only if the sign bit of byte j of the mask register is
   set. Implemented here as a read-modify-write of the 16-byte destination. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MASKMOVDQU_VdqUdq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
  BX_CPU_THIS_PTR prepareSSE();

  bx_address rdi;
  BxPackedXmmRegister op = BX_READ_XMM_REG(i->nnn()),
    mask = BX_READ_XMM_REG(i->rm()), temp;

  /* pick the destination pointer according to the effective address size */
#if BX_SUPPORT_X86_64
  if (i->as64L()) { /* 64 bit address mode */
      rdi = RDI;
  }
  else
#endif
  if (i->as32L()) {
      rdi = EDI;
  }
  else { /* 16 bit address mode */
      rdi = DI;
  }

  /* no data will be written to memory if mask is all 0s */
  if ((mask.xmm64u(0) | mask.xmm64u(1)) == 0) return;

  /* implement as read-modify-write for efficiency */
  /* BUGFIX: the read previously used BX_SEG_REG_DS while the write used
     i->seg(); with a segment override prefix the RMW would have spanned two
     different segments. Use the effective segment for both accesses. */
  read_virtual_dqword(i->seg(), rdi, (Bit8u *) &temp);

  for(unsigned j=0; j<16; j++) {
    if(mask.xmmubyte(j) & 0x80) temp.xmmubyte(j) = op.xmmubyte(j);
  }

  /* and write result back to the memory */
  write_virtual_dqword(i->seg(), rdi, (Bit8u *) &temp);
#else
  BX_INFO(("MASKMOVDQU_VdqUdq: required SSE2, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 0F 50 */
/* MOVMSKPS: collect the sign bits of the four packed singles of the source
   XMM register into bits 3:0 of the destination general-purpose register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVMSKPS_GdVRps(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
  BX_CPU_THIS_PTR prepareSSE();
  /* BUGFIX: operands were decoded backwards. Gd (destination GPR) is the
     ModRM reg field = i->nnn(); VRps (source XMM) is the r/m field = i->rm(),
     matching the convention used by PMOVMSKB_GdUdq in this file. */
  BxPackedXmmRegister op = BX_READ_XMM_REG(i->rm());
  Bit32u val32 = 0;

  if(op.xmm32u(0) & 0x80000000) val32 |= 0x1;
  if(op.xmm32u(1) & 0x80000000) val32 |= 0x2;
  if(op.xmm32u(2) & 0x80000000) val32 |= 0x4;
  if(op.xmm32u(3) & 0x80000000) val32 |= 0x8;

  BX_WRITE_32BIT_REGZ(i->nnn(), val32);
#else
  BX_INFO(("MOVMSKPS_GdVRps: required SSE, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 50 */
/* MOVMSKPD: collect the sign bits of the two packed doubles (high dword of
   each qword) into bits 1:0 of the destination general-purpose register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVMSKPD_GdVRpd(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
  BX_CPU_THIS_PTR prepareSSE();
  /* BUGFIX: operands were decoded backwards. Gd (destination GPR) is the
     ModRM reg field = i->nnn(); VRpd (source XMM) is the r/m field = i->rm(),
     matching the convention used by PMOVMSKB_GdUdq in this file. */
  BxPackedXmmRegister op = BX_READ_XMM_REG(i->rm());
  Bit32u val32 = 0;

  if(op.xmm32u(1) & 0x80000000) val32 |= 0x1;
  if(op.xmm32u(3) & 0x80000000) val32 |= 0x2;

  BX_WRITE_32BIT_REGZ(i->nnn(), val32);
#else
  BX_INFO(("MOVMSKPD_GdVRpd: required SSE2, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 6E */
/* MOVD: load a 32-bit GPR or memory dword into the low dword of the
   destination XMM register, zero-extended to 128 bits. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVD_VdqEd(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister op1;
Bit32u op2;
/* op2 is a register or memory reference */
if (i->modC0()) {
op2 = BX_READ_32BIT_REG(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
op2 = read_virtual_dword(i->seg(), eaddr);
}
op1.xmm64u(0) = (Bit64u)(op2);
op1.xmm64u(1) = 0;
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), op1);
#else
BX_INFO(("MOVD_VdqEd: required SSE2, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
#if BX_SUPPORT_X86_64
/* 66 0F 6E */
/* MOVQ (64-bit operand size): load a 64-bit GPR or memory qword into the
   low qword of the destination XMM register, zero-extended to 128 bits. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVQ_VdqEq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister op1;
Bit64u op2;
/* op2 is a register or memory reference */
if (i->modC0()) {
op2 = BX_READ_64BIT_REG(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
op2 = read_virtual_qword_64(i->seg(), eaddr);
}
op1.xmm64u(0) = op2;
op1.xmm64u(1) = 0;
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), op1);
#else
BX_INFO(("MOVQ_VdqEq: required SSE2, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
#endif
/* 66 0F 7E */
/* MOVD store form: move the low dword of the source XMM register to a
   32-bit GPR (upper GPR bits zeroed) or to memory. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVD_EdVd(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
BX_CPU_THIS_PTR prepareSSE();
Bit32u op2 = BX_READ_XMM_REG_LO_DWORD(i->nnn());
/* destination is a register or memory reference */
if (i->modC0()) {
BX_WRITE_32BIT_REGZ(i->rm(), op2);
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
write_virtual_dword(i->seg(), eaddr, op2);
}
#else
BX_INFO(("MOVD_EdVd: required SSE2, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
#if BX_SUPPORT_X86_64
/* 66 0F 7E */
/* MOVQ store form (64-bit operand size): move the low qword of the source
   XMM register to a 64-bit GPR or to memory. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVQ_EqVq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
BX_CPU_THIS_PTR prepareSSE();
Bit64u op2 = BX_READ_XMM_REG_LO_QWORD(i->nnn());
/* destination is a register or memory reference */
if (i->modC0()) {
BX_WRITE_64BIT_REG(i->rm(), op2);
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
write_virtual_qword_64(i->seg(), eaddr, op2);
}
#else
BX_INFO(("MOVQ_EqVq: required SSE2, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
#endif
/* F3 0F 7E */
/* MOVQ load form: move 64 bits (XMM low qword or memory qword) into the low
   qword of the destination XMM register, zero-extending to 128 bits. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVQ_VqWq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
  BX_CPU_THIS_PTR prepareSSE();

  BxPackedXmmRegister dst;
  Bit64u lo64;

  /* fetch the 64-bit source operand */
  if (! i->modC0()) {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    lo64 = read_virtual_qword(i->seg(), eaddr);
  }
  else {
    lo64 = BX_READ_XMM_REG_LO_QWORD(i->rm());
  }

  /* zero-extension to 128 bit */
  dst.xmm64u(0) = lo64;
  dst.xmm64u(1) = 0;

  /* now write result back to destination */
  BX_WRITE_XMM_REG(i->nnn(), dst);
#else
  BX_INFO(("MOVQ_VqWq: required SSE2, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F D6 */
/* MOVQ store form: move the low qword of the source XMM register to memory,
   or to another XMM register with zero-extension to 128 bits. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVQ_WqVq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister op = BX_READ_XMM_REG(i->nnn());
if (i->modC0())
{
op.xmm64u(1) = 0; /* zero-extension to 128 bits */
BX_WRITE_XMM_REG(i->rm(), op);
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
write_virtual_qword(i->seg(), eaddr, op.xmm64u(0));
}
#else
BX_INFO(("MOVQ_WqVq: required SSE2, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* F2 0F D6 */
/* MOVDQ2Q: move the low qword of an XMM register into an MMX register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVDQ2Q_PqVRq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
  BX_CPU_THIS_PTR prepareSSE();
  BX_CPU_THIS_PTR prepareFPU2MMX();

  BxPackedMmxRegister mm;
  /* BUGFIX: operands were decoded backwards. Pq (destination MMX) is the
     ModRM reg field = i->nnn(); VRq (source XMM) is the r/m field = i->rm(),
     matching the E/W/U-from-rm convention used throughout this file. */
  MMXUQ(mm) = BX_READ_XMM_REG_LO_QWORD(i->rm());

  BX_WRITE_MMX_REG(i->nnn(), mm);
#else
  BX_INFO(("MOVDQ2Q_PqVRq: required SSE2, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* F3 0F D6 */
/* MOVQ2DQ: move an MMX register into the low qword of an XMM register,
   zero-extending to 128 bits. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVQ2DQ_VdqQq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
  BX_CPU_THIS_PTR prepareSSE();
  BX_CPU_THIS_PTR prepareFPU2MMX();

  BxPackedXmmRegister op;
  /* BUGFIX: operands were decoded backwards. Vdq (destination XMM) is the
     ModRM reg field = i->nnn(); Qq (source MMX) is the r/m field = i->rm(),
     matching the convention used throughout this file. */
  BxPackedMmxRegister mm = BX_READ_MMX_REG(i->rm());

  op.xmm64u(0) = MMXUQ(mm);
  op.xmm64u(1) = 0;

  BX_WRITE_XMM_REG(i->nnn(), op);
#else
  BX_INFO(("MOVQ2DQ_VdqQq: required SSE2, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F D7 */
/* PMOVMSKB: gather the most significant bit of each of the 16 bytes of the
   source XMM register into bits 15:0 of the destination GPR. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVMSKB_GdUdq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
  BX_CPU_THIS_PTR prepareSSE();
  BxPackedXmmRegister op = BX_READ_XMM_REG(i->rm());
  Bit32u result = 0;

  /* bit j of the result is the sign bit of source byte j */
  for (unsigned j=0; j<16; j++) {
    if (op.xmmubyte(j) & 0x80) result |= (1 << j);
  }

  /* now write result back to destination */
  BX_WRITE_32BIT_REGZ(i->nnn(), result);
#else
  BX_INFO(("PMOVMSKB_GdUdq: required SSE2, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* **************************** */
/* SSE: STORE DATA NON-TEMPORAL */
/* **************************** */
/* 0F C3 */
/* MOVNTI (32-bit): store a GPR dword to memory. The non-temporal cache hint
   has no effect in this emulation; it is implemented as an ordinary store. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVNTI_MdGd(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
write_virtual_dword(i->seg(), eaddr, BX_READ_32BIT_REG(i->nnn()));
#else
BX_INFO(("MOVNTI_MdGd: required SSE2, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
#if BX_SUPPORT_X86_64
/* 0F C3 */
/* MOVNTI (64-bit): store a GPR qword to memory. The non-temporal cache hint
   has no effect in this emulation; it is implemented as an ordinary store. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVNTI_MqGq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 2
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
write_virtual_qword_64(i->seg(), eaddr, BX_READ_64BIT_REG(i->nnn()));
#else
BX_INFO(("MOVNTI_MqGq: required SSE2, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
#endif
/* MOVNTPS: 0F 2B */
/* MOVNTPD: 66 0F 2B */
/* MOVNTDQ: 66 0F E7 */
/* Non-temporal 128-bit store of an XMM register to aligned memory; the
   cache hint is not modeled, so this is an ordinary aligned store. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVNTPS_MpsVps(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 1
BX_CPU_THIS_PTR prepareSSE();
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
write_virtual_dqword_aligned(i->seg(), eaddr, (Bit8u *)(&BX_READ_XMM_REG(i->nnn())));
#else
BX_INFO(("MOVNTPS_MpsVps: required SSE, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* ************************** */
/* 3-BYTE-OPCODE INSTRUCTIONS */
/* ************************** */
#if (BX_SUPPORT_SSE >= 4) || (BX_SUPPORT_SSE >= 3 && BX_SUPPORT_SSE_EXTENSION > 0)
/* 66 0F 38 20 */
/* PMOVSXBW: sign-extend 8 packed bytes (low qword of source) to 8 packed
   words in the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVSXBW_VdqWq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
  BX_CPU_THIS_PTR prepareSSE();
  BxPackedXmmRegister result;
  Bit64u val64;

  if (i->modC0())
  {
    val64 = BX_READ_XMM_REG_LO_QWORD(i->rm());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    val64 = read_virtual_qword(i->seg(), eaddr);
  }

  /* sign-extend each source byte into the corresponding destination word */
  for (unsigned j=0; j<8; j++) {
    result.xmm16u(j) = (Bit8s)(val64 >> (j*8));
  }

  /* now write result back to destination */
  BX_WRITE_XMM_REG(i->nnn(), result);
#else
  BX_INFO(("PMOVSXBW_VdqWq: required SSE4, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 38 21 */
/* PMOVSXBD: sign-extend 4 packed bytes (low dword of source) to 4 packed
   dwords in the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVSXBD_VdqWd(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
  BX_CPU_THIS_PTR prepareSSE();
  BxPackedXmmRegister result;
  Bit32u val32;

  if (i->modC0())
  {
    val32 = BX_READ_XMM_REG_LO_DWORD(i->rm());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    val32 = read_virtual_dword(i->seg(), eaddr);
  }

  /* sign-extend each source byte into the corresponding destination dword */
  for (unsigned j=0; j<4; j++) {
    result.xmm32u(j) = (Bit8s)(val32 >> (j*8));
  }

  /* now write result back to destination */
  BX_WRITE_XMM_REG(i->nnn(), result);
#else
  BX_INFO(("PMOVSXBD_VdqWd: required SSE4, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 38 22 */
/* PMOVSXBQ: sign-extend 2 packed bytes (low word of source) to 2 packed
   qwords in the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVSXBQ_VdqWw(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister result;
Bit16u val16;
if (i->modC0())
{
val16 = BX_READ_XMM_REG_LO_WORD(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
val16 = read_virtual_word(i->seg(), eaddr);
}
result.xmm64u(0) = (Bit8s) (val16 & 0xFF);
result.xmm64u(1) = (Bit8s) (val16 >> 8);
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), result);
#else
BX_INFO(("PMOVSXBQ_VdqWw: required SSE4, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 38 23 */
/* PMOVSXWD: sign-extend 4 packed words (low qword of source) to 4 packed
   dwords in the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVSXWD_VdqWq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister result;
Bit64u val64;
if (i->modC0())
{
val64 = BX_READ_XMM_REG_LO_QWORD(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
val64 = read_virtual_qword(i->seg(), eaddr);
}
result.xmm32u(0) = (Bit16s) (val64 & 0xFFFF);
result.xmm32u(1) = (Bit16s) ((val64 >> 16) & 0xFFFF);
result.xmm32u(2) = (Bit16s) ((val64 >> 32) & 0xFFFF);
result.xmm32u(3) = (Bit16s) (val64 >> 48);
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), result);
#else
BX_INFO(("PMOVSXWD_VdqWq: required SSE4, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 38 24 */
/* PMOVSXWQ: sign-extend 2 packed words (low dword of source) to 2 packed
   qwords in the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVSXWQ_VdqWd(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister result;
Bit32u val32;
if (i->modC0())
{
val32 = BX_READ_XMM_REG_LO_DWORD(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
val32 = read_virtual_dword(i->seg(), eaddr);
}
result.xmm64u(0) = (Bit16s) (val32 & 0xFFFF);
result.xmm64u(1) = (Bit16s) (val32 >> 16);
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), result);
#else
BX_INFO(("PMOVSXWQ_VdqWd: required SSE4, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 38 25 */
/* PMOVSXDQ: sign-extend 2 packed dwords (low qword of source) to 2 packed
   qwords in the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVSXDQ_VdqWq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister result;
Bit64u val64;
if (i->modC0())
{
val64 = BX_READ_XMM_REG_LO_QWORD(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
val64 = read_virtual_qword(i->seg(), eaddr);
}
result.xmm64u(0) = (Bit32s) (val64 & 0xFFFFFFFF);
result.xmm64u(1) = (Bit32s) (val64 >> 32);
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), result);
#else
BX_INFO(("PMOVSXDQ_VdqWq: required SSE4, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 38 2A */
/* MOVNTDQA: 128-bit aligned load from memory into an XMM register; the
   register form is invalid and raises #UD. The non-temporal hint is not
   modeled. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVNTDQA_VdqMdq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
/* source must be memory reference */
if (i->modC0()) {
BX_INFO(("MOVNTDQA_VdqMdq: must be memory reference"));
exception(BX_UD_EXCEPTION, 0, 0);
}
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister op;
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
read_virtual_dqword_aligned(i->seg(), eaddr, (Bit8u *) &op);
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), op);
#else
BX_INFO(("MOVNTDQA_VdqMdq: required SSE4, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 38 30 */
/* PMOVZXBW: zero-extend 8 packed bytes (low qword of source) to 8 packed
   words in the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVZXBW_VdqWq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
  BX_CPU_THIS_PTR prepareSSE();
  BxPackedXmmRegister result;
  Bit64u val64;

  if (i->modC0())
  {
    val64 = BX_READ_XMM_REG_LO_QWORD(i->rm());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    val64 = read_virtual_qword(i->seg(), eaddr);
  }

  /* zero-extend each source byte into the corresponding destination word */
  for (unsigned j=0; j<8; j++) {
    result.xmm16u(j) = (val64 >> (j*8)) & 0xFF;
  }

  /* now write result back to destination */
  BX_WRITE_XMM_REG(i->nnn(), result);
#else
  BX_INFO(("PMOVZXBW_VdqWq: required SSE4, use --enable-sse option"));
  exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 38 31 */
/* PMOVZXBD: zero-extend 4 packed bytes (low dword of source) to 4 packed
   dwords in the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVZXBD_VdqWd(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister result;
Bit32u val32;
if (i->modC0())
{
val32 = BX_READ_XMM_REG_LO_DWORD(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
val32 = read_virtual_dword(i->seg(), eaddr);
}
result.xmm32u(0) = val32 & 0xFF;
result.xmm32u(1) = (val32 >> 8) & 0xFF;
result.xmm32u(2) = (val32 >> 16) & 0xFF;
result.xmm32u(3) = val32 >> 24;
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), result);
#else
BX_INFO(("PMOVZXBD_VdqWd: required SSE4, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 38 32 */
/* PMOVZXBQ: zero-extend 2 packed bytes (low word of source) to 2 packed
   qwords in the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVZXBQ_VdqWw(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister result;
Bit16u val16;
if (i->modC0())
{
val16 = BX_READ_XMM_REG_LO_WORD(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
val16 = read_virtual_word(i->seg(), eaddr);
}
result.xmm64u(0) = val16 & 0xFF;
result.xmm64u(1) = val16 >> 8;
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), result);
#else
BX_INFO(("PMOVZXBQ_VdqWw: required SSE4, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 38 33 */
/* PMOVZXWD: zero-extend 4 packed words (low qword of source) to 4 packed
   dwords in the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVZXWD_VdqWq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister result;
Bit64u val64;
if (i->modC0())
{
val64 = BX_READ_XMM_REG_LO_QWORD(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
val64 = read_virtual_qword(i->seg(), eaddr);
}
result.xmm32u(0) = val64 & 0xFFFF;
result.xmm32u(1) = (val64 >> 16) & 0xFFFF;
result.xmm32u(2) = (val64 >> 32) & 0xFFFF;
result.xmm32u(3) = val64 >> 48;
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), result);
#else
BX_INFO(("PMOVZXWD_VdqWq: required SSE4, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 38 34 */
/* PMOVZXWQ: zero-extend 2 packed words (low dword of source) to 2 packed
   qwords in the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVZXWQ_VdqWd(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister result;
Bit32u val32;
if (i->modC0())
{
val32 = BX_READ_XMM_REG_LO_DWORD(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
val32 = read_virtual_dword(i->seg(), eaddr);
}
result.xmm64u(0) = val32 & 0xFFFF;
result.xmm64u(1) = val32 >> 16;
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), result);
#else
BX_INFO(("PMOVZXWQ_VdqWd: required SSE4, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 38 35 */
/* PMOVZXDQ: zero-extend 2 packed dwords (low qword of source) to 2 packed
   qwords in the destination XMM register. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PMOVZXDQ_VdqWq(bxInstruction_c *i)
{
#if BX_SUPPORT_SSE >= 4
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister result;
Bit64u val64;
if (i->modC0())
{
val64 = BX_READ_XMM_REG_LO_QWORD(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
val64 = read_virtual_qword(i->seg(), eaddr);
}
result.xmm64u(0) = val64 & 0xFFFFFFFF;
result.xmm64u(1) = val64 >> 32;
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), result);
#else
BX_INFO(("PMOVZXDQ_VdqWq: required SSE4, use --enable-sse option"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
/* 66 0F 3A 0F */
/* PALIGNR: form the 256-bit value op1:op2 (op1 in the high 128 bits),
   shift it right by imm8 *bytes*, and write the low 128 bits of the result
   to the destination. shift below is therefore imm8*8 in bits; the branches
   handle each 64-bit boundary case, and shifts of 32 bytes or more yield 0. */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PALIGNR_VdqWdqIb(bxInstruction_c *i)
{
#if (BX_SUPPORT_SSE >= 4) || (BX_SUPPORT_SSE >= 3 && BX_SUPPORT_SSE_EXTENSION > 0)
BX_CPU_THIS_PTR prepareSSE();
BxPackedXmmRegister op1 = BX_READ_XMM_REG(i->nnn()), op2, result;
/* op2 is a register or memory reference */
if (i->modC0()) {
op2 = BX_READ_XMM_REG(i->rm());
}
else {
bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
/* pointer, segment address pair */
readVirtualDQwordAligned(i->seg(), eaddr, (Bit8u *) &op2);
}
unsigned shift = i->Ib() * 8;
if(shift == 0) {
result.xmm64u(0) = op2.xmm64u(0);
result.xmm64u(1) = op2.xmm64u(1);
}
else if(shift < 64) {
result.xmm64u(0) = (op2.xmm64u(0) >> shift) | (op2.xmm64u(1) << (64-shift));
result.xmm64u(1) = (op2.xmm64u(1) >> shift) | (op1.xmm64u(0) << (64-shift));
}
else if(shift == 64) {
result.xmm64u(0) = op2.xmm64u(1);
result.xmm64u(1) = op1.xmm64u(0);
}
else if(shift < 128) {
shift -= 64;
result.xmm64u(0) = (op2.xmm64u(1) >> shift) | (op1.xmm64u(0) << (64-shift));
result.xmm64u(1) = (op1.xmm64u(0) >> shift) | (op1.xmm64u(1) << (64-shift));
}
else if(shift == 128) {
result.xmm64u(0) = op1.xmm64u(0);
result.xmm64u(1) = op1.xmm64u(1);
}
else if(shift < 192) {
shift -= 128;
result.xmm64u(0) = (op1.xmm64u(0) >> shift) | (op1.xmm64u(1) << (64-shift));
result.xmm64u(1) = (op1.xmm64u(1) >> shift);
}
else if(shift < 256) {
result.xmm64u(0) = op1.xmm64u(1) >> (shift - 192);
result.xmm64u(1) = 0;
}
else {
result.xmm64u(0) = 0;
result.xmm64u(1) = 0;
}
/* now write result back to destination */
BX_WRITE_XMM_REG(i->nnn(), result);
#else
BX_INFO(("PALIGNR_VdqWdqIb: required SSE3E, use --enable-sse and --enable-sse-extension options"));
exception(BX_UD_EXCEPTION, 0, 0);
#endif
}
#endif // BX_SUPPORT_SSE >= 4 || (BX_SUPPORT_SSE >= 3 && BX_SUPPORT_SSE_EXTENSION > 0)
| 21,866 |
335 |
/*
* Sndmix.cpp
* -----------
* Purpose: Pattern playback, effect processing
* Notes : (currently none)
* Authors: <NAME>
* OpenMPT Devs
* The OpenMPT source code is released under the BSD license. Read LICENSE for more details.
*/
#include "stdafx.h"
#include "Sndfile.h"
#include "MixerLoops.h"
#include "MIDIEvents.h"
#include "Tables.h"
#ifdef MODPLUG_TRACKER
#include "../mptrack/TrackerSettings.h"
#endif // MODPLUG_TRACKER
#ifndef NO_PLUGINS
#include "plugins/PlugInterface.h"
#endif // NO_PLUGINS
#include "OPL.h"
OPENMPT_NAMESPACE_BEGIN
// Log tables for pre-amp
// Pre-amp (or more precisely: Pre-attenuation) depends on the number of
// channels, which this table takes care of.
// NOTE(review): the row comments (0-7, 8-15, ...) suggest each entry covers
// a bucket of two channel counts -- confirm the index computation at the
// call site.
static constexpr uint8 PreAmpTable[16] =
{
0x60, 0x60, 0x60, 0x70, // 0-7
0x80, 0x88, 0x90, 0x98, // 8-15
0xA0, 0xA4, 0xA8, 0xAC, // 16-23
0xB0, 0xB4, 0xB8, 0xBC, // 24-31
};
#ifndef NO_AGC
// Alternative pre-attenuation table used when automatic gain control is
// enabled; shallower curve than PreAmpTable.
static constexpr uint8 PreAmpAGCTable[16] =
{
0x60, 0x60, 0x60, 0x64,
0x68, 0x70, 0x78, 0x80,
0x84, 0x88, 0x8C, 0x90,
0x92, 0x94, 0x96, 0x98,
};
#endif
// Apply new mixer settings. The player is re-initialized; mixer state is
// additionally reset when sample rate, channel count or mixer flags changed.
void CSoundFile::SetMixerSettings(const MixerSettings &mixersettings)
{
  SetPreAmp(mixersettings.m_nPreAmp); // adjust agc
  const bool reset =
       mixersettings.gdwMixingFreq != m_MixerSettings.gdwMixingFreq
    || mixersettings.gnChannels != m_MixerSettings.gnChannels
    || mixersettings.MixerFlags != m_MixerSettings.MixerFlags;
  m_MixerSettings = mixersettings;
  InitPlayer(reset);
}
// Apply new resampler settings, rebuild the interpolation tables and
// re-initialize the Amiga resampler accordingly.
void CSoundFile::SetResamplerSettings(const CResamplerSettings &resamplersettings)
{
m_Resampler.m_Settings = resamplersettings;
m_Resampler.UpdateTables();
InitAmigaResampler();
}
// (Re-)initialize the playback DSP chain at the current mixing rate.
// When bReset is true, mixer statistics and the per-channel DC offset
// volumes are cleared as well, and the Amiga resampler is re-created.
void CSoundFile::InitPlayer(bool bReset)
{
if(bReset)
{
ResetMixStat();
m_dryLOfsVol = m_dryROfsVol = 0;
m_surroundLOfsVol = m_surroundROfsVol = 0;
InitAmigaResampler();
}
m_Resampler.UpdateTables();
#ifndef NO_REVERB
m_Reverb.Initialize(bReset, m_RvbROfsVol, m_RvbLOfsVol, m_MixerSettings.gdwMixingFreq);
#endif
#ifndef NO_DSP
m_Surround.Initialize(bReset, m_MixerSettings.gdwMixingFreq);
#endif
#ifndef NO_DSP
m_MegaBass.Initialize(bReset, m_MixerSettings.gdwMixingFreq);
#endif
#ifndef NO_EQ
m_EQ.Initialize(bReset, m_MixerSettings.gdwMixingFreq);
#endif
#ifndef NO_AGC
m_AGC.Initialize(bReset, m_MixerSettings.gdwMixingFreq);
#endif
#ifndef NO_DSP
m_BitCrush.Initialize(bReset, m_MixerSettings.gdwMixingFreq);
#endif
if(m_opl)
{
m_opl->Initialize(m_MixerSettings.gdwMixingFreq);
}
}
// Schedule a fade-out over (roughly) msec milliseconds: the remaining buffer
// count is set to the corresponding number of samples (capped at 0x100000)
// and every active mixer channel gets a downward volume ramp to zero.
// Returns false if msec converts to zero samples.
bool CSoundFile::FadeSong(uint32 msec)
{
samplecount_t nsamples = Util::muldiv(msec, m_MixerSettings.gdwMixingFreq, 1000);
if (nsamples <= 0) return false;
if (nsamples > 0x100000) nsamples = 0x100000;
m_PlayState.m_nBufferCount = nsamples;
int32 nRampLength = static_cast<int32>(m_PlayState.m_nBufferCount);
// Ramp everything down
for (uint32 noff=0; noff < m_nMixChannels; noff++)
{
ModChannel &pramp = m_PlayState.Chn[m_PlayState.ChnMix[noff]];
pramp.newRightVol = pramp.newLeftVol = 0;
pramp.leftRamp = -pramp.leftVol * (1 << VOLUMERAMPPRECISION) / nRampLength;
pramp.rightRamp = -pramp.rightVol * (1 << VOLUMERAMPPRECISION) / nRampLength;
pramp.rampLeftVol = pramp.leftVol * (1 << VOLUMERAMPPRECISION);
pramp.rampRightVol = pramp.rightVol * (1 << VOLUMERAMPPRECISION);
pramp.nRampLength = nRampLength;
pramp.dwFlags.set(CHN_VOLUMERAMP);
}
return true;
}
// Apply stereo separation factor on an interleaved stereo/quad stream.
// count = Number of stereo sample pairs to process
// separation = -256...256 (negative values = swap L/R, 0 = mono, 128 = normal)
// Implemented as mid/side processing: m = L+R, s = L-R; the side signal is
// scaled by separation/128, and both are halved to compensate for the
// doubling introduced by the sum/difference.
static void ApplyStereoSeparation(mixsample_t *mixBuf, std::size_t count, int32 separation)
{
#ifdef MPT_INTMIXER
const mixsample_t factor_num = separation; // 128 =^= 1.0f
const mixsample_t factor_den = MixerSettings::StereoSeparationScale; // 128
const mixsample_t normalize_den = 2; // mid/side pre/post normalization
const mixsample_t mid_den = normalize_den;
const mixsample_t side_num = factor_num;
const mixsample_t side_den = factor_den * normalize_den;
#else
const float normalize_factor = 0.5f; // cumulative mid/side normalization factor (1/sqrt(2))*(1/sqrt(2))
const float factor = static_cast<float>(separation) / static_cast<float>(MixerSettings::StereoSeparationScale); // sep / 128
const float mid_factor = normalize_factor;
const float side_factor = factor * normalize_factor;
#endif
for(std::size_t i = 0; i < count; i++)
{
mixsample_t l = mixBuf[0];
mixsample_t r = mixBuf[1];
mixsample_t m = l + r;
mixsample_t s = l - r;
#ifdef MPT_INTMIXER
m /= mid_den;
s = Util::muldiv(s, side_num, side_den);
#else
m *= mid_factor;
s *= side_factor;
#endif
l = m + s;
r = m - s;
mixBuf[0] = l;
mixBuf[1] = r;
mixBuf += 2;
}
}
// Apply stereo separation to the front buffer, and to the rear buffer as well
// when mixing four channels. A separation of exactly StereoSeparationScale
// (128 = normal stereo width) is the identity and is skipped entirely.
static void ApplyStereoSeparation(mixsample_t *SoundFrontBuffer, mixsample_t *SoundRearBuffer, std::size_t channels, std::size_t countChunk, int32 separation)
{
	if(separation == MixerSettings::StereoSeparationScale)
		return;  // identity
	if(channels >= 2)
		ApplyStereoSeparation(SoundFrontBuffer, countChunk, separation);
	if(channels >= 4)
		ApplyStereoSeparation(SoundRearBuffer, countChunk, separation);
}
// Clear all input mix buffers and let the audio source fill the first
// NumInputChannels of them with countChunk frames of captured audio.
void CSoundFile::ProcessInputChannels(IAudioSource &source, std::size_t countChunk)
{
	mixsample_t *buffers[NUMMIXINPUTBUFFERS];
	for(std::size_t channel = 0; channel < NUMMIXINPUTBUFFERS; ++channel)
	{
		// Zero the buffer first so that channels the source does not write stay silent.
		std::fill(&MixInputBuffer[channel][0], &MixInputBuffer[channel][countChunk], mixsample_t(0));
		buffers[channel] = MixInputBuffer[channel];
	}
	source.Process(mpt::audio_span_planar(buffers, m_MixerSettings.NumInputChannels, countChunk));
}
// Read one tick but skip all expensive rendering options
// Temporarily forces the maximum number of mixed channels to 0 so that
// CreateStereoMix() only advances playback state without producing output.
// Returns the length of the next tick in sample frames, or 0 at song end.
CSoundFile::samplecount_t CSoundFile::ReadOneTick()
{
const auto origMaxMixChannels = m_MixerSettings.m_nMaxMixChannels;
m_MixerSettings.m_nMaxMixChannels = 0;
// Flush whatever remains of the current tick, chunked by the mix buffer size.
while(m_PlayState.m_nBufferCount)
{
auto framesToRender = std::min(m_PlayState.m_nBufferCount, samplecount_t(MIXBUFFERSIZE));
CreateStereoMix(framesToRender);
m_PlayState.m_nBufferCount -= framesToRender;
m_PlayState.m_lTotalSampleCount += framesToRender;
}
m_MixerSettings.m_nMaxMixChannels = origMaxMixChannels;
// Advance to the next tick; ReadNote() fills m_nBufferCount with its length.
if(ReadNote())
return m_PlayState.m_nBufferCount;
else
return 0;
}
// Render up to "count" sample frames of audio into "target".
// "source" supplies external input channels; "outputMonitor" / "inputMonitor"
// optionally observe the rendered / captured audio.
// Returns the number of frames actually rendered; this is less than "count"
// when the end of the song is reached.
CSoundFile::samplecount_t CSoundFile::Read(samplecount_t count, IAudioTarget &target, IAudioSource &source, std::optional<std::reference_wrapper<IMonitorOutput>> outputMonitor, std::optional<std::reference_wrapper<IMonitorInput>> inputMonitor)
{
MPT_ASSERT_ALWAYS(m_MixerSettings.IsValid());
samplecount_t countRendered = 0;
samplecount_t countToRender = count;
while(!m_SongFlags[SONG_ENDREACHED] && countToRender > 0)
{
// Update Channel Data
if(!m_PlayState.m_nBufferCount)
{
// Last tick or fade completely processed, find out what to do next
if(m_SongFlags[SONG_FADINGSONG])
{
// Song was faded out
m_SongFlags.set(SONG_ENDREACHED);
} else if(ReadNote())
{
// Render next tick (normal progress)
MPT_ASSERT(m_PlayState.m_nBufferCount > 0);
#ifdef MODPLUG_TRACKER
// Save pattern cue points for WAV rendering here (if we reached a new pattern, that is.)
if(m_PatternCuePoints != nullptr && (m_PatternCuePoints->empty() || m_PlayState.m_nCurrentOrder != m_PatternCuePoints->back().order))
{
PatternCuePoint cue;
cue.offset = countRendered;
cue.order = m_PlayState.m_nCurrentOrder;
cue.processed = false; // We don't know the base offset in the file here. It has to be added in the main conversion loop.
m_PatternCuePoints->push_back(cue);
}
#endif
} else
{
// No new pattern data
#ifdef MODPLUG_TRACKER
if((m_nMaxOrderPosition) && (m_PlayState.m_nCurrentOrder >= m_nMaxOrderPosition))
{
m_SongFlags.set(SONG_ENDREACHED);
}
#endif // MODPLUG_TRACKER
if(IsRenderingToDisc())
{
// Disable song fade when rendering or when requested in libopenmpt.
m_SongFlags.set(SONG_ENDREACHED);
} else
{ // end of song reached, fade it out
if(FadeSong(FADESONGDELAY)) // sets m_nBufferCount xor returns false
{ // FadeSong sets m_nBufferCount here
MPT_ASSERT(m_PlayState.m_nBufferCount > 0);
m_SongFlags.set(SONG_FADINGSONG);
} else
{
m_SongFlags.set(SONG_ENDREACHED);
}
}
}
}
if(m_SongFlags[SONG_ENDREACHED])
{
// Mix done.
// If we decide to continue the mix (possible in libopenmpt), the tick count
// is valid right now (0), meaning that no new row data will be processed.
// This would effectively prolong the last played row.
m_PlayState.m_nTickCount = m_PlayState.TicksOnRow();
break;
}
MPT_ASSERT(m_PlayState.m_nBufferCount > 0); // assert that we have actually something to do
// Chunk size is bounded by the mix buffer size, the remaining tick length
// and the caller's remaining frame request.
const samplecount_t countChunk = std::min({ static_cast<samplecount_t>(MIXBUFFERSIZE), static_cast<samplecount_t>(m_PlayState.m_nBufferCount), static_cast<samplecount_t>(countToRender) });
if(m_MixerSettings.NumInputChannels > 0)
{
ProcessInputChannels(source, countChunk);
}
if(inputMonitor)
{
mixsample_t *buffers[NUMMIXINPUTBUFFERS];
for(std::size_t channel = 0; channel < NUMMIXINPUTBUFFERS; ++channel)
{
buffers[channel] = MixInputBuffer[channel];
}
inputMonitor->get().Process(mpt::audio_span_planar<const mixsample_t>(buffers, m_MixerSettings.NumInputChannels, countChunk));
}
// Mix all module channels, then run the post-processing chain in a fixed
// order: OPL -> reverb -> plugins -> mono fold-down -> global volume ->
// stereo separation -> DSP effects -> quad interleave.
CreateStereoMix(countChunk);
if(m_opl)
{
m_opl->Mix(MixSoundBuffer, countChunk, m_OPLVolumeFactor * m_nVSTiVolume / 48);
}
#ifndef NO_REVERB
m_Reverb.Process(MixSoundBuffer, ReverbSendBuffer, m_RvbROfsVol, m_RvbLOfsVol, countChunk);
#endif // NO_REVERB
#ifndef NO_PLUGINS
if(m_loadedPlugins)
{
ProcessPlugins(countChunk);
}
#endif // NO_PLUGINS
if(m_MixerSettings.gnChannels == 1)
{
MonoFromStereo(MixSoundBuffer, countChunk);
}
if(m_PlayConfig.getGlobalVolumeAppliesToMaster())
{
ProcessGlobalVolume(countChunk);
}
if(m_MixerSettings.m_nStereoSeparation != MixerSettings::StereoSeparationScale)
{
ProcessStereoSeparation(countChunk);
}
if(m_MixerSettings.DSPMask)
{
ProcessDSP(countChunk);
}
if(m_MixerSettings.gnChannels == 4)
{
InterleaveFrontRear(MixSoundBuffer, MixRearBuffer, countChunk);
}
if(outputMonitor)
{
outputMonitor->get().Process(mpt::audio_span_interleaved<const mixsample_t>(MixSoundBuffer, m_MixerSettings.gnChannels, countChunk));
}
target.Process(mpt::audio_span_interleaved<mixsample_t>(MixSoundBuffer, m_MixerSettings.gnChannels, countChunk));
// Buffer ready
countRendered += countChunk;
countToRender -= countChunk;
m_PlayState.m_nBufferCount -= countChunk;
m_PlayState.m_lTotalSampleCount += countChunk;
#ifdef MODPLUG_TRACKER
if(IsRenderingToDisc())
{
// Stop playback on F00 if no more voices are active.
// F00 sets the tick count to 65536 in FT2, so it just generates a reaaaally long row.
// Usually this command can be found at the end of a song to effectively stop playback.
// Since we don't want to render hours of silence, we are going to check if there are
// still any channels playing, and if that is no longer the case, we stop playback at
// the end of the next tick.
if(m_PlayState.m_nMusicSpeed == uint16_max && (m_nMixStat == 0 || m_PlayState.m_nGlobalVolume == 0) && GetType() == MOD_TYPE_XM && !m_PlayState.m_nBufferCount)
{
m_SongFlags.set(SONG_ENDREACHED);
}
}
#endif // MODPLUG_TRACKER
}
// mix done
return countRendered;
}
// Run the enabled DSP effects over the mixed output.
// The chain order is fixed and audible: Surround -> MegaBass -> EQ -> AGC -> BitCrush.
void CSoundFile::ProcessDSP(uint32 countChunk)
{
#ifndef NO_DSP
	// Surround and MegaBass are adjacent in the chain, so they share one guard.
	if(m_MixerSettings.DSPMask & SNDDSP_SURROUND)
		m_Surround.Process(MixSoundBuffer, MixRearBuffer, countChunk, m_MixerSettings.gnChannels);
	if(m_MixerSettings.DSPMask & SNDDSP_MEGABASS)
		m_MegaBass.Process(MixSoundBuffer, MixRearBuffer, countChunk, m_MixerSettings.gnChannels);
#endif // NO_DSP
#ifndef NO_EQ
	if(m_MixerSettings.DSPMask & SNDDSP_EQ)
		m_EQ.Process(MixSoundBuffer, MixRearBuffer, countChunk, m_MixerSettings.gnChannels);
#endif // NO_EQ
#ifndef NO_AGC
	if(m_MixerSettings.DSPMask & SNDDSP_AGC)
		m_AGC.Process(MixSoundBuffer, MixRearBuffer, countChunk, m_MixerSettings.gnChannels);
#endif // NO_AGC
#ifndef NO_DSP
	// BitCrush deliberately runs last, after EQ and AGC.
	if(m_MixerSettings.DSPMask & SNDDSP_BITCRUSH)
		m_BitCrush.Process(MixSoundBuffer, MixRearBuffer, countChunk, m_MixerSettings.gnChannels);
#endif // NO_DSP
#if defined(NO_DSP) && defined(NO_EQ) && defined(NO_AGC)
	// With every effect compiled out, the parameter would otherwise be unused.
	MPT_UNREFERENCED_PARAMETER(countChunk);
#endif
}
/////////////////////////////////////////////////////////////////////////////
// Handles navigation/effects
// Advances the tick counter and, when a row boundary is crossed, moves to the
// next row / order position, handling end-of-song detection, restart
// positions, subsongs and the visited-rows loop detection.
// Returns false when playback must stop, otherwise the result of
// ProcessEffects() for the current tick.
bool CSoundFile::ProcessRow()
{
while(++m_PlayState.m_nTickCount >= m_PlayState.TicksOnRow())
{
// A new row begins: advance row/order state first.
const auto [ignoreRow, patternTransition] = NextRow(m_PlayState, m_SongFlags[SONG_BREAKTOROW]);
#ifdef MODPLUG_TRACKER
if(patternTransition)
{
HandlePatternTransitionEvents();
}
// "Lock row" editing feature
if(m_lockRowStart != ROWINDEX_INVALID && (m_PlayState.m_nRow < m_lockRowStart || m_PlayState.m_nRow > m_lockRowEnd) && !IsRenderingToDisc())
{
m_PlayState.m_nRow = m_lockRowStart;
}
// "Lock order" editing feature
if(Order().IsPositionLocked(m_PlayState.m_nCurrentOrder) && !IsRenderingToDisc())
{
m_PlayState.m_nCurrentOrder = m_lockOrderStart;
}
#else
MPT_UNUSED_VARIABLE(patternTransition);
#endif // MODPLUG_TRACKER
// Check if pattern is valid
if(!m_SongFlags[SONG_PATTERNLOOP])
{
m_PlayState.m_nPattern = (m_PlayState.m_nCurrentOrder < Order().size()) ? Order()[m_PlayState.m_nCurrentOrder] : Order.GetInvalidPatIndex();
if (m_PlayState.m_nPattern < Patterns.Size() && !Patterns[m_PlayState.m_nPattern].IsValid()) m_PlayState.m_nPattern = Order.GetIgnoreIndex();
// Skip +++ entries and handle ---/end-of-orderlist.
while (m_PlayState.m_nPattern >= Patterns.Size())
{
// End of song?
if ((m_PlayState.m_nPattern == Order.GetInvalidPatIndex()) || (m_PlayState.m_nCurrentOrder >= Order().size()))
{
ORDERINDEX restartPosOverride = Order().GetRestartPos();
if(restartPosOverride == 0 && m_PlayState.m_nCurrentOrder <= Order().size() && m_PlayState.m_nCurrentOrder > 0)
{
// Subtune detection. Subtunes are separated by "---" order items, so if we're in a
// subtune and there's no restart position, we go to the first order of the subtune
// (i.e. the first order after the previous "---" item)
for(ORDERINDEX ord = m_PlayState.m_nCurrentOrder - 1; ord > 0; ord--)
{
if(Order()[ord] == Order.GetInvalidPatIndex())
{
// Jump back to first order of this subtune
restartPosOverride = ord + 1;
break;
}
}
}
// If channel resetting is disabled in MPT, we will emulate a pattern break (and we always do it if we're not in MPT)
#ifdef MODPLUG_TRACKER
if(!(TrackerSettings::Instance().m_dwPatternSetup & PATTERN_RESETCHANNELS))
#endif // MODPLUG_TRACKER
{
m_SongFlags.set(SONG_BREAKTOROW);
}
if (restartPosOverride == 0 && !m_SongFlags[SONG_BREAKTOROW])
{
//rewbs.instroVSTi: stop all VSTi at end of song, if looping.
StopAllVsti();
// Restore the initial speed/tempo/volume and reset channel state
// before looping back to the start of the song.
m_PlayState.m_nMusicSpeed = m_nDefaultSpeed;
m_PlayState.m_nMusicTempo = m_nDefaultTempo;
m_PlayState.m_nGlobalVolume = m_nDefaultGlobalVolume;
for(CHANNELINDEX i = 0; i < MAX_CHANNELS; i++)
{
auto &chn = m_PlayState.Chn[i];
if(chn.dwFlags[CHN_ADLIB] && m_opl)
{
m_opl->NoteCut(i);
}
chn.dwFlags.set(CHN_NOTEFADE | CHN_KEYOFF);
chn.nFadeOutVol = 0;
if(i < m_nChannels)
{
chn.nGlobalVol = ChnSettings[i].nVolume;
chn.nVolume = ChnSettings[i].nVolume;
chn.nPan = ChnSettings[i].nPan;
chn.nPanSwing = chn.nVolSwing = 0;
chn.nCutSwing = chn.nResSwing = 0;
chn.nOldVolParam = 0;
chn.oldOffset = 0;
chn.nOldHiOffset = 0;
chn.nPortamentoDest = 0;
if(!chn.nLength)
{
chn.dwFlags = ChnSettings[i].dwFlags;
chn.nLoopStart = 0;
chn.nLoopEnd = 0;
chn.pModInstrument = nullptr;
chn.pModSample = nullptr;
}
}
}
}
//Handle Repeat position
m_PlayState.m_nCurrentOrder = restartPosOverride;
m_SongFlags.reset(SONG_BREAKTOROW);
//If restart pos points to +++, move along
while(m_PlayState.m_nCurrentOrder < Order().size() && Order()[m_PlayState.m_nCurrentOrder] == Order.GetIgnoreIndex())
{
m_PlayState.m_nCurrentOrder++;
}
//Check for end of song or bad pattern
if (m_PlayState.m_nCurrentOrder >= Order().size()
|| !Order().IsValidPat(m_PlayState.m_nCurrentOrder))
{
m_visitedRows.Initialize(true);
return false;
}
} else
{
m_PlayState.m_nCurrentOrder++;
}
if (m_PlayState.m_nCurrentOrder < Order().size())
m_PlayState.m_nPattern = Order()[m_PlayState.m_nCurrentOrder];
else
m_PlayState.m_nPattern = Order.GetInvalidPatIndex();
if (m_PlayState.m_nPattern < Patterns.Size() && !Patterns[m_PlayState.m_nPattern].IsValid())
m_PlayState.m_nPattern = Order.GetIgnoreIndex();
}
m_PlayState.m_nNextOrder = m_PlayState.m_nCurrentOrder;
#ifdef MODPLUG_TRACKER
if ((m_nMaxOrderPosition) && (m_PlayState.m_nCurrentOrder >= m_nMaxOrderPosition)) return false;
#endif // MODPLUG_TRACKER
}
// Weird stuff?
if (!Patterns.IsValidPat(m_PlayState.m_nPattern))
return false;
// Did we jump to an invalid row?
if (m_PlayState.m_nRow >= Patterns[m_PlayState.m_nPattern].GetNumRows()) m_PlayState.m_nRow = 0;
// Has this row been visited before? We might want to stop playback now.
// But: We will not mark the row as modified if the song is not in loop mode but
// the pattern loop (editor flag, not to be confused with the pattern loop effect)
// flag is set - because in that case, the module would stop after the first pattern loop...
const bool overrideLoopCheck = (m_nRepeatCount != -1) && m_SongFlags[SONG_PATTERNLOOP];
if(!overrideLoopCheck && m_visitedRows.Visit(m_PlayState.m_nCurrentOrder, m_PlayState.m_nRow, m_PlayState.Chn, ignoreRow))
{
if(m_nRepeatCount)
{
// repeat count == -1 means repeat infinitely.
if(m_nRepeatCount > 0)
{
m_nRepeatCount--;
}
// Forget all but the current row.
m_visitedRows.Initialize(true);
m_visitedRows.Visit(m_PlayState.m_nCurrentOrder, m_PlayState.m_nRow, m_PlayState.Chn, ignoreRow);
} else
{
#ifdef MODPLUG_TRACKER
// Let's check again if this really is the end of the song.
// The visited rows vector might have been screwed up while editing...
// This is of course not possible during rendering to WAV, so we ignore that case.
bool isReallyAtEnd = IsRenderingToDisc();
if(!isReallyAtEnd)
{
for(const auto &t : GetLength(eNoAdjust, GetLengthTarget(true)))
{
if(t.lastOrder == m_PlayState.m_nCurrentOrder && t.lastRow == m_PlayState.m_nRow)
{
isReallyAtEnd = true;
break;
}
}
}
if(isReallyAtEnd)
{
// This is really the song's end!
m_visitedRows.Initialize(true);
return false;
} else
{
// Ok, this is really dirty, but we have to update the visited rows vector...
GetLength(eAdjustOnlyVisitedRows, GetLengthTarget(m_PlayState.m_nCurrentOrder, m_PlayState.m_nRow));
}
#else
if(m_SongFlags[SONG_PLAYALLSONGS])
{
// When playing all subsongs consecutively, first search for any hidden subsongs...
if(!m_visitedRows.GetFirstUnvisitedRow(m_PlayState.m_nCurrentOrder, m_PlayState.m_nRow, true))
{
// ...and then try the next sequence.
m_PlayState.m_nNextOrder = m_PlayState.m_nCurrentOrder = 0;
m_PlayState.m_nNextRow = m_PlayState.m_nRow = 0;
if(Order.GetCurrentSequenceIndex() >= Order.GetNumSequences() - 1)
{
Order.SetSequence(0);
m_visitedRows.Initialize(true);
return false;
}
Order.SetSequence(Order.GetCurrentSequenceIndex() + 1);
m_visitedRows.Initialize(true);
}
// When jumping to the next subsong, stop all playing notes from the previous song...
const auto muteFlag = CSoundFile::GetChannelMuteFlag();
for(CHANNELINDEX i = 0; i < MAX_CHANNELS; i++)
m_PlayState.Chn[i].Reset(ModChannel::resetSetPosFull, *this, i, muteFlag);
StopAllVsti();
// ...and the global playback information.
m_PlayState.m_nMusicSpeed = m_nDefaultSpeed;
m_PlayState.m_nMusicTempo = m_nDefaultTempo;
m_PlayState.m_nGlobalVolume = m_nDefaultGlobalVolume;
m_PlayState.m_nNextOrder = m_PlayState.m_nCurrentOrder;
m_PlayState.m_nNextRow = m_PlayState.m_nRow;
if(Order().size() > m_PlayState.m_nCurrentOrder)
m_PlayState.m_nPattern = Order()[m_PlayState.m_nCurrentOrder];
m_visitedRows.Visit(m_PlayState.m_nCurrentOrder, m_PlayState.m_nRow, m_PlayState.Chn, ignoreRow);
if (!Patterns.IsValidPat(m_PlayState.m_nPattern))
return false;
} else
{
m_visitedRows.Initialize(true);
return false;
}
#endif // MODPLUG_TRACKER
}
}
SetupNextRow(m_PlayState, m_SongFlags[SONG_PATTERNLOOP]);
// Reset channel values
ModCommand *m = Patterns[m_PlayState.m_nPattern].GetpModCommand(m_PlayState.m_nRow, 0);
for (ModChannel *pChn = m_PlayState.Chn, *pEnd = pChn + m_nChannels; pChn != pEnd; pChn++, m++)
{
// First, handle some quirks that happen after the last tick of the previous row...
if(m_playBehaviour[KST3PortaAfterArpeggio]
&& pChn->nCommand == CMD_ARPEGGIO // Previous row state!
&& (m->command == CMD_PORTAMENTOUP || m->command == CMD_PORTAMENTODOWN))
{
// In ST3, a portamento immediately following an arpeggio continues where the arpeggio left off.
// Test case: PortaAfterArp.s3m
pChn->nPeriod = GetPeriodFromNote(pChn->nArpeggioLastNote, pChn->nFineTune, pChn->nC5Speed);
}
if(m_playBehaviour[kMODOutOfRangeNoteDelay]
&& !m->IsNote()
&& pChn->rowCommand.IsNote()
&& pChn->rowCommand.command == CMD_MODCMDEX && (pChn->rowCommand.param & 0xF0) == 0xD0
&& (pChn->rowCommand.param & 0x0Fu) >= m_PlayState.m_nMusicSpeed)
{
// In ProTracker, a note triggered by an out-of-range note delay can be heard on the next row
// if there is no new note on that row.
// Test case: NoteDelay-NextRow.mod
pChn->nPeriod = GetPeriodFromNote(pChn->rowCommand.note, pChn->nFineTune, 0);
}
if(m_playBehaviour[kMODTempoOnSecondTick] && !m_playBehaviour[kMODVBlankTiming] && m_PlayState.m_nMusicSpeed == 1 && pChn->rowCommand.command == CMD_TEMPO)
{
// ProTracker sets the tempo after the first tick. This block handles the case of one tick per row.
// Test case: TempoChange.mod
m_PlayState.m_nMusicTempo = TEMPO(pChn->rowCommand.param, 0);
}
// Latch the new row data into the channel and clear per-row state.
pChn->rowCommand = *m;
pChn->rightVol = pChn->newRightVol;
pChn->leftVol = pChn->newLeftVol;
pChn->dwFlags.reset(CHN_VIBRATO | CHN_TREMOLO);
if(!m_playBehaviour[kITVibratoTremoloPanbrello]) pChn->nPanbrelloOffset = 0;
pChn->nCommand = CMD_NONE;
pChn->m_plugParamValueStep = 0;
}
// Now that we know which pattern we're on, we can update time signatures (global or pattern-specific)
UpdateTimeSignature();
if(ignoreRow)
{
// Skip this row entirely (ProTracker EEx + Dxx quirk, see NextRow()).
m_PlayState.m_nTickCount = m_PlayState.m_nMusicSpeed;
continue;
}
break;
}
// Should we process tick0 effects?
if (!m_PlayState.m_nMusicSpeed) m_PlayState.m_nMusicSpeed = 1;
//End of row? stop pattern step (aka "play row").
#ifdef MODPLUG_TRACKER
if (m_PlayState.m_nTickCount >= m_PlayState.TicksOnRow() - 1)
{
if(m_SongFlags[SONG_STEP])
{
m_SongFlags.reset(SONG_STEP);
m_SongFlags.set(SONG_PAUSED);
}
}
#endif // MODPLUG_TRACKER
if (m_PlayState.m_nTickCount)
{
m_SongFlags.reset(SONG_FIRSTTICK);
if(!(GetType() & (MOD_TYPE_XM | MOD_TYPE_MT2))
&& (GetType() != MOD_TYPE_MOD || m_SongFlags[SONG_PT_MODE]) // Fix infinite loop in "GamerMan " by MrGamer, which was made with FT2
&& m_PlayState.m_nTickCount < m_PlayState.TicksOnRow())
{
// Emulate first tick behaviour if Row Delay is set.
// Test cases: PatternDelaysRetrig.it, PatternDelaysRetrig.s3m, PatternDelaysRetrig.xm, PatternDelaysRetrig.mod
if(!(m_PlayState.m_nTickCount % (m_PlayState.m_nMusicSpeed + m_PlayState.m_nFrameDelay)))
{
m_SongFlags.set(SONG_FIRSTTICK);
}
}
} else
{
m_SongFlags.set(SONG_FIRSTTICK);
m_SongFlags.reset(SONG_BREAKTOROW);
}
// Update Effects
return ProcessEffects();
}
// Advance the play state to the next row / order position.
// Returns {ignoreRow, patternTransition}: whether the target row must be
// skipped entirely, and whether we crossed into a new pattern.
std::pair<bool, bool> CSoundFile::NextRow(PlayState &playState, const bool breakRow) const
{
	// ProTracker quirk: with an EEx row delay and a Dxx jump on the same row,
	// the jump target row is not played.
	// Test case: DelayBreak.mod (based on condom_corruption by Travolta)
	const bool skipTargetRow = (GetType() == MOD_TYPE_MOD) && breakRow && playState.m_nPatternDelay > 1;
	// We transition patterns when wrapping to row 0 or on a break/jump command
	// (a pattern loop back to row 0 also counts, but that doesn't matter here).
	const bool enteredNewPattern = breakRow || playState.m_nNextRow == 0;
	if(enteredNewPattern && GetType() == MOD_TYPE_S3M)
	{
		// Scream Tracker 3 resets the pattern loop start on pattern transitions.
		// Test case: LoopReset.s3m
		for(CHANNELINDEX chn = 0; chn < GetNumChannels(); chn++)
			playState.Chn[chn].nPatternLoop = 0;
	}
	playState.m_nTickCount = 0;
	playState.m_nPatternDelay = 0;
	playState.m_nFrameDelay = 0;
	playState.m_nRow = playState.m_nNextRow;
	playState.m_nCurrentOrder = playState.m_nNextOrder;
	return {skipTargetRow, enteredNewPattern};
}
// Compute the row (and possibly order) to be played after the current row.
void CSoundFile::SetupNextRow(PlayState &playState, const bool patternLoop) const
{
	playState.m_nNextRow = playState.m_nRow + 1;
	if(playState.m_nNextRow < Patterns[playState.m_nPattern].GetNumRows())
		return;  // still inside the current pattern
	// Wrapped past the last row: advance the order position unless we are
	// looping the current pattern (editor feature).
	if(!patternLoop)
		playState.m_nNextOrder = playState.m_nCurrentOrder + 1;
	playState.m_nNextRow = 0;
	// FT2 idiosyncrasy: When E60 is used on a pattern row x, the following pattern also starts from row x
	// instead of the beginning of the pattern, unless there was a Bxx or Dxx effect.
	if(m_playBehaviour[kFT2LoopE60Restart])
	{
		playState.m_nNextRow = playState.m_nextPatStartRow;
		playState.m_nextPatStartRow = 0;
	}
}
////////////////////////////////////////////////////////////////////////////////////////////
// Channel effect processing
// Calculate delta for Vibrato / Tremolo / Panbrello effect
int CSoundFile::GetVibratoDelta(int type, int position) const
{
// IT compatibility: IT has its own, more precise tables
if(m_playBehaviour[kITVibratoTremoloPanbrello])
{
position &= 0xFF;
switch(type & 0x03)
{
case 0: // Sine
default:
return ITSinusTable[position];
case 1: // Ramp down
return 64 - (position + 1) / 2;
case 2: // Square
return position < 128 ? 64 : 0;
case 3: // Random
return mpt::random<int, 7>(AccessPRNG()) - 0x40;
}
} else if(GetType() & (MOD_TYPE_DIGI | MOD_TYPE_DBM))
{
// Other waveforms are not supported.
static constexpr int8 DBMSinus[] =
{
33, 52, 69, 84, 96, 107, 116, 122, 125, 127, 125, 122, 116, 107, 96, 84,
69, 52, 33, 13, -8, -31, -54, -79, -104,-128, -104, -79, -54, -31, -8, 13,
};
return DBMSinus[(position / 2u) & 0x1F];
} else
{
position &= 0x3F;
switch(type & 0x03)
{
case 0: // Sine
default:
return ModSinusTable[position];
case 1: // Ramp down
return (position < 32 ? 0 : 255) - position * 4;
case 2: // Square
return position < 32 ? 127 : -127;
case 3: // Random
return ModRandomTable[position];
}
}
}
// Apply instrument volume swing to the current note volume.
// IT and old-MPT behaviour only affect the transient volume (with different
// clamping ranges); the legacy path permanently modifies the channel volume.
void CSoundFile::ProcessVolumeSwing(ModChannel &chn, int &vol) const
{
	const bool itBehaviour = m_playBehaviour[kITSwingBehaviour];
	if(itBehaviour || m_playBehaviour[kMPTOldSwingBehaviour])
	{
		vol += chn.nVolSwing;
		// IT clamps to the 0..64 note volume range, old MPT to 0..256.
		Limit(vol, 0, itBehaviour ? 64 : 256);
	} else
	{
		// Legacy behaviour: swing is folded into the channel volume once.
		chn.nVolume += chn.nVolSwing;
		Limit(chn.nVolume, 0, 256);
		vol = chn.nVolume;
		chn.nVolSwing = 0;
	}
}
// Apply instrument panning swing to the channel's effective panning.
void CSoundFile::ProcessPanningSwing(ModChannel &chn) const
{
	const bool transientSwing = m_playBehaviour[kITSwingBehaviour] || m_playBehaviour[kMPTOldSwingBehaviour];
	if(!transientSwing)
	{
		// Legacy behaviour: swing permanently modifies the channel panning.
		chn.nPan += chn.nPanSwing;
		Limit(chn.nPan, 0, 256);
		chn.nPanSwing = 0;
		chn.nRealPan = chn.nPan;
	} else
	{
		// IT / old MPT behaviour: only the effective panning is offset.
		chn.nRealPan = chn.nPan + chn.nPanSwing;
		Limit(chn.nRealPan, 0, 256);
	}
}
// Apply the tremolo effect (periodic volume modulation) to "vol" and advance
// the tremolo oscillator, honouring several format-specific quirks.
void CSoundFile::ProcessTremolo(ModChannel &chn, int &vol) const
{
if (chn.dwFlags[CHN_TREMOLO])
{
if(m_SongFlags.test_all(SONG_FIRSTTICK | SONG_PT_MODE))
{
// ProTracker doesn't apply tremolo nor advance on the first tick.
// Test case: VibratoReset.mod
return;
}
// IT compatibility: Why would you not want to execute tremolo at volume 0?
if(vol > 0 || m_playBehaviour[kITVibratoTremoloPanbrello])
{
// IT compatibility: We don't need a different attenuation here because of the different tables we're going to use
const uint8 attenuation = ((GetType() & (MOD_TYPE_XM | MOD_TYPE_MOD)) || m_playBehaviour[kITVibratoTremoloPanbrello]) ? 5 : 6;
int delta = GetVibratoDelta(chn.nTremoloType, chn.nTremoloPos);
if((chn.nTremoloType & 0x03) == 1 && m_playBehaviour[kFT2MODTremoloRampWaveform])
{
// FT2 compatibility: Tremolo ramp down / triangle implementation is weird and affected by vibrato position (copypaste bug)
// Test case: TremoloWaveforms.xm, TremoloVibrato.xm
uint8 ramp = (chn.nTremoloPos * 4u) & 0x7F;
// Volume-colum vibrato gets executed first in FT2, so we may need to advance the vibrato position first
uint32 vibPos = chn.nVibratoPos;
if(!m_SongFlags[SONG_FIRSTTICK] && chn.dwFlags[CHN_VIBRATO])
vibPos += chn.nVibratoSpeed;
if((vibPos & 0x3F) >= 32)
ramp ^= 0x7F;
if((chn.nTremoloPos & 0x3F) >= 32)
delta = -ramp;
else
delta = ramp;
}
if(GetType() != MOD_TYPE_DMF)
{
// Scale the waveform delta by the effect depth (attenuation is 2^5 or 2^6).
vol += (delta * chn.nTremoloDepth) / (1 << attenuation);
} else
{
// Tremolo in DMF always attenuates by a percentage of the current note volume
vol -= (vol * chn.nTremoloDepth * (64 - delta)) / (128 * 64);
}
}
// Advance the oscillator (except on the first tick, unless IT old effects apply).
if(!m_SongFlags[SONG_FIRSTTICK] || ((GetType() & (MOD_TYPE_IT|MOD_TYPE_MPT)) && !m_SongFlags[SONG_ITOLDEFFECTS]))
{
// IT compatibility: IT has its own, more precise tables
if(m_playBehaviour[kITVibratoTremoloPanbrello])
chn.nTremoloPos += 4 * chn.nTremoloSpeed;
else
chn.nTremoloPos += chn.nTremoloSpeed;
}
}
}
// Apply the tremor effect (rapid on/off volume gating) to "vol".
// The on/off state machine is kept in chn.nTremorCount; the high bits encode
// the current phase in the FT2/IT emulation paths (0x80 = off, 0xC0 = on).
void CSoundFile::ProcessTremor(CHANNELINDEX nChn, int &vol)
{
ModChannel &chn = m_PlayState.Chn[nChn];
if(m_playBehaviour[kFT2Tremor])
{
// FT2 Compatibility: Weird XM tremor.
// Test case: Tremor.xm
if(chn.nTremorCount & 0x80)
{
if(!m_SongFlags[SONG_FIRSTTICK] && chn.nCommand == CMD_TREMOR)
{
chn.nTremorCount &= ~0x20;
if(chn.nTremorCount == 0x80)
{
// Reached end of off-time
chn.nTremorCount = (chn.nTremorParam >> 4) | 0xC0;
} else if(chn.nTremorCount == 0xC0)
{
// Reached end of on-time
chn.nTremorCount = (chn.nTremorParam & 0x0F) | 0x80;
} else
{
chn.nTremorCount--;
}
chn.dwFlags.set(CHN_FASTVOLRAMP);
}
// Mute while in the off-phase.
if((chn.nTremorCount & 0xE0) == 0x80)
{
vol = 0;
}
}
} else if(chn.nCommand == CMD_TREMOR)
{
// IT compatibility 12. / 13.: Tremor
if(m_playBehaviour[kITTremor])
{
if((chn.nTremorCount & 0x80) && chn.nLength)
{
if (chn.nTremorCount == 0x80)
chn.nTremorCount = (chn.nTremorParam >> 4) | 0xC0;
else if (chn.nTremorCount == 0xC0)
chn.nTremorCount = (chn.nTremorParam & 0x0F) | 0x80;
else
chn.nTremorCount--;
}
if((chn.nTremorCount & 0xC0) == 0x80)
vol = 0;
} else
{
// Generic implementation: linear counter modulo the total cycle length.
uint8 ontime = chn.nTremorParam >> 4;
uint8 n = ontime + (chn.nTremorParam & 0x0F); // Total tremor cycle time (On + Off)
if ((!(GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT))) || m_SongFlags[SONG_ITOLDEFFECTS])
{
n += 2;
ontime++;
}
uint8 tremcount = chn.nTremorCount;
if(!(GetType() & MOD_TYPE_XM))
{
if (tremcount >= n) tremcount = 0;
if (tremcount >= ontime) vol = 0;
chn.nTremorCount = tremcount + 1;
} else
{
if(m_SongFlags[SONG_FIRSTTICK])
{
// tremcount is only 0 on the first tremor tick after triggering a note.
if(tremcount > 0)
{
tremcount--;
}
} else
{
chn.nTremorCount = tremcount + 1;
}
if (tremcount % n >= ontime) vol = 0;
}
}
chn.dwFlags.set(CHN_FASTVOLRAMP);
}
#ifndef NO_PLUGINS
// Plugin tremor: translate the gating into MIDI note-on/note-off messages for
// the instrument plugin so that plugin notes are gated as well.
if(chn.nCommand == CMD_TREMOR && chn.pModInstrument && chn.pModInstrument->nMixPlug
&& !chn.pModInstrument->dwFlags[INS_MUTE]
&& !chn.dwFlags[CHN_MUTE | CHN_SYNCMUTE]
&& ModCommand::IsNote(chn.nLastNote))
{
const ModInstrument *pIns = chn.pModInstrument;
IMixPlugin *pPlugin = m_MixPlugins[pIns->nMixPlug - 1].pMixPlugin;
if(pPlugin)
{
const bool isPlaying = pPlugin->IsNotePlaying(chn.nLastNote, nChn);
if(vol == 0 && isPlaying)
pPlugin->MidiCommand(*pIns, chn.nLastNote + NOTE_MAX_SPECIAL, 0, nChn);
else if(vol != 0 && !isPlaying)
pPlugin->MidiCommand(*pIns, chn.nLastNote, static_cast<uint16>(chn.nVolume), nChn);
}
}
#endif // NO_PLUGINS
}
bool CSoundFile::IsEnvelopeProcessed(const ModChannel &chn, EnvelopeType env) const
{
if(chn.pModInstrument == nullptr)
{
return false;
}
const InstrumentEnvelope &insEnv = chn.pModInstrument->GetEnvelope(env);
// IT Compatibility: S77/S79/S7B do not disable the envelope, they just pause the counter
// Test cases: s77.it, EnvLoops.xm, PanSustainRelease.xm
bool playIfPaused = m_playBehaviour[kITEnvelopePositionHandling] || m_playBehaviour[kFT2PanSustainRelease];
return ((chn.GetEnvelope(env).flags[ENV_ENABLED] || (insEnv.dwFlags[ENV_ENABLED] && playIfPaused))
&& !insEnv.empty());
}
// Scale "vol" by the instrument's volume envelope, including release-node
// handling (the envelope value is rescaled relative to the value at the
// moment the note was released).
void CSoundFile::ProcessVolumeEnvelope(ModChannel &chn, int &vol) const
{
if(IsEnvelopeProcessed(chn, ENV_VOLUME))
{
const ModInstrument *pIns = chn.pModInstrument;
if(m_playBehaviour[kITEnvelopePositionHandling] && chn.VolEnv.nEnvPosition == 0)
{
// If the envelope is disabled at the very same moment as it is triggered, we do not process anything.
return;
}
// IT-style handling keeps the position one step ahead; compensate here.
const int envpos = chn.VolEnv.nEnvPosition - (m_playBehaviour[kITEnvelopePositionHandling] ? 1 : 0);
// Get values in [0, 256]
int envval = pIns->VolEnv.GetValueFromPosition(envpos, 256);
// if we are in the release portion of the envelope,
// rescale envelope factor so that it is proportional to the release point
// and release envelope beginning.
if(pIns->VolEnv.nReleaseNode != ENV_RELEASE_NODE_UNSET
&& chn.VolEnv.nEnvValueAtReleaseJump != NOT_YET_RELEASED)
{
int envValueAtReleaseJump = chn.VolEnv.nEnvValueAtReleaseJump;
int envValueAtReleaseNode = pIns->VolEnv[pIns->VolEnv.nReleaseNode].value * 4;
//If we have just hit the release node, force the current env value
//to be that of the release node. This works around the case where
// we have another node at the same position as the release node.
if(envpos == pIns->VolEnv[pIns->VolEnv.nReleaseNode].tick)
envval = envValueAtReleaseNode;
if(m_playBehaviour[kLegacyReleaseNode])
{
// Old, hard to grasp release node behaviour (additive)
int relativeVolumeChange = (envval - envValueAtReleaseNode) * 2;
envval = envValueAtReleaseJump + relativeVolumeChange;
} else
{
// New behaviour, truly relative to release node
if(envValueAtReleaseNode > 0)
envval = envValueAtReleaseJump * envval / envValueAtReleaseNode;
else
envval = 0;
}
}
// Envelope values above 256 (up to 512) amplify beyond the nominal volume.
vol = (vol * Clamp(envval, 0, 512)) / 256;
}
}
// Offset the channel's effective panning by the instrument's panning envelope.
void CSoundFile::ProcessPanningEnvelope(ModChannel &chn) const
{
	if(!IsEnvelopeProcessed(chn, ENV_PANNING))
		return;
	const ModInstrument *pIns = chn.pModInstrument;
	if(m_playBehaviour[kITEnvelopePositionHandling] && chn.PanEnv.nEnvPosition == 0)
	{
		// If the envelope is disabled at the very same moment as it is triggered, we do not process anything.
		return;
	}
	// IT-style handling keeps the position one step ahead; compensate here.
	const int envpos = chn.PanEnv.nEnvPosition - (m_playBehaviour[kITEnvelopePositionHandling] ? 1 : 0);
	// Get values in [-32, 32]
	const int envval = pIns->PanEnv.GetValueFromPosition(envpos, 64) - 32;
	int pan = chn.nRealPan;
	// Scale the envelope by the remaining headroom towards the nearer stereo edge.
	pan += envval * ((pan >= 128) ? (256 - pan) : pan) / 32;
	chn.nRealPan = Clamp(pan, 0, 256);
}
// Process the instrument's pitch / filter envelope.
// If the envelope acts as a filter envelope, returns the cutoff computed by
// SetupChannelFilter(); otherwise the pitch is applied to "period" (or to the
// channel's fine tune for custom tunings) and -1 is returned.
int CSoundFile::ProcessPitchFilterEnvelope(ModChannel &chn, int32 &period) const
{
if(IsEnvelopeProcessed(chn, ENV_PITCH))
{
const ModInstrument *pIns = chn.pModInstrument;
if(m_playBehaviour[kITEnvelopePositionHandling] && chn.PitchEnv.nEnvPosition == 0)
{
// If the envelope is disabled at the very same moment as it is triggered, we do not process anything.
return -1;
}
// IT-style handling keeps the position one step ahead; compensate here.
const int envpos = chn.PitchEnv.nEnvPosition - (m_playBehaviour[kITEnvelopePositionHandling] ? 1 : 0);
// Get values in [-256, 256]
#ifdef MODPLUG_TRACKER
const int32 range = ENVELOPE_MAX;
const int32 amp = 512;
#else
// TODO: AMS2 envelopes behave differently when linear slides are off - emulate with 15 * (-128...127) >> 6
// Copy over vibrato behaviour for that?
const int32 range = GetType() == MOD_TYPE_AMS ? uint8_max : ENVELOPE_MAX;
int32 amp;
switch(GetType())
{
case MOD_TYPE_AMS: amp = 64; break;
case MOD_TYPE_MDL: amp = 192; break;
default: amp = 512;
}
#endif
const int envval = pIns->PitchEnv.GetValueFromPosition(envpos, amp, range) - amp / 2;
if(chn.PitchEnv.flags[ENV_FILTER])
{
// Filter Envelope: controls cutoff frequency
return SetupChannelFilter(chn, !chn.dwFlags[CHN_FILTER], envval);
} else
{
// Pitch Envelope
if(chn.HasCustomTuning())
{
if(chn.nFineTune != envval)
{
chn.nFineTune = mpt::saturate_cast<int16>(envval);
chn.m_CalculateFreq = true;
//Preliminary tests indicated that this behavior
//is very close to original(with 12TET) when finestep count
//is 15.
}
} else //Original behavior
{
// Slide the period up or down via the linear slide tables; the table
// roles are swapped when periods actually store frequencies.
const bool useFreq = PeriodsAreFrequencies();
const uint32 (&upTable)[256] = useFreq ? LinearSlideUpTable : LinearSlideDownTable;
const uint32 (&downTable)[256] = useFreq ? LinearSlideDownTable : LinearSlideUpTable;
int l = envval;
if(l < 0)
{
l = -l;
LimitMax(l, 255);
period = Util::muldiv(period, downTable[l], 65536);
} else
{
LimitMax(l, 255);
period = Util::muldiv(period, upTable[l], 65536);
}
} //End: Original behavior.
}
}
return -1;
}
// Advances one envelope (volume / panning / pitch-filter) of a channel by a tick,
// honouring sustain loops, normal loops and end-of-envelope handling.
// Two bookkeeping schemes exist: FT2/MPT style (position incremented after the value
// was read) and IT style (position pre-incremented; selected via kITEnvelopePositionHandling).
void CSoundFile::IncrementEnvelopePosition(ModChannel &chn, EnvelopeType envType) const
{
	ModChannel::EnvInfo &chnEnv = chn.GetEnvelope(envType);

	// Nothing to do without an instrument or with a disabled envelope.
	if(chn.pModInstrument == nullptr || !chnEnv.flags[ENV_ENABLED])
	{
		return;
	}

	// Increase position (IT mode stores the position pre-shifted by one, see below)
	uint32 position = chnEnv.nEnvPosition + (m_playBehaviour[kITEnvelopePositionHandling] ? 0 : 1);

	const InstrumentEnvelope &insEnv = chn.pModInstrument->GetEnvelope(envType);
	if(insEnv.empty())
	{
		return;
	}

	bool endReached = false;

	if(!m_playBehaviour[kITEnvelopePositionHandling])
	{
		// FT2-style envelope processing.
		if(insEnv.dwFlags[ENV_LOOP])
		{
			// Normal loop active
			uint32 end = insEnv[insEnv.nLoopEnd].tick;
			// Loop end point is inclusive for most formats, exclusive for XM / MT2.
			if(!(GetType() & (MOD_TYPE_XM | MOD_TYPE_MT2))) end++;

			// FT2 compatibility: If the sustain point is at the loop end and the sustain loop has been released, don't loop anymore.
			// Test case: EnvLoops.xm
			const bool escapeLoop = (insEnv.nLoopEnd == insEnv.nSustainEnd && insEnv.dwFlags[ENV_SUSTAIN] && chn.dwFlags[CHN_KEYOFF] && m_playBehaviour[kFT2EnvelopeEscape]);

			if(position == end && !escapeLoop)
			{
				position = insEnv[insEnv.nLoopStart].tick;
			}
		}

		if(insEnv.dwFlags[ENV_SUSTAIN] && !chn.dwFlags[CHN_KEYOFF])
		{
			// Envelope sustained: hold at the sustain point until key-off.
			if(position == insEnv[insEnv.nSustainEnd].tick + 1u)
			{
				position = insEnv[insEnv.nSustainStart].tick;
				// FT2 compatibility: If the panning envelope reaches its sustain point before key-off, it stays there forever.
				// Test case: PanSustainRelease.xm
				if(m_playBehaviour[kFT2PanSustainRelease] && envType == ENV_PANNING && !chn.dwFlags[CHN_KEYOFF])
				{
					chnEnv.flags.reset(ENV_ENABLED);
				}
			}
		} else
		{
			// Limit to last envelope point
			if(position > insEnv.back().tick)
			{
				// End of envelope
				position = insEnv.back().tick;
				endReached = true;
			}
		}
	} else
	{
		// IT envelope processing.
		// Test case: EnvLoops.it
		uint32 start, end;

		// IT compatibility: OpenMPT processes the key-off flag earlier than IT. Grab the flag from the previous tick instead.
		// Test case: EnvOffLength.it
		if(insEnv.dwFlags[ENV_SUSTAIN] && !chn.dwOldFlags[CHN_KEYOFF] && (chnEnv.nEnvValueAtReleaseJump == NOT_YET_RELEASED || m_playBehaviour[kReleaseNodePastSustainBug]))
		{
			// Envelope sustained
			start = insEnv[insEnv.nSustainStart].tick;
			end = insEnv[insEnv.nSustainEnd].tick + 1;
		} else if(insEnv.dwFlags[ENV_LOOP])
		{
			// Normal loop active
			start = insEnv[insEnv.nLoopStart].tick;
			end = insEnv[insEnv.nLoopEnd].tick + 1;
		} else
		{
			// Limit to last envelope point
			start = end = insEnv.back().tick;
			if(position > end)
			{
				// End of envelope
				endReached = true;
			}
		}

		if(position >= end)
		{
			position = start;
		}
	}

	if(envType == ENV_VOLUME && endReached)
	{
		// Special handling for volume envelopes at end of envelope
		if((GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT)) || (chn.dwFlags[CHN_KEYOFF] && GetType() != MOD_TYPE_MDL))
		{
			chn.dwFlags.set(CHN_NOTEFADE);
		}

		if(insEnv.back().value == 0 && (chn.nMasterChn > 0 || (GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT))))
		{
			// Stop channel if the last envelope node is silent anyway.
			chn.dwFlags.set(CHN_NOTEFADE);
			chn.nFadeOutVol = 0;
			chn.nRealVolume = 0;
			chn.nCalcVolume = 0;
		}
	}

	// Store the new position; IT mode keeps it shifted by one (see top of function).
	chnEnv.nEnvPosition = position + (m_playBehaviour[kITEnvelopePositionHandling] ? 1 : 0);
}
// Advances the volume, panning and pitch/filter envelope positions of a channel by one tick.
// MED modules do not advance envelopes on the first tick of a note.
void CSoundFile::IncrementEnvelopePositions(ModChannel &chn) const
{
	const bool skipFirstTick = chn.isFirstTick && GetType() == MOD_TYPE_MED;
	if(skipFirstTick)
		return;
	for(const auto envType : {ENV_VOLUME, ENV_PANNING, ENV_PITCH})
	{
		IncrementEnvelopePosition(chn, envType);
	}
}
// Applies instrument fade-out to the channel volume once the note-fade phase has started.
void CSoundFile::ProcessInstrumentFade(ModChannel &chn, int &vol) const
{
	// Fading only applies while the note is in its fade-out phase and an instrument is assigned.
	if(!chn.dwFlags[CHN_NOTEFADE] || chn.pModInstrument == nullptr)
		return;

	const uint32 fadeSpeed = chn.pModInstrument->nFadeOut;
	if(fadeSpeed != 0)
	{
		// Decrease the remaining fade-out level, then scale the volume by it (16.16 fixed point).
		chn.nFadeOutVol -= fadeSpeed * 2;
		if(chn.nFadeOutVol <= 0)
			chn.nFadeOutVol = 0;
		vol = (vol * chn.nFadeOutVol) / 65536;
	} else if(chn.nFadeOutVol == 0)
	{
		// A fade speed of 0 with an already fully faded level silences the channel.
		vol = 0;
	}
}
// Applies Pitch/Pan Separation (PPS): shifts the channel panning depending on how far
// the played note is from the instrument's pan centre note (PPC).
// With PPS = 16 / PPC = C-5, E-6 will pan hard right (and D#6 will not).
void CSoundFile::ProcessPitchPanSeparation(int32 &pan, int note, const ModInstrument &instr)
{
	if(instr.nPPS == 0 || note == NOTE_NONE)
		return;
	const int32 panShift = (note - instr.nPPC - NOTE_MIN) * instr.nPPS / 2;
	pan = Clamp(pan + panShift, 0, 256);
}
// Applies the panbrello effect (panning "vibrato") to the channel's current real panning.
// The waveform delta is only recomputed while a panbrello command is on the current row;
// with kITPanbrelloHold, the last offset persists until the next note or panning command.
void CSoundFile::ProcessPanbrello(ModChannel &chn) const
{
	int pdelta = chn.nPanbrelloOffset;
	if(chn.rowCommand.command == CMD_PANBRELLO)
	{
		uint32 panpos;
		// IT compatibility: IT has its own, more precise tables
		if(m_playBehaviour[kITVibratoTremoloPanbrello])
			panpos = chn.nPanbrelloPos;
		else
			panpos = ((chn.nPanbrelloPos + 0x10) >> 2);

		pdelta = GetVibratoDelta(chn.nPanbrelloType, panpos);

		// IT compatibility: Sample-and-hold style random panbrello (tremolo and vibrato don't use this mechanism in IT)
		// Test case: RandomWaveform.it
		if(m_playBehaviour[kITSampleAndHoldPanbrello] && chn.nPanbrelloType == 3)
		{
			// A new random value is only fetched when the previous one has been held for nPanbrelloSpeed ticks.
			if(chn.nPanbrelloPos == 0 || chn.nPanbrelloPos >= chn.nPanbrelloSpeed)
			{
				chn.nPanbrelloPos = 0;
				chn.nPanbrelloRandomMemory = static_cast<int8>(pdelta);
			}
			chn.nPanbrelloPos++;
			pdelta = chn.nPanbrelloRandomMemory;
		} else
		{
			chn.nPanbrelloPos += chn.nPanbrelloSpeed;
		}
		// IT compatibility: Panbrello effect is active until next note or panning command.
		// Test case: PanbrelloHold.it
		if(m_playBehaviour[kITPanbrelloHold])
		{
			chn.nPanbrelloOffset = static_cast<int8>(pdelta);
		}
	}
	if(pdelta)
	{
		// Scale the waveform delta by the panbrello depth and add it to the panning (clamped to 0...256).
		pdelta = ((pdelta * (int)chn.nPanbrelloDepth) + 2) / 8;
		pdelta += chn.nRealPan;
		chn.nRealPan = Clamp(pdelta, 0, 256);
	}
}
// Processes the arpeggio effect for one channel: sends arpeggiated MIDI notes to an
// assigned plugin, and/or modifies the channel period (or, for custom tunings, the
// arpeggioSteps output) according to the host format's arpeggio behaviour
// (IT, FT2 or generic/ProTracker-style).
void CSoundFile::ProcessArpeggio(CHANNELINDEX nChn, int32 &period, Tuning::NOTEINDEXTYPE &arpeggioSteps)
{
	ModChannel &chn = m_PlayState.Chn[nChn];

#ifndef NO_PLUGINS
	// Plugin arpeggio
	if(chn.pModInstrument && chn.pModInstrument->nMixPlug
	   && !chn.pModInstrument->dwFlags[INS_MUTE]
	   && !chn.dwFlags[CHN_MUTE | CHN_SYNCMUTE])
	{
		const ModInstrument *pIns = chn.pModInstrument;
		IMixPlugin *pPlugin = m_MixPlugins[pIns->nMixPlug - 1].pMixPlugin;
		if(pPlugin)
		{
			uint8 step = 0;
			const bool arpOnRow = (chn.rowCommand.command == CMD_ARPEGGIO);
			const ModCommand::NOTE lastNote = ModCommand::IsNote(chn.nLastNote) ? static_cast<ModCommand::NOTE>(pIns->NoteMap[chn.nLastNote - NOTE_MIN]) : static_cast<ModCommand::NOTE>(NOTE_NONE);
			if(arpOnRow)
			{
				// Cycle base note / x nibble / y nibble on successive ticks.
				switch(m_PlayState.m_nTickCount % 3)
				{
				case 1: step = chn.nArpeggio >> 4; break;
				case 2: step = chn.nArpeggio & 0x0F; break;
				}
				chn.nArpeggioBaseNote = lastNote;
			}

			// Trigger new note:
			// - If there's an arpeggio on this row and
			//   - the note to trigger is not the same as the previous arpeggio note or
			//   - a pattern note has just been triggered on this tick
			// - If there's no arpeggio
			//   - but an arpeggio note is still active and
			//   - there's no note stop or new note that would stop it anyway
			if((arpOnRow && chn.nArpeggioLastNote != chn.nArpeggioBaseNote + step && (!m_SongFlags[SONG_FIRSTTICK] || !chn.rowCommand.IsNote()))
			   || (!arpOnRow && chn.rowCommand.note == NOTE_NONE && chn.nArpeggioLastNote != NOTE_NONE))
				SendMIDINote(nChn, chn.nArpeggioBaseNote + step, static_cast<uint16>(chn.nVolume));
			// Stop note:
			// - If some arpeggio note is still registered or
			// - When starting an arpeggio on a row with no other note on it, stop some possibly still playing note.
			if(chn.nArpeggioLastNote != NOTE_NONE)
				SendMIDINote(nChn, chn.nArpeggioLastNote + NOTE_MAX_SPECIAL, 0);
			else if(arpOnRow && m_SongFlags[SONG_FIRSTTICK] && !chn.rowCommand.IsNote() && ModCommand::IsNote(lastNote))
				SendMIDINote(nChn, lastNote + NOTE_MAX_SPECIAL, 0);

			// Remember which note is sounding so that it can be stopped later.
			if(chn.rowCommand.command == CMD_ARPEGGIO)
				chn.nArpeggioLastNote = chn.nArpeggioBaseNote + step;
			else
				chn.nArpeggioLastNote = NOTE_NONE;
		}
	}
#endif // NO_PLUGINS

	if(chn.nCommand == CMD_ARPEGGIO)
	{
		if(chn.HasCustomTuning())
		{
			// Custom tunings: report the note offset; the actual frequency is recalculated later.
			switch(m_PlayState.m_nTickCount % 3)
			{
			case 0: arpeggioSteps = 0; break;
			case 1: arpeggioSteps = chn.nArpeggio >> 4; break;
			case 2: arpeggioSteps = chn.nArpeggio & 0x0F; break;
			}
			chn.m_CalculateFreq = true;
			chn.m_ReCalculateFreqOnFirstTick = true;
		} else
		{
			if(GetType() == MOD_TYPE_MT2 && m_SongFlags[SONG_FIRSTTICK])
			{
				// MT2 resets any previous portamento when an arpeggio occurs.
				chn.nPeriod = period = GetPeriodFromNote(chn.nNote, chn.nFineTune, chn.nC5Speed);
			}

			if(m_playBehaviour[kITArpeggio])
			{
				//IT playback compatibility 01 & 02
				// Pattern delay restarts tick counting. Not quite correct yet!
				const uint32 tick = m_PlayState.m_nTickCount % (m_PlayState.m_nMusicSpeed + m_PlayState.m_nFrameDelay);
				if(chn.nArpeggio != 0)
				{
					// IT applies the arpeggio offset as a frequency ratio from the linear slide table.
					uint32 arpRatio = 65536;
					switch(tick % 3)
					{
					case 1: arpRatio = LinearSlideUpTable[(chn.nArpeggio >> 4) * 16]; break;
					case 2: arpRatio = LinearSlideUpTable[(chn.nArpeggio & 0x0F) * 16]; break;
					}
					if(PeriodsAreFrequencies())
						period = Util::muldivr(period, arpRatio, 65536);
					else
						period = Util::muldivr(period, 65536, arpRatio);
				}
			} else if(m_playBehaviour[kFT2Arpeggio])
			{
				// FastTracker 2: Swedish tracker logic (TM) arpeggio
				if(!m_SongFlags[SONG_FIRSTTICK])
				{
					// Arpeggio is added on top of current note, but cannot do it the IT way because of
					// the behaviour in ArpeggioClamp.xm.
					// Test case: ArpSlide.xm
					uint32 note = 0;

					// The fact that arpeggio behaves in a totally fucked up way at 16 ticks/row or more is that the arpeggio offset LUT only has 16 entries in FT2.
					// At more than 16 ticks/row, FT2 reads into the vibrato table, which is placed right after the arpeggio table.
					// Test case: Arpeggio.xm
					int arpPos = m_PlayState.m_nMusicSpeed - (m_PlayState.m_nTickCount % m_PlayState.m_nMusicSpeed);
					if(arpPos > 16)
						arpPos = 2;
					else if(arpPos == 16)
						arpPos = 0;
					else
						arpPos %= 3;
					switch(arpPos)
					{
					case 1: note = (chn.nArpeggio >> 4); break;
					case 2: note = (chn.nArpeggio & 0x0F); break;
					}

					if(arpPos != 0)
					{
						// Arpeggio is added on top of current note, but cannot do it the IT way because of
						// the behaviour in ArpeggioClamp.xm.
						// Test case: ArpSlide.xm
						note += GetNoteFromPeriod(period, chn.nFineTune, chn.nC5Speed);

						period = GetPeriodFromNote(note, chn.nFineTune, chn.nC5Speed);

						// FT2 compatibility: FT2 has a different note limit for Arpeggio.
						// Test case: ArpeggioClamp.xm
						if(note >= 108 + NOTE_MIN)
						{
							period = std::max(static_cast<uint32>(period), GetPeriodFromNote(108 + NOTE_MIN, 0, chn.nC5Speed));
						}
					}
				}
			}
			// Other trackers
			else
			{
				uint32 tick = m_PlayState.m_nTickCount;

				// TODO other likely formats for MOD case: MED, OKT, etc
				uint8 note = (GetType() != MOD_TYPE_MOD) ? chn.nNote : static_cast<uint8>(GetNoteFromPeriod(period, chn.nFineTune, chn.nC5Speed));
				if(GetType() & (MOD_TYPE_DBM | MOD_TYPE_DIGI))
					tick += 2;
				switch(tick % 3)
				{
				case 1: note += (chn.nArpeggio >> 4); break;
				case 2: note += (chn.nArpeggio & 0x0F); break;
				}

				if(note != chn.nNote || (GetType() & (MOD_TYPE_DBM | MOD_TYPE_DIGI | MOD_TYPE_STM)) || m_playBehaviour[KST3PortaAfterArpeggio])
				{
					if(m_SongFlags[SONG_PT_MODE])
					{
						// Weird arpeggio wrap-around in ProTracker.
						// Test case: ArpWraparound.mod, and the snare sound in "Jim is dead" by doh.
						if(note == NOTE_MIDDLEC + 24)
						{
							period = int32_max;
							return;
						} else if(note > NOTE_MIDDLEC + 24)
						{
							note -= 37;
						}
					}

					period = GetPeriodFromNote(note, chn.nFineTune, chn.nC5Speed);

					if(GetType() & (MOD_TYPE_DBM | MOD_TYPE_DIGI | MOD_TYPE_PSM | MOD_TYPE_STM | MOD_TYPE_OKT))
					{
						// The arpeggio note offset remains effective after the end of the current row in ScreamTracker 2.
						// This fixes the flute lead in MORPH.STM by Skaven, pattern 27.
						// Note that ScreamTracker 2.24 handles arpeggio slightly differently: It only considers the lower
						// nibble, and switches to that note halfway through the row.
						chn.nPeriod = period;
					} else if(m_playBehaviour[KST3PortaAfterArpeggio])
					{
						chn.nArpeggioLastNote = note;
					}
				}
			}
		}
	}
}
// Applies the vibrato effect to the channel's period/frequency (or, for custom tunings,
// to the vibratoFactor output), and forwards vibrato as MIDI pitch bend to an assigned
// plugin. Also stops plugin MIDI vibrato when the effect ends.
void CSoundFile::ProcessVibrato(CHANNELINDEX nChn, int32 &period, Tuning::RATIOTYPE &vibratoFactor)
{
	ModChannel &chn = m_PlayState.Chn[nChn];

	if(chn.dwFlags[CHN_VIBRATO])
	{
		// IT (without old effects) also advances the vibrato position on the first tick.
		const bool advancePosition = !m_SongFlags[SONG_FIRSTTICK] || ((GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT)) && !(m_SongFlags[SONG_ITOLDEFFECTS]));

		if(GetType() == MOD_TYPE_669)
		{
			// Composer 669 uses a simple square-ish vibrato of its own.
			if(chn.nVibratoPos % 2u)
			{
				period += chn.nVibratoDepth * 167;	// Already multiplied by 4, and it seems like the real factor here is 669... how original =)
			}
			chn.nVibratoPos++;
			return;
		}

		// IT compatibility: IT has its own, more precise tables and pre-increments the vibrato position
		if(advancePosition && m_playBehaviour[kITVibratoTremoloPanbrello])
			chn.nVibratoPos += 4 * chn.nVibratoSpeed;

		int vdelta = GetVibratoDelta(chn.nVibratoType, chn.nVibratoPos);

		if(chn.HasCustomTuning())
		{
			//Hack implementation: Scaling vibratofactor to [0.95; 1.05]
			//using figure from above tables and vibratodepth parameter
			vibratoFactor += 0.05f * (vdelta * chn.nVibratoDepth) / (128.0f * 60.0f);
			chn.m_CalculateFreq = true;
			chn.m_ReCalculateFreqOnFirstTick = false;

			if(m_PlayState.m_nTickCount + 1 == m_PlayState.m_nMusicSpeed)
				chn.m_ReCalculateFreqOnFirstTick = true;
		} else
		{
			// Original behaviour
			if(m_SongFlags.test_all(SONG_FIRSTTICK | SONG_PT_MODE) || ((GetType() & (MOD_TYPE_DIGI | MOD_TYPE_DBM)) && m_SongFlags[SONG_FIRSTTICK]))
			{
				// ProTracker doesn't apply vibrato nor advance on the first tick.
				// Test case: VibratoReset.mod
				return;
			} else if((GetType() & (MOD_TYPE_XM | MOD_TYPE_MOD)) && (chn.nVibratoType & 0x03) == 1)
			{
				// FT2 compatibility: Vibrato ramp down table is upside down.
				// Test case: VibratoWaveforms.xm
				vdelta = -vdelta;
			}

			// vdepth is the right shift applied to (delta * depth); format-dependent.
			uint32 vdepth;
			// IT compatibility: correct vibrato depth
			if(m_playBehaviour[kITVibratoTremoloPanbrello])
			{
				// Yes, vibrato goes backwards with old effects enabled!
				if(m_SongFlags[SONG_ITOLDEFFECTS])
				{
					// Test case: vibrato-oldfx.it
					vdepth = 5;
				} else
				{
					// Test case: vibrato.it
					vdepth = 6;
					vdelta = -vdelta;
				}
			} else
			{
				if(m_SongFlags[SONG_S3MOLDVIBRATO])
					vdepth = 5;
				else if(GetType() == MOD_TYPE_DTM)
					vdepth = 8;
				else if(GetType() & (MOD_TYPE_DBM | MOD_TYPE_MTM))
					vdepth = 7;
				else if((GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT)) && !m_SongFlags[SONG_ITOLDEFFECTS])
					vdepth = 7;
				else
					vdepth = 6;

				// ST3 compatibility: Do not distinguish between vibrato types in effect memory
				// Test case: VibratoTypeChange.s3m
				if(m_playBehaviour[kST3VibratoMemory] && chn.rowCommand.command == CMD_FINEVIBRATO)
					vdepth += 2;
			}

			vdelta = (-vdelta * static_cast<int>(chn.nVibratoDepth)) / (1 << vdepth);

			DoFreqSlide(chn, period, vdelta);

			// Process MIDI vibrato for plugins:
#ifndef NO_PLUGINS
			IMixPlugin *plugin = GetChannelInstrumentPlugin(nChn);
			if(plugin != nullptr)
			{
				// If the Pitch Wheel Depth is configured correctly (so it's the same as the plugin's PWD),
				// MIDI vibrato will sound identical to vibrato with linear slides enabled.
				int8 pwd = 2;
				if(chn.pModInstrument != nullptr)
				{
					pwd = chn.pModInstrument->midiPWD;
				}
				plugin->MidiVibrato(vdelta, pwd, nChn);
			}
#endif // NO_PLUGINS
		}

		// Advance vibrato position - IT updates on every tick, unless "old effects" are enabled (in this case it only updates on non-first ticks like other trackers)
		// IT compatibility: IT has its own, more precise tables and pre-increments the vibrato position
		if(advancePosition && !m_playBehaviour[kITVibratoTremoloPanbrello])
			chn.nVibratoPos += chn.nVibratoSpeed;
	} else if(chn.dwOldFlags[CHN_VIBRATO])
	{
		// Vibrato just ended on this tick: stop MIDI vibrato for plugins.
#ifndef NO_PLUGINS
		IMixPlugin *plugin = GetChannelInstrumentPlugin(nChn);
		if(plugin != nullptr)
		{
			plugin->MidiVibrato(0, 0, nChn);
		}
#endif // NO_PLUGINS
	}
}
// Applies sample auto-vibrato to the channel period (or, with custom tunings, to the
// vibratoFactor output). Two implementations exist: an IT-compatible path following
// ITTECH.TXT / Schism Tracker, and the classic MPT path used for all other formats.
// nPeriodFrac receives the fractional part of the period where applicable.
void CSoundFile::ProcessSampleAutoVibrato(ModChannel &chn, int32 &period, Tuning::RATIOTYPE &vibratoFactor, int &nPeriodFrac) const
{
	// Sample Auto-Vibrato
	if(chn.pModSample != nullptr && chn.pModSample->nVibDepth)
	{
		const ModSample *pSmp = chn.pModSample;
		const bool hasTuning = chn.HasCustomTuning();

		// In IT compatible mode, we use always frequencies, otherwise we use periods, which are upside down.
		// In this context, the "up" tables refer to the tables that increase frequency, and the down tables are the ones that decrease frequency.
		const bool useFreq = PeriodsAreFrequencies();
		const uint32 (&upTable)[256] = useFreq ? LinearSlideUpTable : LinearSlideDownTable;
		const uint32 (&downTable)[256] = useFreq ? LinearSlideDownTable : LinearSlideUpTable;
		const uint32 (&fineUpTable)[16] = useFreq ? FineLinearSlideUpTable : FineLinearSlideDownTable;
		const uint32 (&fineDownTable)[16] = useFreq ? FineLinearSlideDownTable : FineLinearSlideUpTable;

		// IT compatibility: Autovibrato is so much different in IT that I just put this in a separate code block, to get rid of a dozen IsCompatibilityMode() calls.
		if(m_playBehaviour[kITVibratoTremoloPanbrello] && !hasTuning && GetType() != MOD_TYPE_MT2)
		{
			if(!pSmp->nVibRate)
				return;

			// Schism's autovibrato code
			/*
			X86 Assembler from ITTECH.TXT:
			1) Mov AX, [SomeVariableNameRelatingToVibrato]
			2) Add AL, Rate
			3) AdC AH, 0
			4) AH contains the depth of the vibrato as a fine-linear slide.
			5) Mov [SomeVariableNameRelatingToVibrato], AX ; For the next cycle.
			*/
			const int vibpos = chn.nAutoVibPos & 0xFF;
			int adepth = chn.nAutoVibDepth; // (1)
			adepth += pSmp->nVibSweep; // (2 & 3)
			// Sweep ramps the depth up to the sample's configured maximum.
			LimitMax(adepth, static_cast<int>(pSmp->nVibDepth * 256u));
			chn.nAutoVibDepth = adepth; // (5)
			adepth /= 256; // (4)

			chn.nAutoVibPos += pSmp->nVibRate;

			// Waveform lookup: delta in the range -64...+64 (approximately).
			int vdelta;
			switch(pSmp->nVibType)
			{
			case VIB_RANDOM:
				vdelta = mpt::random<int, 7>(AccessPRNG()) - 0x40;
				break;
			case VIB_RAMP_DOWN:
				vdelta = 64 - (vibpos + 1) / 2;
				break;
			case VIB_RAMP_UP:
				vdelta = ((vibpos + 1) / 2) - 64;
				break;
			case VIB_SQUARE:
				vdelta = vibpos < 128 ? 64 : 0;
				break;
			case VIB_SINE:
			default:
				vdelta = ITSinusTable[vibpos];
				break;
			}

			vdelta = (vdelta * adepth) / 64;
			uint32 l = std::abs(vdelta);
			// Work with 8 fractional bits; guard against overflow first.
			LimitMax(period, Util::MaxValueOfType(period) / 256);
			period *= 256;
			if(vdelta < 0)
			{
				// Coarse slide from the main table plus a fine slide for the remainder.
				vdelta = Util::muldiv(period, downTable[l / 4u], 0x10000) - period;
				if (l & 0x03)
				{
					vdelta += Util::muldiv(period, fineDownTable[l & 0x03], 0x10000) - period;
				}
			} else
			{
				vdelta = Util::muldiv(period, upTable[l / 4u], 0x10000) - period;
				if (l & 0x03)
				{
					vdelta += Util::muldiv(period, fineUpTable[l & 0x03], 0x10000) - period;
				}
			}
			period = (period + vdelta) / 256;
			nPeriodFrac = vdelta & 0xFF;
		} else
		{
			// MPT's autovibrato code
			if (pSmp->nVibSweep == 0 && !(GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT)))
			{
				// No sweep: jump straight to full depth.
				chn.nAutoVibDepth = pSmp->nVibDepth * 256;
			} else
			{
				// Calculate current autovibrato depth using vibsweep
				if (GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT))
				{
					chn.nAutoVibDepth += pSmp->nVibSweep * 2u;
				} else
				{
					if(!chn.dwFlags[CHN_KEYOFF])
					{
						chn.nAutoVibDepth += (pSmp->nVibDepth * 256u) / pSmp->nVibSweep;
					}
				}
				LimitMax(chn.nAutoVibDepth, static_cast<int>(pSmp->nVibDepth * 256u));
			}
			chn.nAutoVibPos += pSmp->nVibRate;
			int vdelta;
			switch(pSmp->nVibType)
			{
			case VIB_RANDOM:
				vdelta = ModRandomTable[chn.nAutoVibPos & 0x3F];
				chn.nAutoVibPos++;
				break;
			case VIB_RAMP_DOWN:
				vdelta = ((0x40 - (chn.nAutoVibPos / 2u)) & 0x7F) - 0x40;
				break;
			case VIB_RAMP_UP:
				vdelta = ((0x40 + (chn.nAutoVibPos / 2u)) & 0x7F) - 0x40;
				break;
			case VIB_SQUARE:
				vdelta = (chn.nAutoVibPos & 128) ? +64 : -64;
				break;
			case VIB_SINE:
			default:
				if(GetType() != MOD_TYPE_MT2)
				{
					vdelta = -ITSinusTable[chn.nAutoVibPos & 0xFF];
				} else
				{
					// Fix flat-sounding pads in "another worlds" by Eternal Engine.
					// Vibrato starts at the maximum amplitude of the sine wave
					// and the vibrato frequency never decreases below the original note's frequency.
					vdelta = (-ITSinusTable[(chn.nAutoVibPos + 192) & 0xFF] + 64) / 2;
				}
			}
			int n = (vdelta * chn.nAutoVibDepth) / 256;

			if(hasTuning)
			{
				//Vib sweep is not taken into account here.
				vibratoFactor += 0.05F * pSmp->nVibDepth * vdelta / 4096.0f; //4096 == 64^2
				//See vibrato for explanation.
				chn.m_CalculateFreq = true;
				/*
				Finestep vibrato:
				const float autoVibDepth = pSmp->nVibDepth * val / 4096.0f; //4096 == 64^2
				vibratoFineSteps += static_cast<CTuning::FINESTEPTYPE>(chn.pModInstrument->pTuning->GetFineStepCount() * autoVibDepth);
				chn.m_CalculateFreq = true;
				*/
			}
			else //Original behavior
			{
				if (GetType() != MOD_TYPE_XM)
				{
					// Interpolate between two adjacent slide table entries for a smoother curve.
					int df1, df2;
					if (n < 0)
					{
						n = -n;
						uint32 n1 = n / 256;
						df1 = downTable[n1];
						df2 = downTable[n1+1];
					} else
					{
						uint32 n1 = n / 256;
						df1 = upTable[n1];
						df2 = upTable[n1+1];
					}
					n /= 4;
					period = Util::muldiv(period, df1 + ((df2 - df1) * (n & 0x3F) / 64), 256);
					nPeriodFrac = period & 0xFF;
					period /= 256;
				} else
				{
					// XM applies the delta to the period directly.
					period += (n / 64);
				}
			} //Original MPT behavior
		}
	}
}
// Sets up volume ramping for a channel: computes per-sample left/right ramp increments
// so the mixer interpolates from the previous to the new channel volume, avoiding clicks.
// Falls back to setting the target volumes directly when no ramping is needed.
void CSoundFile::ProcessRamping(ModChannel &chn) const
{
	chn.leftRamp = chn.rightRamp = 0;
	// Keep target volumes small enough for the fixed-point ramp math below.
	LimitMax(chn.newLeftVol, int32_max >> VOLUMERAMPPRECISION);
	LimitMax(chn.newRightVol, int32_max >> VOLUMERAMPPRECISION);
	if(chn.dwFlags[CHN_VOLUMERAMP] && (chn.leftVol != chn.newLeftVol || chn.rightVol != chn.newRightVol))
	{
		const bool rampUp = (chn.newLeftVol > chn.leftVol) || (chn.newRightVol > chn.rightVol);
		int32 rampLength, globalRampLength, instrRampLength = 0;
		rampLength = globalRampLength = (rampUp ? m_MixerSettings.GetVolumeRampUpSamples() : m_MixerSettings.GetVolumeRampDownSamples());
		//XXXih: add real support for bidi ramping here
		if(m_playBehaviour[kFT2VolumeRamping] && (GetType() & MOD_TYPE_XM))
		{
			// apply FT2-style super-soft volume ramping (5ms), overriding openmpt settings
			rampLength = globalRampLength = Util::muldivr(5, m_MixerSettings.gdwMixingFreq, 1000);
		}
		if(chn.pModInstrument != nullptr && rampUp)
		{
			// Instrument-specific ramp-up time (in 1/100000 s units) takes precedence if set.
			instrRampLength = chn.pModInstrument->nVolRampUp;
			rampLength = instrRampLength ? (m_MixerSettings.gdwMixingFreq * instrRampLength / 100000) : globalRampLength;
		}
		const bool enableCustomRamp = (instrRampLength > 0);
		if(!rampLength)
		{
			rampLength = 1;
		}
		// Volume deltas in VOLUMERAMPPRECISION fixed point.
		int32 leftDelta = ((chn.newLeftVol - chn.leftVol) * (1 << VOLUMERAMPPRECISION));
		int32 rightDelta = ((chn.newRightVol - chn.rightVol) * (1 << VOLUMERAMPPRECISION));
		if(!enableCustomRamp)
		{
			// Extra-smooth ramping, unless we're forced to use the default values
			if((chn.leftVol | chn.rightVol) && (chn.newLeftVol | chn.newRightVol) && !chn.dwFlags[CHN_FASTVOLRAMP])
			{
				rampLength = m_PlayState.m_nBufferCount;
				Limit(rampLength, globalRampLength, int32(1 << (VOLUMERAMPPRECISION - 1)));
			}
		}
		chn.leftRamp = leftDelta / rampLength;
		chn.rightRamp = rightDelta / rampLength;
		// Rewind the current volume so that ramping over rampLength samples lands exactly on the target.
		chn.leftVol = chn.newLeftVol - ((chn.leftRamp * rampLength) / (1 << VOLUMERAMPPRECISION));
		chn.rightVol = chn.newRightVol - ((chn.rightRamp * rampLength) / (1 << VOLUMERAMPPRECISION));
		if (chn.leftRamp|chn.rightRamp)
		{
			chn.nRampLength = rampLength;
		} else
		{
			// Increments rounded to zero: no audible ramp possible, just jump to the target.
			chn.dwFlags.reset(CHN_VOLUMERAMP);
			chn.leftVol = chn.newLeftVol;
			chn.rightVol = chn.newRightVol;
		}
	} else
	{
		// No ramping requested or volumes unchanged: apply targets directly.
		chn.dwFlags.reset(CHN_VOLUMERAMP);
		chn.leftVol = chn.newLeftVol;
		chn.rightVol = chn.newRightVol;
	}
	chn.rampLeftVol = chn.leftVol * (1 << VOLUMERAMPPRECISION);
	chn.rampRightVol = chn.rightVol * (1 << VOLUMERAMPPRECISION);
	chn.dwFlags.reset(CHN_FASTVOLRAMP);
}
// Returns channel increment and frequency with FREQ_FRACBITS fractional bits
std::pair<SamplePosition, uint32> CSoundFile::GetChannelIncrement(const ModChannel &chn, uint32 period, int periodFrac) const
{
uint32 freq;
if(!chn.HasCustomTuning())
freq = GetFreqFromPeriod(period, chn.nC5Speed, periodFrac);
else
freq = chn.nPeriod;
const ModInstrument *ins = chn.pModInstrument;
if(int32 finetune = chn.microTuning; finetune != 0)
{
if(ins)
finetune *= ins->midiPWD;
if(finetune)
freq = mpt::saturate_round<uint32>(freq * std::pow(2.0, finetune / (12.0 * 256.0 * 128.0)));
}
// Applying Pitch/Tempo lock
if(ins && ins->pitchToTempoLock.GetRaw())
{
freq = Util::muldivr(freq, m_PlayState.m_nMusicTempo.GetRaw(), ins->pitchToTempoLock.GetRaw());
}
// Avoid increment to overflow and become negative with unrealisticly high frequencies.
LimitMax(freq, uint32(int32_max));
return {SamplePosition::Ratio(freq, m_MixerSettings.gdwMixingFreq << FREQ_FRACBITS), freq};
}
////////////////////////////////////////////////////////////////////////////////////////////
// Handles envelopes & mixer setup
bool CSoundFile::ReadNote()
{
#ifdef MODPLUG_TRACKER
// Checking end of row ?
if(m_SongFlags[SONG_PAUSED])
{
m_PlayState.m_nTickCount = 0;
if (!m_PlayState.m_nMusicSpeed) m_PlayState.m_nMusicSpeed = 6;
if (!m_PlayState.m_nMusicTempo.GetRaw()) m_PlayState.m_nMusicTempo.Set(125);
} else
#endif // MODPLUG_TRACKER
{
if(!ProcessRow())
return false;
}
////////////////////////////////////////////////////////////////////////////////////
if (m_PlayState.m_nMusicTempo.GetRaw() == 0) return false;
m_PlayState.m_nSamplesPerTick = GetTickDuration(m_PlayState);
m_PlayState.m_nBufferCount = m_PlayState.m_nSamplesPerTick;
// Master Volume + Pre-Amplification / Attenuation setup
uint32 nMasterVol;
{
CHANNELINDEX nchn32 = Clamp(m_nChannels, CHANNELINDEX(1), CHANNELINDEX(31));
uint32 mastervol;
if (m_PlayConfig.getUseGlobalPreAmp())
{
int realmastervol = m_MixerSettings.m_nPreAmp;
if (realmastervol > 0x80)
{
//Attenuate global pre-amp depending on num channels
realmastervol = 0x80 + ((realmastervol - 0x80) * (nchn32 + 4)) / 16;
}
mastervol = (realmastervol * (m_nSamplePreAmp)) / 64;
} else
{
//Preferred option: don't use global pre-amp at all.
mastervol = m_nSamplePreAmp;
}
if (m_PlayConfig.getUseGlobalPreAmp())
{
uint32 attenuation =
#ifndef NO_AGC
(m_MixerSettings.DSPMask & SNDDSP_AGC) ? PreAmpAGCTable[nchn32 / 2u] :
#endif
PreAmpTable[nchn32 / 2u];
if(attenuation < 1) attenuation = 1;
nMasterVol = (mastervol << 7) / attenuation;
} else
{
nMasterVol = mastervol;
}
}
////////////////////////////////////////////////////////////////////////////////////
// Update channels data
m_nMixChannels = 0;
for (CHANNELINDEX nChn = 0; nChn < MAX_CHANNELS; nChn++)
{
ModChannel &chn = m_PlayState.Chn[nChn];
// FT2 Compatibility: Prevent notes to be stopped after a fadeout. This way, a portamento effect can pick up a faded instrument which is long enough.
// This occurs for example in the bassline (channel 11) of jt_burn.xm. I hope this won't break anything else...
// I also suppose this could decrease mixing performance a bit, but hey, which CPU can't handle 32 muted channels these days... :-)
if(chn.dwFlags[CHN_NOTEFADE] && (!(chn.nFadeOutVol|chn.leftVol|chn.rightVol)) && !m_playBehaviour[kFT2ProcessSilentChannels])
{
chn.nLength = 0;
chn.nROfs = chn.nLOfs = 0;
}
// Check for unused channel
if(chn.dwFlags[CHN_MUTE] || (nChn >= m_nChannels && !chn.nLength))
{
if(nChn < m_nChannels)
{
// Process MIDI macros on channels that are currently muted.
ProcessMacroOnChannel(nChn);
}
chn.nLeftVU = chn.nRightVU = 0;
continue;
}
// Reset channel data
chn.increment = SamplePosition(0);
chn.nRealVolume = 0;
chn.nCalcVolume = 0;
chn.nRampLength = 0;
//Aux variables
Tuning::RATIOTYPE vibratoFactor = 1;
Tuning::NOTEINDEXTYPE arpeggioSteps = 0;
const ModInstrument *pIns = chn.pModInstrument;
// Calc Frequency
int32 period = 0;
// Also process envelopes etc. when there's a plugin on this channel, for possible fake automation using volume and pan data.
// We only care about master channels, though, since automation only "happens" on them.
const bool samplePlaying = (chn.nPeriod && chn.nLength);
const bool plugAssigned = (nChn < m_nChannels) && (ChnSettings[nChn].nMixPlugin || (chn.pModInstrument != nullptr && chn.pModInstrument->nMixPlug));
if (samplePlaying || plugAssigned)
{
int vol = chn.nVolume;
int insVol = chn.nInsVol; // This is the "SV * IV" value in ITTECH.TXT
ProcessVolumeSwing(chn, m_playBehaviour[kITSwingBehaviour] ? insVol : vol);
ProcessPanningSwing(chn);
ProcessTremolo(chn, vol);
ProcessTremor(nChn, vol);
// Clip volume and multiply (extend to 14 bits)
Limit(vol, 0, 256);
vol <<= 6;
// Process Envelopes
if (pIns)
{
if(m_playBehaviour[kITEnvelopePositionHandling])
{
// In IT compatible mode, envelope position indices are shifted by one for proper envelope pausing,
// so we have to update the position before we actually process the envelopes.
// When using MPT behaviour, we get the envelope position for the next tick while we are still calculating the current tick,
// which then results in wrong position information when the envelope is paused on the next row.
// Test cases: s77.it
IncrementEnvelopePositions(chn);
}
ProcessVolumeEnvelope(chn, vol);
ProcessInstrumentFade(chn, vol);
ProcessPanningEnvelope(chn);
if(!m_playBehaviour[kITPitchPanSeparation] && chn.nNote != NOTE_NONE && chn.pModInstrument && chn.pModInstrument->nPPS != 0)
ProcessPitchPanSeparation(chn.nRealPan, chn.nNote, *chn.pModInstrument);
} else
{
// No Envelope: key off => note cut
if(chn.dwFlags[CHN_NOTEFADE]) // 1.41-: CHN_KEYOFF|CHN_NOTEFADE
{
chn.nFadeOutVol = 0;
vol = 0;
}
}
if(chn.isPaused)
vol = 0;
// vol is 14-bits
if (vol)
{
// IMPORTANT: chn.nRealVolume is 14 bits !!!
// -> Util::muldiv( 14+8, 6+6, 18); => RealVolume: 14-bit result (22+12-20)
if(chn.dwFlags[CHN_SYNCMUTE])
{
chn.nRealVolume = 0;
} else if (m_PlayConfig.getGlobalVolumeAppliesToMaster())
{
// Don't let global volume affect level of sample if
// Global volume is going to be applied to master output anyway.
chn.nRealVolume = Util::muldiv(vol * MAX_GLOBAL_VOLUME, chn.nGlobalVol * insVol, 1 << 20);
} else
{
chn.nRealVolume = Util::muldiv(vol * m_PlayState.m_nGlobalVolume, chn.nGlobalVol * insVol, 1 << 20);
}
}
chn.nCalcVolume = vol; // Update calculated volume for MIDI macros
// ST3 only clamps the final output period, but never the channel's internal period.
// Test case: PeriodLimit.s3m
if (chn.nPeriod < m_nMinPeriod
&& GetType() != MOD_TYPE_S3M
&& !PeriodsAreFrequencies())
{
chn.nPeriod = m_nMinPeriod;
} else if(chn.nPeriod >= m_nMaxPeriod && m_playBehaviour[kApplyUpperPeriodLimit] && !PeriodsAreFrequencies())
{
// ...but on the other hand, ST3's SoundBlaster driver clamps the maximum channel period.
// Test case: PeriodLimitUpper.s3m
chn.nPeriod = m_nMaxPeriod;
}
if(m_playBehaviour[kFT2Periods]) Clamp(chn.nPeriod, 1, 31999);
period = chn.nPeriod;
// When glissando mode is set to semitones, clamp to the next halftone.
if((chn.dwFlags & (CHN_GLISSANDO | CHN_PORTAMENTO)) == (CHN_GLISSANDO | CHN_PORTAMENTO)
&& (!m_SongFlags[SONG_PT_MODE] || (chn.rowCommand.IsPortamento() && !m_SongFlags[SONG_FIRSTTICK])))
{
if(period != chn.cachedPeriod)
{
// Only recompute this whole thing in case the base period has changed.
chn.cachedPeriod = period;
chn.glissandoPeriod = GetPeriodFromNote(GetNoteFromPeriod(period, chn.nFineTune, chn.nC5Speed), chn.nFineTune, chn.nC5Speed);
}
period = chn.glissandoPeriod;
}
ProcessArpeggio(nChn, period, arpeggioSteps);
// Preserve Amiga freq limits.
// In ST3, the frequency is always clamped to periods 113 to 856, while in ProTracker,
// the limit is variable, depending on the finetune of the sample.
// The int32_max test is for the arpeggio wrap-around in ProcessArpeggio().
// Test case: AmigaLimits.s3m, AmigaLimitsFinetune.mod
if(m_SongFlags[SONG_AMIGALIMITS | SONG_PT_MODE] && period != int32_max)
{
int limitLow = 113 * 4, limitHigh = 856 * 4;
if(GetType() != MOD_TYPE_S3M)
{
const int tableOffset = XM2MODFineTune(chn.nFineTune) * 12;
limitLow = ProTrackerTunedPeriods[tableOffset + 11] / 2;
limitHigh = ProTrackerTunedPeriods[tableOffset] * 2;
// Amiga cannot actually keep up with lower periods
if(limitLow < 113 * 4) limitLow = 113 * 4;
}
Limit(period, limitLow, limitHigh);
Limit(chn.nPeriod, limitLow, limitHigh);
}
ProcessPanbrello(chn);
}
// IT Compatibility: Ensure that there is no pan swing, panbrello, panning envelopes, etc. applied on surround channels.
// Test case: surround-pan.it
if(chn.dwFlags[CHN_SURROUND] && !m_SongFlags[SONG_SURROUNDPAN] && m_playBehaviour[kITNoSurroundPan])
{
chn.nRealPan = 128;
}
// Now that all relevant envelopes etc. have been processed, we can parse the MIDI macro data.
ProcessMacroOnChannel(nChn);
// After MIDI macros have been processed, we can also process the pitch / filter envelope and other pitch-related things.
if(samplePlaying)
{
int cutoff = ProcessPitchFilterEnvelope(chn, period);
if(cutoff >= 0 && chn.dwFlags[CHN_ADLIB] && m_opl)
{
// Cutoff doubles as modulator intensity for FM instruments
m_opl->Volume(nChn, static_cast<uint8>(cutoff / 4), true);
}
}
if(chn.rowCommand.volcmd == VOLCMD_VIBRATODEPTH &&
(chn.rowCommand.command == CMD_VIBRATO || chn.rowCommand.command == CMD_VIBRATOVOL || chn.rowCommand.command == CMD_FINEVIBRATO))
{
if(GetType() == MOD_TYPE_XM)
{
// XM Compatibility: Vibrato should be advanced twice (but not added up) if both volume-column and effect column vibrato is present.
// Effect column vibrato parameter has precedence if non-zero.
// Test case: VibratoDouble.xm
if(!m_SongFlags[SONG_FIRSTTICK])
chn.nVibratoPos += chn.nVibratoSpeed;
} else if(GetType() & (MOD_TYPE_IT | MOD_TYPE_MPT))
{
// IT Compatibility: Vibrato should be applied twice if both volume-colum and effect column vibrato is present.
// Volume column vibrato parameter has precedence if non-zero.
// Test case: VibratoDouble.it
Vibrato(chn, chn.rowCommand.vol);
ProcessVibrato(nChn, period, vibratoFactor);
}
}
// Plugins may also receive vibrato
ProcessVibrato(nChn, period, vibratoFactor);
if(samplePlaying)
{
int nPeriodFrac = 0;
ProcessSampleAutoVibrato(chn, period, vibratoFactor, nPeriodFrac);
// Final Period
// ST3 only clamps the final output period, but never the channel's internal period.
// Test case: PeriodLimit.s3m
if (period <= m_nMinPeriod)
{
if(m_playBehaviour[kST3LimitPeriod]) chn.nLength = 0; // Pattern 15 in watcha.s3m
period = m_nMinPeriod;
}
const bool hasTuning = chn.HasCustomTuning();
if(hasTuning)
{
if(chn.m_CalculateFreq || (chn.m_ReCalculateFreqOnFirstTick && m_PlayState.m_nTickCount == 0))
{
chn.RecalcTuningFreq(vibratoFactor, arpeggioSteps, *this);
if(!chn.m_CalculateFreq)
chn.m_ReCalculateFreqOnFirstTick = false;
else
chn.m_CalculateFreq = false;
}
}
auto [ninc, freq] = GetChannelIncrement(chn, period, nPeriodFrac);
#ifndef MODPLUG_TRACKER
ninc.MulDiv(m_nFreqFactor, 65536);
#endif // !MODPLUG_TRACKER
if(ninc.IsZero())
{
ninc.Set(0, 1);
}
chn.increment = ninc;
if((chn.dwFlags & (CHN_ADLIB | CHN_MUTE | CHN_SYNCMUTE)) == CHN_ADLIB && m_opl)
{
const bool doProcess = m_playBehaviour[kOPLFlexibleNoteOff] || !chn.dwFlags[CHN_NOTEFADE] || GetType() == MOD_TYPE_S3M;
if(doProcess && !(GetType() == MOD_TYPE_S3M && chn.dwFlags[CHN_KEYOFF]))
{
// In ST3, a sample rate of 8363 Hz is mapped to middle-C, which is 261.625 Hz in a tempered scale at A4 = 440.
// Hence, we have to translate our "sample rate" into pitch.
auto milliHertz = Util::muldivr_unsigned(freq, 261625, 8363 << FREQ_FRACBITS);
const bool keyOff = chn.dwFlags[CHN_KEYOFF] || (chn.dwFlags[CHN_NOTEFADE] && chn.nFadeOutVol == 0);
if(!m_playBehaviour[kOPLNoteStopWith0Hz] || !keyOff)
m_opl->Frequency(nChn, milliHertz, keyOff, m_playBehaviour[kOPLBeatingOscillators]);
}
if(doProcess)
{
// Scale volume to OPL range (0...63).
m_opl->Volume(nChn, static_cast<uint8>(Util::muldivr_unsigned(chn.nCalcVolume * chn.nGlobalVol * chn.nInsVol, 63, 1 << 26)), false);
chn.nRealPan = m_opl->Pan(nChn, chn.nRealPan) * 128 + 128;
}
// Deallocate OPL channels for notes that are most definitely never going to play again.
if(const auto *ins = chn.pModInstrument; ins != nullptr
&& (ins->VolEnv.dwFlags & (ENV_ENABLED | ENV_LOOP | ENV_SUSTAIN)) == ENV_ENABLED
&& !ins->VolEnv.empty()
&& chn.GetEnvelope(ENV_VOLUME).nEnvPosition >= ins->VolEnv.back().tick
&& ins->VolEnv.back().value == 0)
{
m_opl->NoteCut(nChn);
if(!m_playBehaviour[kOPLNoResetAtEnvelopeEnd])
chn.dwFlags.reset(CHN_ADLIB);
chn.dwFlags.set(CHN_NOTEFADE);
chn.nFadeOutVol = 0;
} else if(m_playBehaviour[kOPLFlexibleNoteOff] && chn.dwFlags[CHN_NOTEFADE] && chn.nFadeOutVol == 0)
{
m_opl->NoteCut(nChn);
chn.dwFlags.reset(CHN_ADLIB);
}
}
}
// Increment envelope positions
if(pIns != nullptr && !m_playBehaviour[kITEnvelopePositionHandling])
{
// In IT and FT2 compatible mode, envelope positions are updated above.
// Test cases: s77.it, EnvLoops.xm
IncrementEnvelopePositions(chn);
}
// Volume ramping
chn.dwFlags.set(CHN_VOLUMERAMP, (chn.nRealVolume | chn.rightVol | chn.leftVol) != 0 && !chn.dwFlags[CHN_ADLIB]);
constexpr uint8 VUMETER_DECAY = 4;
chn.nLeftVU = (chn.nLeftVU > VUMETER_DECAY) ? (chn.nLeftVU - VUMETER_DECAY) : 0;
chn.nRightVU = (chn.nRightVU > VUMETER_DECAY) ? (chn.nRightVU - VUMETER_DECAY) : 0;
chn.newLeftVol = chn.newRightVol = 0;
chn.pCurrentSample = (chn.pModSample && chn.pModSample->HasSampleData() && chn.nLength && chn.IsSamplePlaying()) ? chn.pModSample->samplev() : nullptr;
if(chn.pCurrentSample || (chn.HasMIDIOutput() && !chn.dwFlags[CHN_KEYOFF | CHN_NOTEFADE]))
{
// Update VU-Meter (nRealVolume is 14-bit)
uint32 vul = (chn.nRealVolume * (256-chn.nRealPan)) / (1 << 14);
if (vul > 127) vul = 127;
if (chn.nLeftVU > 127) chn.nLeftVU = (uint8)vul;
vul /= 2;
if (chn.nLeftVU < vul) chn.nLeftVU = (uint8)vul;
uint32 vur = (chn.nRealVolume * chn.nRealPan) / (1 << 14);
if (vur > 127) vur = 127;
if (chn.nRightVU > 127) chn.nRightVU = (uint8)vur;
vur /= 2;
if (chn.nRightVU < vur) chn.nRightVU = (uint8)vur;
} else
{
// Note change but no sample
if (chn.nLeftVU > 128) chn.nLeftVU = 0;
if (chn.nRightVU > 128) chn.nRightVU = 0;
}
if (chn.pCurrentSample)
{
#ifdef MODPLUG_TRACKER
const uint32 kChnMasterVol = chn.dwFlags[CHN_EXTRALOUD] ? (uint32)m_PlayConfig.getNormalSamplePreAmp() : nMasterVol;
#else
const uint32 kChnMasterVol = nMasterVol;
#endif // MODPLUG_TRACKER
// Adjusting volumes
{
int32 pan = (m_MixerSettings.gnChannels >= 2) ? Clamp(chn.nRealPan, 0, 256) : 128;
int32 realvol;
if(m_PlayConfig.getUseGlobalPreAmp())
{
realvol = (chn.nRealVolume * kChnMasterVol) / 128;
} else
{
// Extra attenuation required here if we're bypassing pre-amp.
realvol = (chn.nRealVolume * kChnMasterVol) / 256;
}
const PanningMode panningMode = m_PlayConfig.getPanningMode();
if(panningMode == PanningMode::SoftPanning || (panningMode == PanningMode::Undetermined && (m_MixerSettings.MixerFlags & SNDMIX_SOFTPANNING)))
{
if(pan < 128)
{
chn.newLeftVol = (realvol * 128) / 256;
chn.newRightVol = (realvol * pan) / 256;
} else
{
chn.newLeftVol = (realvol * (256 - pan)) / 256;
chn.newRightVol = (realvol * 128) / 256;
}
} else if(panningMode == PanningMode::FT2Panning)
{
// FT2 uses square root panning. There is a 257-entry LUT for this,
// but FT2's internal panning ranges from 0 to 255 only, meaning that
// you can never truly achieve 100% right panning in FT2, only 100% left.
// Test case: FT2PanLaw.xm
LimitMax(pan, 255);
const int panL = pan > 0 ? XMPanningTable[256 - pan] : 65536;
const int panR = XMPanningTable[pan];
chn.newLeftVol = (realvol * panL) / 65536;
chn.newRightVol = (realvol * panR) / 65536;
} else
{
chn.newLeftVol = (realvol * (256 - pan)) / 256;
chn.newRightVol = (realvol * pan) / 256;
}
}
// Clipping volumes
//if (chn.nNewRightVol > 0xFFFF) chn.nNewRightVol = 0xFFFF;
//if (chn.nNewLeftVol > 0xFFFF) chn.nNewLeftVol = 0xFFFF;
if(chn.pModInstrument && Resampling::IsKnownMode(chn.pModInstrument->resampling))
{
// For defined resampling modes, use per-instrument resampling mode if set
chn.resamplingMode = chn.pModInstrument->resampling;
} else if(Resampling::IsKnownMode(m_nResampling))
{
chn.resamplingMode = m_nResampling;
} else if(m_SongFlags[SONG_ISAMIGA] && m_Resampler.m_Settings.emulateAmiga != Resampling::AmigaFilter::Off)
{
// Enforce Amiga resampler for Amiga modules
chn.resamplingMode = SRCMODE_AMIGA;
} else
{
// Default to global mixer settings
chn.resamplingMode = m_Resampler.m_Settings.SrcMode;
}
if(chn.increment.IsUnity() && !(chn.dwFlags[CHN_VIBRATO] || chn.nAutoVibDepth || chn.resamplingMode == SRCMODE_AMIGA))
{
// Exact sample rate match, do not interpolate at all
// - unless vibrato is applied, because in this case the constant enabling and disabling
// of resampling can introduce clicks (this is easily observable with a sine sample
// played at the mix rate).
chn.resamplingMode = SRCMODE_NEAREST;
}
const int extraAttenuation = m_PlayConfig.getExtraSampleAttenuation();
chn.newLeftVol /= (1 << extraAttenuation);
chn.newRightVol /= (1 << extraAttenuation);
// Dolby Pro-Logic Surround
if(chn.dwFlags[CHN_SURROUND] && m_MixerSettings.gnChannels == 2) chn.newRightVol = -chn.newRightVol;
// Checking Ping-Pong Loops
if(chn.dwFlags[CHN_PINGPONGFLAG]) chn.increment.Negate();
// Setting up volume ramp
ProcessRamping(chn);
// Adding the channel in the channel list
if(!chn.dwFlags[CHN_ADLIB])
{
m_PlayState.ChnMix[m_nMixChannels++] = nChn;
}
} else
{
chn.rightVol = chn.leftVol = 0;
chn.nLength = 0;
// Put the channel back into the mixer for end-of-sample pop reduction
if(chn.nLOfs || chn.nROfs)
m_PlayState.ChnMix[m_nMixChannels++] = nChn;
}
chn.dwOldFlags = chn.dwFlags;
}
// If there are more channels being mixed than allowed, order them by volume and discard the most quiet ones
if(m_nMixChannels >= m_MixerSettings.m_nMaxMixChannels)
{
std::partial_sort(std::begin(m_PlayState.ChnMix), std::begin(m_PlayState.ChnMix) + m_MixerSettings.m_nMaxMixChannels, std::begin(m_PlayState.ChnMix) + m_nMixChannels,
[this](CHANNELINDEX i, CHANNELINDEX j) { return (m_PlayState.Chn[i].nRealVolume > m_PlayState.Chn[j].nRealVolume); });
}
return true;
}
// Evaluates any MIDI macro commands (Zxx / \xx) found on the given pattern channel
// and dispatches them for processing. Only real pattern channels are considered;
// NNA background channels carry no row commands and are ignored.
void CSoundFile::ProcessMacroOnChannel(CHANNELINDEX nChn)
{
	// Background / NNA channels have no pattern data attached to them.
	if(nChn >= GetNumChannels())
		return;

	ModChannel &chn = m_PlayState.Chn[nChn];

	// TODO evaluate per-plugin macros here
	//ProcessMIDIMacro(nChn, false, m_MidiCfg.szMidiGlb[MIDIOUT_PAN]);
	//ProcessMIDIMacro(nChn, false, m_MidiCfg.szMidiGlb[MIDIOUT_VOLUME]);

	// Zxx (CMD_MIDI) triggers on the first tick only; \xx (CMD_SMOOTHMIDI)
	// is interpolated and thus evaluated on every tick.
	const bool isSmoothMacro = (chn.rowCommand.command == CMD_SMOOTHMIDI);
	if(isSmoothMacro || (chn.rowCommand.command == CMD_MIDI && m_SongFlags[SONG_FIRSTTICK]))
	{
		if(chn.rowCommand.param < 0x80)
		{
			// Parametered macro (SF0...SFF), selected via the channel's active macro slot
			ProcessMIDIMacro(nChn, isSmoothMacro, m_MidiCfg.szMidiSFXExt[chn.nActiveMacro], chn.rowCommand.param);
		} else
		{
			// Fixed macro (Z80...ZFF)
			ProcessMIDIMacro(nChn, isSmoothMacro, m_MidiCfg.szMidiZXXExt[(chn.rowCommand.param & 0x7F)], 0);
		}
	}
}
#ifndef NO_PLUGINS
// Translates pattern data (note, volume commands) on the given channel into MIDI
// messages sent to the instrument plugin assigned to that channel.
// Exact ordering and the various m_playBehaviour branches replicate the behaviour
// of older OpenMPT versions for backwards compatibility.
void CSoundFile::ProcessMidiOut(CHANNELINDEX nChn)
{
	ModChannel &chn = m_PlayState.Chn[nChn];

	// Do we need to process MIDI?
	// For now there is no difference between mute and sync mute with VSTis.
	if(chn.dwFlags[CHN_MUTE | CHN_SYNCMUTE] || !chn.HasMIDIOutput()) return;

	// Get instrument info and plugin reference
	const ModInstrument *pIns = chn.pModInstrument;	// Can't be nullptr at this point, as we have valid MIDI output.

	// No instrument or muted instrument?
	if(pIns->dwFlags[INS_MUTE])
	{
		return;
	}

	// Check instrument plugins
	const PLUGINDEX nPlugin = GetBestPlugin(nChn, PrioritiseInstrument, RespectMutes);
	IMixPlugin *pPlugin = nullptr;
	if(nPlugin > 0 && nPlugin <= MAX_MIXPLUGINS)
	{
		pPlugin = m_MixPlugins[nPlugin - 1].pMixPlugin;
	}

	// Couldn't find a valid plugin
	if(pPlugin == nullptr) return;

	const ModCommand::NOTE note = chn.rowCommand.note;
	// Check for volume commands in either the volume column or the effect column.
	// vol stays 0xFF when no volume command is present on this row.
	uint8 vol = 0xFF;
	if(chn.rowCommand.volcmd == VOLCMD_VOLUME)
	{
		vol = std::min(chn.rowCommand.vol, uint8(64));
	} else if(chn.rowCommand.command == CMD_VOLUME)
	{
		vol = std::min(chn.rowCommand.param, uint8(64));
	}
	const bool hasVolCommand = (vol != 0xFF);

	// Legacy mode: old versions always sent the current channel volume as note velocity
	// and mapped an explicit volume command to a fine-volume MIDI CC instead.
	if(m_playBehaviour[kMIDICCBugEmulation])
	{
		if(note != NOTE_NONE)
		{
			ModCommand::NOTE realNote = note;
			if(ModCommand::IsNote(note))
				realNote = pIns->NoteMap[note - NOTE_MIN];	// Apply instrument note mapping
			SendMIDINote(nChn, realNote, static_cast<uint16>(chn.nVolume));
		} else if(hasVolCommand)
		{
			pPlugin->MidiCC(MIDIEvents::MIDICC_Volume_Fine, vol, nChn);
		}
		return;
	}

	const uint32 defaultVolume = pIns->nGlobalVol;

	//If new note, determine notevelocity to use.
	if(note != NOTE_NONE)
	{
		// Default velocity is derived from the instrument's global volume (0...64 -> 0...256)
		int32 velocity = static_cast<int32>(4 * defaultVolume);
		switch(pIns->pluginVelocityHandling)
		{
			case PLUGIN_VELOCITYHANDLING_CHANNEL:
				// Use the current channel volume as velocity instead
				velocity = chn.nVolume;
			break;
			default:
				break;
		}

		// Apply random volume variation; scaled by 4 in IT-compatible mode to match
		// the 4x velocity scale used above.
		int32 swing = chn.nVolSwing;
		if(m_playBehaviour[kITSwingBehaviour]) swing *= 4;
		velocity += swing;
		Limit(velocity, 0, 256);

		ModCommand::NOTE realNote = note;
		if(ModCommand::IsNote(note))
			realNote = pIns->NoteMap[note - NOTE_MIN];
		// Experimental VST panning
		//ProcessMIDIMacro(nChn, false, m_MidiCfg.szMidiGlb[MIDIOUT_PAN], 0, nPlugin);
		SendMIDINote(nChn, realNote, static_cast<uint16>(velocity));
	}

	const bool processVolumeAlsoOnNote = (pIns->pluginVelocityHandling == PLUGIN_VELOCITYHANDLING_VOLUME);
	// kMIDIVolumeOnNoteOffBug: old versions also treated note-off / note-cut rows as "notes" here.
	const bool hasNote = m_playBehaviour[kMIDIVolumeOnNoteOffBug] ? (note != NOTE_NONE) : ModCommand::IsNote(note);

	// Send volume either when a volume command appears without a note, or together with
	// a note if the instrument is configured to process volume on note events as well.
	if((hasVolCommand && !hasNote) || (hasNote && processVolumeAlsoOnNote))
	{
		switch(pIns->pluginVolumeHandling)
		{
			case PLUGIN_VOLUMEHANDLING_DRYWET:
				// Route volume to the plugin's dry/wet ratio (0...64 -> 0...128)
				if(hasVolCommand) pPlugin->SetDryRatio(2 * vol);
				else pPlugin->SetDryRatio(2 * defaultVolume);
				break;

			case PLUGIN_VOLUMEHANDLING_MIDI:
				// Route volume to MIDI CC 7 (coarse channel volume), clamped to the 7-bit range
				if(hasVolCommand) pPlugin->MidiCC(MIDIEvents::MIDICC_Volume_Coarse, std::min(uint8(127), static_cast<uint8>(2 * vol)), nChn);
				else pPlugin->MidiCC(MIDIEvents::MIDICC_Volume_Coarse, static_cast<uint8>(std::min(uint32(127), static_cast<uint32>(2 * defaultVolume))), nChn);
				break;

			default:
				break;
		}
	}
}
#endif // NO_PLUGINS
// Scales an interleaved front (and optional rear) mix buffer by the global volume,
// smoothly ramping towards the target volume while samples of ramping remain.
// channels: 1 = mono, 2 = stereo, 4 = quad (front buffer + rear buffer).
// The high-resolution ramp volume and the remaining ramp sample counter are
// updated in place through the reference parameters.
template<int channels>
MPT_FORCEINLINE void ApplyGlobalVolumeWithRamping(int32 *SoundBuffer, int32 *RearBuffer, int32 lCount, int32 m_nGlobalVolume, int32 step, int32 &m_nSamplesToGlobalVolRampDest, int32 &m_lHighResRampingGlobalVolume)
{
	constexpr bool isStereo = (channels >= 2);
	constexpr bool hasRear = (channels >= 4);

	for(int32 frame = 0; frame < lCount; frame++)
	{
		// Pick the volume / divisor pair for this output frame.
		int32 volume, divisor;
		if(m_nSamplesToGlobalVolRampDest > 0)
		{
			// Ramping still in progress: advance the high-resolution volume by one step.
			m_lHighResRampingGlobalVolume += step;
			m_nSamplesToGlobalVolRampDest--;
			volume = m_lHighResRampingGlobalVolume;
			divisor = MAX_GLOBAL_VOLUME << VOLUMERAMPPRECISION;
		} else
		{
			// No ramping: apply the target volume directly and keep the
			// high-resolution volume in sync for the next ramp.
			volume = m_nGlobalVolume;
			divisor = MAX_GLOBAL_VOLUME;
			m_lHighResRampingGlobalVolume = m_nGlobalVolume << VOLUMERAMPPRECISION;
		}

		SoundBuffer[0] = Util::muldiv(SoundBuffer[0], volume, divisor);
		if constexpr(isStereo)
		{
			SoundBuffer[1] = Util::muldiv(SoundBuffer[1], volume, divisor);
		}
		if constexpr(hasRear)
		{
			RearBuffer[0] = Util::muldiv(RearBuffer[0], volume, divisor);
			RearBuffer[1] = Util::muldiv(RearBuffer[1], volume, divisor);
			RearBuffer += 2;
		} else
		{
			MPT_UNUSED_VARIABLE(RearBuffer);
		}
		SoundBuffer += isStereo ? 2 : 1;
	}
}
// Applies the song's global volume to the rendered mix buffers for the given number
// of output frames, setting up and performing smooth volume ramping whenever the
// global volume target changes.
void CSoundFile::ProcessGlobalVolume(long lCount)
{

	// should we ramp?
	if(IsGlobalVolumeUnset())
	{
		// do not ramp if no global volume was set before (which is the case at song start), to prevent audible glitches when default volume is > 0 and it is set to 0 in the first row
		m_PlayState.m_nGlobalVolumeDestination = m_PlayState.m_nGlobalVolume;
		m_PlayState.m_nSamplesToGlobalVolRampDest = 0;
		m_PlayState.m_nGlobalVolumeRampAmount = 0;
	} else if(m_PlayState.m_nGlobalVolumeDestination != m_PlayState.m_nGlobalVolume)
	{
		// User has provided new global volume
		// m_nGlobalVolume: the last global volume which got set e.g. by a pattern command
		// m_nGlobalVolumeDestination: the current target of the ramping algorithm
		// Ramp-up and ramp-down may use different user-configured lengths.
		const bool rampUp = m_PlayState.m_nGlobalVolume > m_PlayState.m_nGlobalVolumeDestination;
		m_PlayState.m_nGlobalVolumeDestination = m_PlayState.m_nGlobalVolume;
		m_PlayState.m_nSamplesToGlobalVolRampDest = m_PlayState.m_nGlobalVolumeRampAmount = rampUp ? m_MixerSettings.GetVolumeRampUpSamples() : m_MixerSettings.GetVolumeRampDownSamples();
	}

	// calculate ramping step
	int32 step = 0;
	if (m_PlayState.m_nSamplesToGlobalVolRampDest > 0)
	{
		// Still some ramping left to do.
		// The destination is kept at higher resolution (shifted by VOLUMERAMPPRECISION)
		// so that the per-sample step can be fractional in terms of the raw volume.
		int32 highResGlobalVolumeDestination = static_cast<int32>(m_PlayState.m_nGlobalVolumeDestination) << VOLUMERAMPPRECISION;

		const long delta = highResGlobalVolumeDestination - m_PlayState.m_lHighResRampingGlobalVolume;
		step = delta / static_cast<long>(m_PlayState.m_nSamplesToGlobalVolRampDest);

		if(m_nMixLevels == MixLevels::v1_17RC2)
		{
			// Define max step size as some factor of user defined ramping value: the lower the value, the more likely the click.
			// If step is too big (might cause click), extend ramp length.
			// Warning: This increases the volume ramp length by EXTREME amounts (factors of 100 are easily reachable)
			// compared to the user-defined setting, so this really should not be used!
			int32 maxStep = std::max(int32(50), static_cast<int32>((10000 / (m_PlayState.m_nGlobalVolumeRampAmount + 1))));
			while(std::abs(step) > maxStep)
			{
				// Stretch the ramp by one more user-defined ramp length and recompute the step
				// until it is small enough; delta stays fixed while the denominator grows.
				m_PlayState.m_nSamplesToGlobalVolRampDest += m_PlayState.m_nGlobalVolumeRampAmount;
				step = delta / static_cast<int32>(m_PlayState.m_nSamplesToGlobalVolRampDest);
			}
		}
	}

	// apply volume and ramping
	if(m_MixerSettings.gnChannels == 1)
	{
		ApplyGlobalVolumeWithRamping<1>(MixSoundBuffer, MixRearBuffer, lCount, m_PlayState.m_nGlobalVolume, step, m_PlayState.m_nSamplesToGlobalVolRampDest, m_PlayState.m_lHighResRampingGlobalVolume);
	} else if(m_MixerSettings.gnChannels == 2)
	{
		ApplyGlobalVolumeWithRamping<2>(MixSoundBuffer, MixRearBuffer, lCount, m_PlayState.m_nGlobalVolume, step, m_PlayState.m_nSamplesToGlobalVolRampDest, m_PlayState.m_lHighResRampingGlobalVolume);
	} else if(m_MixerSettings.gnChannels == 4)
	{
		ApplyGlobalVolumeWithRamping<4>(MixSoundBuffer, MixRearBuffer, lCount, m_PlayState.m_nGlobalVolume, step, m_PlayState.m_nSamplesToGlobalVolRampDest, m_PlayState.m_lHighResRampingGlobalVolume);
	}
	// NOTE(review): other channel counts fall through without applying global volume —
	// presumably gnChannels is restricted to 1/2/4 elsewhere; confirm against mixer setup.
}
// Applies the user-configured stereo separation setting to the front (and rear)
// mix buffers for the current chunk of countChunk rendered frames.
void CSoundFile::ProcessStereoSeparation(long countChunk)
{
	ApplyStereoSeparation(MixSoundBuffer, MixRearBuffer, m_MixerSettings.gnChannels, countChunk, m_MixerSettings.m_nStereoSeparation);
}
OPENMPT_NAMESPACE_END
| 39,259 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.