language | filename | code
---|---|---
Python | wireshark/tools/check_val_to_str.py | #!/usr/bin/env python3
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
# Scan dissectors for calls to val_to_str() and friends,
# checking for appropriate format specifier strings in
# 'unknown' arg.
# TODO:
# - more detailed format specifier checking (check letter, that there is only 1)
# - scan conformance (.cnf) files for ASN1 dissectors?
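# Examples of calls this script would flag (hypothetical dissector snippets):
#   val_to_str(tag, tag_vals, "Unknown")          -> warning: no specifier
#   val_to_str(tag, tag_vals, "Unknown (%s)")     -> error: %s may crash
#   val_to_str_const(tag, tag_vals, "Unk (%d)")   -> error: _const takes no specifier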
import os
import re
import subprocess
import argparse
import signal
# Try to exit soon after Ctrl-C is pressed.
should_exit = False
def signal_handler(sig, frame):
global should_exit
should_exit = True
print('You pressed Ctrl+C - exiting')
signal.signal(signal.SIGINT, signal_handler)
# Test for whether the given file was automatically generated.
def isGeneratedFile(filename):
# Check file exists - e.g. may have been deleted in a recent commit.
if not os.path.exists(filename):
return False
# Open file
f_read = open(os.path.join(filename), 'r', encoding="utf8")
lines_tested = 0
for line in f_read:
# The comment saying that it's generated is near the top, so give up once
# we get a few lines down.
if lines_tested > 10:
f_read.close()
return False
if (line.find('Generated automatically') != -1 or
line.find('Generated Automatically') != -1 or
line.find('Autogenerated from') != -1 or
line.find('is autogenerated') != -1 or
line.find('automatically generated by Pidl') != -1 or
line.find('Created by: The Qt Meta Object Compiler') != -1 or
line.find('This file was generated') != -1 or
line.find('This filter was automatically generated') != -1 or
line.find('This file is auto generated, do not edit!') != -1 or
line.find('This file is auto generated') != -1):
f_read.close()
return True
lines_tested = lines_tested + 1
# OK, looks like a hand-written file!
f_read.close()
return False
def removeComments(code_string):
code_string = re.sub(re.compile(r"/\*.*?\*/",re.DOTALL ) ,"" ,code_string) # C-style comment
code_string = re.sub(re.compile(r"//.*?\n" ) ,"" ,code_string) # C++-style comment
return code_string
def is_dissector_file(filename):
p = re.compile(r'.*packet-.*\.c')
return p.match(filename)
def findDissectorFilesInFolder(folder, recursive=False):
dissector_files = []
if recursive:
for root, subfolders, files in os.walk(folder):
for f in files:
if should_exit:
return []
f = os.path.join(root, f)
dissector_files.append(f)
else:
for f in sorted(os.listdir(folder)):
if should_exit:
return []
filename = os.path.join(folder, f)
dissector_files.append(filename)
return [x for x in filter(is_dissector_file, dissector_files)]
warnings_found = 0
errors_found = 0
# Check the given dissector file.
def checkFile(filename):
global warnings_found
global errors_found
# Check file exists - e.g. may have been deleted in a recent commit.
if not os.path.exists(filename):
print(filename, 'does not exist!')
return
with open(filename, 'r', encoding="utf8") as f:
contents = f.read()
# Remove comments so as not to trip up RE.
contents = removeComments(contents)
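# The RE below matches e.g. (hypothetical call)
#   val_to_str(val, vals, "Unknown (%u)")
# capturing 'val_to_str' and '"Unknown (%u)"'; the negative look-behinds
# skip the try_/char_/bytes-prefixed variants.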
matches = re.finditer(r'(?<!try_)(?<!char_)(?<!bytes)(r?val_to_str(?:_ext|)(?:_const|))\(.*?,.*?,\s*(".*?\")\s*\)', contents)
for m in matches:
function = m.group(1)
format_string = m.group(2)
# Ignore what appears to be a macro.
if format_string.find('#') != -1:
continue
if function.endswith('_const'):
# These ones shouldn't have a specifier - it's an error if they do.
# TODO: I suppose it could be escaped, but haven't seen this...
if format_string.find('%') != -1:
# This is an error as format specifier would show in app
print('Error:', filename, " ", m.group(0), ' - should not have specifiers in unknown string')
errors_found += 1
else:
# These ones need to have a specifier, and it should be suitable for an int
specifier_id = format_string.find('%')
if specifier_id == -1:
print('Warning:', filename, " ", m.group(0), ' - should have suitable format specifier in unknown string (or use _const()?)')
warnings_found += 1
# TODO: check allowed specifiers (d, u, x, ?) and modifiers (0-9*) in re ?
if format_string.find('%s') != -1:
# This is an error as this likely causes a crash
print('Error:', filename, " ", m.group(0), ' - inappropriate format specifier in unknown string')
errors_found += 1
#################################################################
# Main logic.
# command-line args. Controls which dissector files should be checked.
# If no args given, will scan all dissectors.
parser = argparse.ArgumentParser(description='Check calls in dissectors')
parser.add_argument('--file', action='append',
help='specify individual dissector file to test')
parser.add_argument('--commits', action='store',
help='last N commits to check')
parser.add_argument('--open', action='store_true',
help='check open files')
args = parser.parse_args()
# Get files from wherever command-line args indicate.
files = []
if args.file:
# Add specified file(s)
for f in args.file:
if not f.startswith('epan'):
f = os.path.join('epan', 'dissectors', f)
if not os.path.isfile(f):
print('Chosen file', f, 'does not exist.')
exit(1)
else:
files.append(f)
elif args.commits:
# Get files affected by specified number of commits.
command = ['git', 'diff', '--name-only', 'HEAD~' + args.commits]
files = [f.decode('utf-8')
for f in subprocess.check_output(command).splitlines()]
# Will examine dissector files only
files = list(filter(lambda f : is_dissector_file(f), files))
elif args.open:
# Unstaged changes.
command = ['git', 'diff', '--name-only']
files = [f.decode('utf-8')
for f in subprocess.check_output(command).splitlines()]
# Only interested in dissector files.
files = list(filter(lambda f : is_dissector_file(f), files))
# Staged changes.
command = ['git', 'diff', '--staged', '--name-only']
files_staged = [f.decode('utf-8')
for f in subprocess.check_output(command).splitlines()]
# Only interested in dissector files.
files_staged = list(filter(lambda f : is_dissector_file(f), files_staged))
for f in files_staged:
if not f in files:
files.append(f)
else:
# Find all dissector files from folder.
files = findDissectorFilesInFolder(os.path.join('epan', 'dissectors'))
files += findDissectorFilesInFolder(os.path.join('plugins', 'epan'), recursive=True)
files += findDissectorFilesInFolder(os.path.join('epan', 'dissectors', 'asn1'), recursive=True)
# If scanning a subset of files, list them here.
print('Examining:')
if args.file or args.commits or args.open:
if files:
print(' '.join(files), '\n')
else:
print('No files to check.\n')
else:
print('All dissectors\n')
# Now check the chosen files
for f in files:
if should_exit:
exit(1)
if not isGeneratedFile(f):
checkFile(f)
# Show summary.
print(warnings_found, 'warnings found')
if errors_found:
print(errors_found, 'errors found')
exit(1) |
Python | wireshark/tools/colorfilters2js.py | #!/usr/bin/env python
#
# Copyright 2022 by Moshe Kaplan
# Based on colorfilter2js.pl by Dirk Jagdmann <[email protected]>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
# Python script to convert a Wireshark color scheme to JavaScript
# code. The JavaScript function should then be inserted into the
# pdml2html.xsl file.
#
# run this as: python tools/colorfilters2js.py colorfilters
import argparse
import io
import re
import sys
js_prologue = """\
function set_node_color(node, colorname)
{
if (dojo.isString(node))
node = dojo.byId(node);
if (!node) return;
var fg;
var bg;
"""
js_color_entry = """\
{7}if (colorname == '{0}') {{
bg='#{1:02x}{2:02x}{3:02x}';
fg='#{4:02x}{5:02x}{6:02x}';
}}\
"""
js_epilogue = """
if (fg.length > 0)
node.style.color = fg;
if (bg.length > 0)
node.style.background = bg;
}
"""
def generate_javascript(colorlines):
output = [js_prologue]
else_text = ""
for colorline in colorlines:
colorvalues = (colorline[0],
int(colorline[1])//256, int(colorline[2])//256, int(colorline[3])//256,
int(colorline[4])//256, int(colorline[5])//256, int(colorline[6])//256,
else_text)
output += [js_color_entry.format(*colorvalues)]
else_text = "else "
output += [js_epilogue]
return "\n".join(output)
def main():
parser = argparse.ArgumentParser(description="Convert a Wireshark color scheme to javascript code.")
parser.add_argument("files", metavar='files', nargs='+', help="paths to colorfiles")
parsed_args = parser.parse_args()
COLORLINE_PATTERN = r"\@(.+?)\@.+\[(\d+),(\d+),(\d+)\]\[(\d+),(\d+),(\d+)\]"
colorlines = []
# Sample line:
# @Bad TCP@tcp.analysis.flags && !tcp.analysis.window_update@[4626,10023,11822][63479,34695,34695]
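# Fed through COLORLINE_PATTERN and generate_javascript(), that sample
# renders (values divided by 256; illustrative) as:
#   if (colorname == 'Bad TCP') {
#     bg='#12272e';
#     fg='#f78787';
#   }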
# Read the lines from all files:
for filename in parsed_args.files:
with open(filename, encoding='utf-8') as fh:
file_content = fh.read()
colorlines += re.findall(COLORLINE_PATTERN, file_content)
javascript_code = generate_javascript(colorlines)
stdoutu8 = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
stdoutu8.write(javascript_code)
if __name__ == "__main__":
main() |
Python | wireshark/tools/compress-pngs.py | #!/usr/bin/env python3
#
# compress-pngs.py - Compress PNGs
#
# By Gerald Combs <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
'''Run various compression and optimization utilities on one or more PNGs'''
import argparse
import concurrent.futures
import shutil
import subprocess
import sys
PNG_FILE_ARG = '%PNG_FILE_ARG%'
def get_compressors():
# Add *lossless* compressors here.
compressors = {
# https://github.com/shssoichiro/oxipng
'oxipng': { 'args': ['--opt', 'max', '--strip', 'safe', PNG_FILE_ARG] },
# http://optipng.sourceforge.net/
'optipng': { 'args': ['-o3', '-quiet', PNG_FILE_ARG] },
# https://github.com/amadvance/advancecomp
'advpng': { 'args': ['--recompress', '--shrink-insane', PNG_FILE_ARG] },
# https://github.com/amadvance/advancecomp
'advdef': { 'args': ['--recompress', '--shrink-insane', PNG_FILE_ARG] },
# https://pmt.sourceforge.io/pngcrush/
'pngcrush': { 'args': ['-q', '-ow', '-brute', '-reduce', '-noforce', PNG_FILE_ARG, 'pngcrush.$$$$.png'] },
# https://github.com/fhanau/Efficient-Compression-Tool
'ect': { 'args': ['-5', '--mt-deflate', '--mt-file', '-strip', PNG_FILE_ARG]}
}
for compressor in compressors:
compressor_path = shutil.which(compressor)
if compressor_path:
compressors[compressor]['path'] = compressor_path
return compressors
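# If, say, only optipng is on the PATH, get_compressors() returns a dict
# like (illustrative):
#   {'oxipng': {'args': [...]},
#    'optipng': {'args': [...], 'path': '/usr/bin/optipng'}, ...}
# and compress_png() below skips every entry that lacks a 'path'.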
def compress_png(png_file, compressors):
for compressor in compressors:
if not compressors[compressor].get('path', False):
continue
args = compressors[compressor]['args']
args = [arg.replace(PNG_FILE_ARG, png_file) for arg in args]
try:
compress_proc = subprocess.run([compressor] + args)
# subprocess.run() only raises if the command cannot be launched, so report
# a nonzero exit status here rather than in the except clause.
if compress_proc.returncode != 0:
print('{} returned {}'.format(compressor, compress_proc.returncode))
except OSError as e:
print('{} failed to run: {}'.format(compressor, e))
def main():
parser = argparse.ArgumentParser(description='Compress PNGs')
parser.add_argument('--list', action='store_true',
help='List available compressors')
parser.add_argument('png_files', nargs='*', metavar='png file', help='Files to compress')
args = parser.parse_args()
compressors = get_compressors()
c_count = 0
for compressor in compressors:
if 'path' in compressors[compressor]:
c_count += 1
if c_count < 1:
sys.stderr.write('No compressors found\n')
sys.exit(1)
if args.list:
for compressor in compressors:
path = compressors[compressor].get('path', 'Not found')
print('{}: {}'.format(compressor, path))
sys.exit(0)
with concurrent.futures.ProcessPoolExecutor() as executor:
futures = []
for png_file in args.png_files:
print('Compressing {}'.format(png_file))
futures.append(executor.submit(compress_png, png_file, compressors))
concurrent.futures.wait(futures)
if __name__ == '__main__':
main() |
Python | wireshark/tools/convert-glib-types.py | #!/usr/bin/env python3
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
'''\
convert-glib-types.py - Convert glib types to their C and C99 equivalents.
'''
# Imports
import argparse
import glob
import platform
import re
import sys
padded_type_map = {}
type_map = {
'gboolean': 'bool',
'gchar': 'char',
'guchar': 'unsigned char',
'gint': 'int',
'guint': 'unsigned', # Matches README.developer
'glong': 'long',
'gulong': 'unsigned long',
'gint8': 'int8_t',
'gint16': 'int16_t',
'gint32': 'int32_t',
'gint64': 'int64_t',
'guint8': 'uint8_t',
'guint16': 'uint16_t',
'guint32': 'uint32_t',
'guint64': 'uint64_t',
'gfloat': 'float',
'gdouble': 'double',
'gpointer': 'void *',
# Is gsize the same as size_t on the platforms we support?
# https://gitlab.gnome.org/GNOME/glib/-/issues/2493
'gsize': 'size_t',
'gssize': 'ssize_t',
'TRUE': 'true',
'FALSE': 'false',
'G_MAXINT8': 'INT8_MAX',
'G_MAXINT16': 'INT16_MAX',
'G_MAXINT32': 'INT32_MAX',
'G_MAXINT64': 'INT64_MAX',
'G_MAXUINT8': 'UINT8_MAX',
'G_MAXUINT16': 'UINT16_MAX',
'G_MAXUINT32': 'UINT32_MAX',
'G_MAXUINT64': 'UINT64_MAX',
'G_MININT8': 'INT8_MIN',
'G_MININT16': 'INT16_MIN',
'G_MININT32': 'INT32_MIN',
'G_MININT64': 'INT64_MIN',
}
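# For example, a (hypothetical) declaration such as
#   static gboolean dissect_foo(guint8 flags);
# becomes
#   static bool dissect_foo(uint8_t flags);
# The [^"] guards in convert_file() keep quoted strings untouched.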
def convert_file(file):
lines = ''
try:
with open(file, 'r') as f:
lines = f.read()
for glib_type, c99_type in padded_type_map.items():
lines = lines.replace(glib_type, c99_type)
for glib_type, c99_type in type_map.items():
lines = re.sub(rf'([^"])\b{glib_type}\b([^"])', rf'\1{c99_type}\2', lines, flags=re.MULTILINE)
except IsADirectoryError:
sys.stderr.write(f'{file} is a directory.\n')
return
except UnicodeDecodeError:
sys.stderr.write(f"{file} isn't valid UTF-8.\n")
return
except OSError:
sys.stderr.write(f'Unable to open {file}.\n')
return
with open(file, 'w') as f:
f.write(lines)
print(f'Converted {file}')
def main():
parser = argparse.ArgumentParser(description='Convert glib types to their C and C99 equivalents.')
parser.add_argument('files', metavar='FILE', nargs='*')
args = parser.parse_args()
# Build a padded version of type_map which attempts to preserve alignment
for glib_type, c99_type in type_map.items():
pg_type = glib_type + ' '
pc_type = c99_type + ' '
pad_len = max(len(pg_type), len(pc_type))
padded_type_map[f'{pg_type:{pad_len}s}'] = f'{pc_type:{pad_len}s}'
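# e.g. 'guint32' plus two trailing spaces maps to 'uint32_t' plus one space
# (both nine columns), so aligned declaration lists keep their alignment.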
files = []
if platform.system() == 'Windows':
for arg in args.files:
files += glob.glob(arg)
else:
files = args.files
for file in files:
convert_file(file)
# On with the show
if __name__ == "__main__":
sys.exit(main()) |
Perl | wireshark/tools/convert_expert_add_info_format.pl | #!/usr/bin/env perl
#
# Copyright 2013 Michael Mann (see AUTHORS file)
#
# A program to help convert the "old" expert_add_info_format API calls into filterable "items" that
# use the other expert API calls. The program requires 2 passes. "Pass 1" (generate) collects
# the eligible expert_add_info_format calls and outputs the necessary data into a delimited
# file. "Pass 2" (fix-all) takes the data from the delimited file and replaces the
# expert_add_info_format calls with filterable "expert info" calls as well as
generating a separate file for the ei variable declarations and array data.
# The ei "file" can be copy/pasted into the dissector where appropriate
#
# Note that the output from "Pass 1" won't always be a perfect conversion for "Pass 2", so
# "human interaction" is needed as an intermediary to verify and update the delimited file
# before "Pass 2" is done.
#
# Delimited file field format:
# <convert expert_add_info_format_call[1-4]><add ei variable[0|1]><ei var><[GROUP]><[SEVERITY]><[FIELDNAME]><[EXPERTABBREV]>
# <pinfo var><proto_item var><tvb var><offset><length><params>
#
# convert expert_add_info_format call enumerations:
# 1 - expert_add_info
# 2 - expert_add_info_format
# 3 - proto_tree_add_expert
# 4 - proto_tree_add_expert_format
#
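# For example, a (hypothetical) delimited line
#   2;1;ei_foo_bad_len;PI_MALFORMED;PI_ERROR;Bad length: %d;foo.bad_len;pinfo;ti;tvb;offset;1;len
# is rewritten during "Pass 2" as
#   expert_add_info_format(pinfo, ti, &ei_foo_bad_len, "Bad length: %d", len);
#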
# Usage: convert_expert_add_info_format.pl action=<generate|fix-all> <file or files>
#
# Based off of convert_proto_tree_add_text.pl
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
use strict;
use warnings;
use Getopt::Long;
my %EXPERT_SEVERITY = ('PI_COMMENT' => "PI_COMMENT",
'PI_CHAT' => "PI_CHAT",
'PI_NOTE' => "PI_NOTE",
'PI_WARN' => "PI_WARN",
'PI_ERROR' => "PI_ERROR");
my %EXPERT_GROUPS = ('PI_CHECKSUM' => "PI_CHECKSUM",
'PI_SEQUENCE' => "PI_SEQUENCE",
'PI_RESPONSE_CODE' => "PI_RESPONSE_CODE",
'PI_REQUEST_CODE' => "PI_REQUEST_CODE",
'PI_UNDECODED' => "PI_UNDECODED",
'PI_REASSEMBLE' => "PI_REASSEMBLE",
'PI_MALFORMED' => "PI_MALFORMED",
'PI_DEBUG' => "PI_DEBUG",
'PI_PROTOCOL' => "PI_PROTOCOL",
'PI_SECURITY' => "PI_SECURITY",
'PI_COMMENTS_GROUP' => "PI_COMMENTS_GROUP",
'PI_DECRYPTION' => "PI_DECRYPTION",
'PI_ASSUMPTION' => "PI_ASSUMPTION",
'PI_DEPRECATED' => "PI_DEPRECATED");
my @expert_list;
my $protabbrev = "";
# Perl trim function to remove whitespace from the start and end of the string
sub trim($)
{
my $string = shift;
$string =~ s/^\s+//;
$string =~ s/\s+$//;
return $string;
}
# ---------------------------------------------------------------------
#
# MAIN
#
my $helpFlag = '';
my $action = 'generate';
my $register = '';
my $result = GetOptions(
'action=s' => \$action,
'register' => \$register,
'help|?' => \$helpFlag
);
if (!$result || $helpFlag || !$ARGV[0]) {
usage();
}
sub usage {
print "\nUsage: $0 [--action=generate|fix-all|find-all] FILENAME [...]\n\n";
print " --action = generate (default)\n";
print " generate - create a delimited file (FILENAME.expert_add_info_input) with\n";
print " expert_add_info_format fields in FILENAME(s)\n";
print " fix-all - Use delimited file (FILENAME.expert_add_info_input) to convert\n";
print " expert_add_info_format to \"filterable\" expert API\n";
print " Also generates FILENAME.ei to be copy/pasted into\n";
print " the dissector where appropriate\n\n";
print " --register = generate ei_register_info and expert register function calls\n\n";
exit(1);
}
#
# XXX Outline general algorithm here
#
my $found_total = 0;
my $protabbrev_index;
my $line_number = 0;
while (my $fileName = $ARGV[0]) {
shift;
my $fileContents = '';
die "No such file: \"$fileName\"\n" if (! -e $fileName);
# delete leading './'
$fileName =~ s{ ^ \. / } {}xo;
#determine PROTABBREV for dissector based on file name format of (dirs)/packet-PROTABBREV.c
$protabbrev_index = rindex($fileName, "packet-");
if ($protabbrev_index == -1) {
print "$fileName doesn't fit format of packet-PROTABBREV.c\n";
next;
}
$protabbrev = substr($fileName, $protabbrev_index+length("packet-"));
$protabbrev_index = rindex($protabbrev, ".");
if ($protabbrev_index == -1) {
print "$fileName doesn't fit format of packet-PROTABBREV.c\n";
next;
}
$protabbrev = lc(substr($protabbrev, 0, $protabbrev_index));
# Read in the file (ouch, but it's easier that way)
open(FCI, "<", $fileName) || die("Couldn't open $fileName");
while (<FCI>) {
$fileContents .= $_;
}
close(FCI);
if ($action eq "generate") {
generate_eis(\$fileContents, $fileName);
}
if ($action eq "fix-all") {
# Read in the ei "input" file
$line_number = 0;
my $errors = 0;
open(FCI, "<", $fileName . ".expert_add_info_input") || die("Couldn't open $fileName.expert_add_info_input");
while(my $line=<FCI>){
my @expert_item = split(/;|\n/, $line);
$line_number++;
$errors += verify_line(@expert_item);
push(@expert_list, \@expert_item);
}
close(FCI);
if ($errors > 0) {
print "Aborting conversion.\n";
exit(-1);
}
fix_expert_add_info_format(\$fileContents, $fileName);
# Write out the ei data
output_ei_data($fileName);
# Write out the changed version to a file
open(FCO, ">", $fileName . ".expert_add_info_format");
print FCO "$fileContents";
close(FCO);
}
} # while
exit $found_total;
# ---------------------------------------------------------------------
# Sanity check the data in the .expert_add_info_input file
sub verify_line {
my( @expert_item) = @_;
my $errors = 0;
#do some basic error checking of the file
if (($expert_item[0] eq "1") ||
($expert_item[0] eq "2") ||
($expert_item[0] eq "3") ||
($expert_item[0] eq "4")) {
#expert info conversions
if (!($expert_item[2] =~ /^ei_/)) {
print "$line_number: Poorly formed ei_ variable ($expert_item[2])!\n";
$errors++;
}
} else {
print "$line_number: Bad conversion value!\n";
$errors++;
}
if ($expert_item[1] eq "1") {
if (!($expert_item[2] =~ /^ei_/)) {
print "$line_number: Poorly formed ei_ variable ($expert_item[2])!\n";
$errors++;
}
if (!exists($EXPERT_SEVERITY{$expert_item[4]})) {
print "$line_number: Expert severity value '$expert_item[5]' unknown!\n";
$errors++;
}
if (!exists($EXPERT_GROUPS{$expert_item[3]})) {
print "$line_number: Expert group value '$expert_item[4]' unknown!\n";
$errors++;
}
} elsif ($expert_item[1] ne "0") {
print "$line_number: Bad ei variable generation value!\n";
$errors++;
}
return $errors;
}
sub generate_eis {
my( $fileContentsRef, $fileName) = @_;
my @args;
my $num_items = 0;
my @temp;
my $str_temp;
my $pat;
$pat = qr /
(
(?:expert_add_info_format)\s* \(
(([^[\,;])*\,){4,}
[^;]*
\s* \) \s* ;
)
/xs;
while ($$fileContentsRef =~ / $pat /xgso) {
my @expert_item = (1, 1, "ei_name", "GROUP", "SEVERITY", "fieldfullname", "fieldabbrevname",
"pinfo", "item", "tvb", "offset", "length", "params");
my $arg_loop = 5;
my $str = "${1}\n";
$str =~ tr/\t\n\r/ /d;
$str =~ s/ \s+ / /xg;
#print "$fileName: $str\n";
@args = split(/,/, $str);
#printf "ARGS(%d): %s\n", scalar @args, join("# ", @args);
$args[0] =~ s/expert_add_info_format\s*\(\s*//;
$expert_item[7] = $args[0]; #pinfo
$expert_item[8] = trim($args[1]); #item
$expert_item[3] = trim($args[2]); #GROUP
$expert_item[4] = trim($args[3]); #SEVERITY
$expert_item[5] = trim($args[4]); #fieldfullname
$expert_item[5] =~ s/\"//;
#XXX - conditional?
$expert_item[5] =~ s/\"\s*\)\s*;$//;
$expert_item[5] =~ s/\"$//;
#params
$expert_item[12] = "";
while ($arg_loop < scalar @args) {
$expert_item[12] .= trim($args[$arg_loop]);
if ($arg_loop+1 < scalar @args) {
$expert_item[12] .= ", ";
}
$arg_loop += 1;
}
$expert_item[12] =~ s/\s*\)\s*;$//;
#ei variable name
$expert_item[2] = sprintf("ei_%s_%s", $protabbrev, lc($expert_item[5]));
$expert_item[2] =~ s/\s+|-|:/_/g;
#field abbreviated name
$expert_item[6] = sprintf("%s.%s", $protabbrev, lc($expert_item[5]));
$expert_item[6] =~ s/\s+|-|:/_/g;
push(@expert_list, \@expert_item);
$num_items += 1;
}
if ($num_items > 0) {
open(FCO, ">", $fileName . ".expert_add_info_input");
for my $item (@expert_list) {
print FCO join(";", @{$item}), "\n";
}
close(FCO);
}
}
# ---------------------------------------------------------------------
# Find all expert_add_info_format calls and replace them with the data
# found in expert_list
sub fix_expert_add_info_format {
my( $fileContentsRef, $fileName) = @_;
my $found = 0;
my $pat;
$pat = qr /
(
(?:expert_add_info_format)\s* \(
(([^[\,;])*\,){4,}
[^;]*
\s* \) \s* ;
)
/xs;
$$fileContentsRef =~ s/ $pat /patsub($found, $1)/xges;
}
# ---------------------------------------------------------------------
# Format expert info functions with expert_list data
sub patsub {
my $item_str;
#print $expert_list[$_[0]][2] . " = ";
#print $#{$expert_list[$_[0]]}+1;
#print "\n";
if ($expert_list[$_[0]][0] eq "1") {
$item_str = sprintf("expert_add_info(%s, %s, &%s);",
$expert_list[$_[0]][7], $expert_list[$_[0]][8], $expert_list[$_[0]][2]);
} elsif ($expert_list[$_[0]][0] eq "2") {
$item_str = sprintf("expert_add_info_format(%s, %s, &%s, \"%s\"",
$expert_list[$_[0]][7], $expert_list[$_[0]][8],
$expert_list[$_[0]][2], $expert_list[$_[0]][5]);
if (($#{$expert_list[$_[0]]}+1 > 12 ) && ($expert_list[$_[0]][12] ne "")) {
$item_str .= ", $expert_list[$_[0]][12]";
}
$item_str .= ");";
} elsif ($expert_list[$_[0]][0] eq "3") {
$item_str = sprintf("proto_tree_add_expert(%s, %s, &%s, %s, %s, %s);",
$expert_list[$_[0]][8], $expert_list[$_[0]][7],
$expert_list[$_[0]][2], $expert_list[$_[0]][9],
$expert_list[$_[0]][10], $expert_list[$_[0]][11]);
} elsif ($expert_list[$_[0]][0] eq "4") {
$item_str = sprintf("proto_tree_add_expert_format(%s, %s, &%s, %s, %s, %s, \"%s\"",
$expert_list[$_[0]][8], $expert_list[$_[0]][7], $expert_list[$_[0]][2],
$expert_list[$_[0]][9], $expert_list[$_[0]][10],
$expert_list[$_[0]][11], $expert_list[$_[0]][5]);
if (($#{$expert_list[$_[0]]}+1 > 12) && ($expert_list[$_[0]][12] ne "")) {
$item_str .= ", $expert_list[$_[0]][12]";
}
$item_str .= ");";
}
$_[0] += 1;
return $item_str;
}
# ---------------------------------------------------------------------
# Output the ei variable declarations and expert array. For now, write them to a file.
# XXX - Eventually find the right place to add it to the modified dissector file
sub output_ei_data {
my( $fileName) = @_;
my %eis = ();
my $index;
my $key;
#add ei to hash table to prevent against (accidental) duplicates
for ($index=0;$index<@expert_list;$index++) {
if ($expert_list[$index][1] eq "1") {
$eis{$expert_list[$index][2]} = $expert_list[$index][2];
}
}
open(FCO, ">", $fileName . ".ei");
print FCO "/* Generated from convert_expert_add_info_format.pl */\n";
foreach $key (keys %eis) {
print FCO "static expert_field $key = EI_INIT;\n";
}
print FCO "\n\n";
if ($register ne "") {
print FCO " static ei_register_info ei[] = {\n";
}
%eis = ();
for ($index=0;$index<@expert_list;$index++) {
if ($expert_list[$index][1] eq "1") {
if (exists($eis{$expert_list[$index][2]})) {
print "duplicate ei entry '$expert_list[$index][2]' found! Aborting conversion.\n";
exit(-1);
}
$eis{$expert_list[$index][2]} = $expert_list[$index][2];
print FCO " { &$expert_list[$index][2], { \"$expert_list[$index][6]\", $expert_list[$index][3], ";
print FCO "$expert_list[$index][4], \"$expert_list[$index][5]\", EXPFILL }},\r\n";
}
}
if ($register ne "") {
print FCO " };\n\n\n";
print FCO " expert_module_t* expert_$protabbrev;\n\n";
print FCO " expert_$protabbrev = expert_register_protocol(proto_$protabbrev);\n";
print FCO " expert_register_field_array(expert_$protabbrev, ei, array_length(ei));\n\n";
}
close(FCO);
} |
Perl | wireshark/tools/convert_proto_tree_add_text.pl | #!/usr/bin/env perl
#
# Copyright 2013 Michael Mann (see AUTHORS file)
#
# A program to help convert proto_tree_add_text calls into filterable "items" that
# use proto_tree_add_item. The program requires 2 passes. "Pass 1" (generate) collects
# the eligible proto_tree_add_text calls and outputs the necessary data into a delimited
# file. "Pass 2" (fix-all) takes the data from the delimited file and replaces the
# proto_tree_add_text calls with proto_tree_add_item or "expert info" calls as well as
# generating separate files for the hf and/or ei variable declarations and hf and/or ei array data.
# The hf "files" can be copy/pasted into the dissector where appropriate (until such time as
it's done automatically)
#
# Note that the output from "Pass 1" won't always be a perfect conversion for "Pass 2", so
# "human interaction" is needed as an intermediary to verify and update the delimited file
# before "Pass 2" is done.
# It is also recommended to run checkhf.pl and checkAPIs.pl after "Pass 2" is completed.
#
# Delimited file field format:
# <convert proto_tree_add_text_call[0|1|10-13]><add hf or ei variable[0|1|2]><proto_tree var><hf var><tvb var><offset><length><encoding|[EXPERT_GROUPS]>
# <[FIELDNAME]><[FIELDTYPE]|[EXPERT_SEVERITY]><[FIELDABBREV]><[FIELDDISPLAY]><[FIELDCONVERT]><[BITMASK]>
#
# convert proto_tree_add_text_call enumerations:
# 0 - no conversions
# 1 - proto_tree_add_item
# 10 - expert_add_info
# 11 - expert_add_info_format
# 12 - proto_tree_add_expert
# 13 - proto_tree_add_expert_format
#
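# For example, a (hypothetical) delimited line
#   1;1;tree;hf_foo_flags;tvb;offset;1;ENC_NA;Flags;FT_UINT8;foo.flags;BASE_HEX;NULL;0x0
# is rewritten during "Pass 2" as
#   proto_tree_add_item(tree, hf_foo_flags, tvb, offset, 1, ENC_NA);
# with matching entries emitted to FILENAME.hf and FILENAME.hf_array.
#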
# Usage: convert_proto_tree_add_text.pl action=<generate|fix-all> <file or files>
#
# Lots of code shamelessly borrowed from fix-encoding-args.pl (Thanks Bill!)
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
use strict;
use warnings;
use Getopt::Long;
my %DISPLAY_BASE = ('BASE_NONE' => "BASE_NONE",
'BASE_DEC' => "BASE_DEC",
'BASE_HEX' => "BASE_HEX",
'BASE_OCT' => "BASE_OCT",
'BASE_DEC_HEX' => "BASE_DEC_HEX",
'BASE_HEX_DEC' => "BASE_HEX_DEC",
'BASE_EXT_STRING' => "BASE_EXT_STRING",
'BASE_RANGE_STRING' => "BASE_RANGE_STRING",
'ABSOLUTE_TIME_LOCAL' => "ABSOLUTE_TIME_LOCAL",
'ABSOLUTE_TIME_UTC' => "ABSOLUTE_TIME_UTC",
'ABSOLUTE_TIME_DOY_UTC' => "ABSOLUTE_TIME_DOY_UTC",
'BASE_CUSTOM' => "BASE_CUSTOM");
my %ENCODINGS = ('ENC_BIG_ENDIAN' => "ENC_BIG_ENDIAN",
'ENC_LITTLE_ENDIAN' => "ENC_LITTLE_ENDIAN",
'ENC_TIME_SECS_NSECS' => "ENC_TIME_SECS_NSECS",
'ENC_TIME_NTP' => "ENC_TIME_NTP",
'ENC_ASCII' => "ENC_ASCII",
'ENC_UTF_8' => "ENC_UTF_8",
'ENC_UTF_16' => "ENC_UTF_16",
'ENC_UCS_2' => "ENC_UCS_2",
'ENC_EBCDIC' => "ENC_EBCDIC",
'ENC_NA' => "ENC_NA");
my %FIELD_TYPE = ('FT_NONE' => "FT_NONE", 'FT_PROTOCOL' => "FT_PROTOCOL", 'FT_BOOLEAN' => "FT_BOOLEAN",
'FT_UINT8' => "FT_UINT8", 'FT_UINT16' => "FT_UINT16", 'FT_UINT24' => "FT_UINT24", 'FT_UINT32' => "FT_UINT32", 'FT_UINT64' => "FT_UINT64",
'FT_INT8' => "FT_INT8", 'FT_INT16' => "FT_INT16", 'FT_INT24' => "FT_INT24", 'FT_INT32' => "FT_INT32", 'FT_INT64' => "FT_INT64",
'FT_FLOAT' => "FT_FLOAT", 'FT_DOUBLE' => "FT_DOUBLE",
'FT_ABSOLUTE_TIME' => "FT_ABSOLUTE_TIME", 'FT_RELATIVE_TIME' => "FT_RELATIVE_TIME",
'FT_STRING' => "FT_STRING", 'FT_STRINGZ' => "FT_STRINGZ", 'FT_UINT_STRING' => "FT_UINT_STRING",
'FT_ETHER' => "FT_ETHER", 'FT_BYTES' => "FT_BYTES", 'FT_UINT_BYTES' => "FT_UINT_BYTES",
'FT_IPv4' => "FT_IPv4", 'FT_IPv6' => "FT_IPv6", 'FT_IPXNET' => "FT_IPXNET", 'FT_AX25' => "FT_AX25", 'FT_VINES' => "FT_VINES",
'FT_FRAMENUM' => "FT_FRAMENUM", 'FT_GUID' => "FT_GUID", 'FT_OID' => "FT_OID", 'FT_REL_OID' => "FT_REL_OID", 'FT_EUI64' => "FT_EUI64");
my %EXPERT_SEVERITY = ('PI_COMMENT' => "PI_COMMENT",
'PI_CHAT' => "PI_CHAT",
'PI_NOTE' => "PI_NOTE",
'PI_WARN' => "PI_WARN",
'PI_ERROR' => "PI_ERROR");
my %EXPERT_GROUPS = ('PI_CHECKSUM' => "PI_CHECKSUM",
'PI_SEQUENCE' => "PI_SEQUENCE",
'PI_RESPONSE_CODE' => "PI_RESPONSE_CODE",
'PI_REQUEST_CODE' => "PI_REQUEST_CODE",
'PI_UNDECODED' => "PI_UNDECODED",
'PI_REASSEMBLE' => "PI_REASSEMBLE",
'PI_MALFORMED' => "PI_MALFORMED",
'PI_DEBUG' => "PI_DEBUG",
'PI_PROTOCOL' => "PI_PROTOCOL",
'PI_SECURITY' => "PI_SECURITY",
'PI_COMMENTS_GROUP' => "PI_COMMENTS_GROUP",
'PI_DECRYPTION' => "PI_DECRYPTION",
'PI_ASSUMPTION' => "PI_ASSUMPTION",
'PI_DEPRECATED' => "PI_DEPRECATED");
my @proto_tree_list;
my @expert_list;
my $protabbrev = "";
# Perl trim function to remove whitespace from the start and end of the string
sub trim($)
{
my $string = shift;
$string =~ s/^\s+//;
$string =~ s/\s+$//;
return $string;
}
# ---------------------------------------------------------------------
#
# MAIN
#
my $helpFlag = '';
my $action = 'generate';
my $encoding = '';
my $expert = '';
my $result = GetOptions(
'action=s' => \$action,
'encoding=s' => \$encoding,
'expert' => \$expert,
'help|?' => \$helpFlag
);
if (!$result || $helpFlag || !$ARGV[0]) {
usage();
}
sub usage {
print "\nUsage: $0 [--action=generate|fix-all|find-all] [--encoding=ENC_BIG_ENDIAN|ENC_LITTLE_ENDIAN] FILENAME [...]\n\n";
print " --action = generate (default)\n";
print " generate - create a delimited file (FILENAME.proto_tree_input) with\n";
print " proto_tree_add_text fields in FILENAME(s)\n";
print " fix-all - Use delimited file (FILENAME.proto_tree_input) to convert\n";
print " proto_tree_add_text to proto_tree_add_item\n";
print " Also generates FILENAME.hf and FILENAME.hf_array to be\n";
print " copy/pasted into the dissector where appropriate\n";
print " find-all - Output the number of eligible proto_tree_add_text calls\n";
print " for conversion\n\n";
print " --expert (Optional) Includes proto_tree_add_text calls with no printf arguments in\n";
print " the .proto_tree_input file as they could be converted to expert info\n";
print " (otherwise they are ignored)\n";
print " Must be called for 'fix-all' if called on 'generate'\n";
print " --encoding (Optional) Default encoding if one can't be determined\n";
print " (effective only for generate)\n";
print " If not specified, an encoding will not be auto-populated\n";
print " if undetermined\n\n";
exit(1);
}
#
# XXX Outline general algorithm here
#
my $found_total = 0;
my $protabbrev_index;
my $line_number = 0;
while (my $fileName = $ARGV[0]) {
shift;
my $fileContents = '';
die "No such file: \"$fileName\"\n" if (! -e $fileName);
# delete leading './'
$fileName =~ s{ ^ \. / } {}xo;
#determine PROTABBREV for dissector based on file name format of (dirs)/packet-PROTABBREV.c
$protabbrev_index = rindex($fileName, "packet-");
if ($protabbrev_index == -1) {
print "$fileName doesn't fit format of packet-PROTABBREV.c\n";
next;
}
$protabbrev = substr($fileName, $protabbrev_index+length("packet-"));
$protabbrev_index = rindex($protabbrev, ".");
if ($protabbrev_index == -1) {
print "$fileName doesn't fit format of packet-PROTABBREV.c\n";
next;
}
$protabbrev = lc(substr($protabbrev, 0, $protabbrev_index));
# Read in the file (ouch, but it's easier that way)
open(FCI, "<", $fileName) || die("Couldn't open $fileName");
while (<FCI>) {
$fileContents .= $_;
}
close(FCI);
if ($action eq "generate") {
generate_hfs(\$fileContents, $fileName);
}
if ($action eq "fix-all") {
# Read in the hf "input" file
$line_number = 0;
my $errors = 0;
open(FCI, "<", $fileName . ".proto_tree_input") || die("Couldn't open $fileName.proto_tree_input");
while(my $line=<FCI>){
my @proto_tree_item = split(/;|\n/, $line);
$line_number++;
$errors += verify_line(@proto_tree_item);
push(@proto_tree_list, \@proto_tree_item);
if ($proto_tree_item[1] eq "2") {
push(@expert_list, \@proto_tree_item);
}
}
close(FCI);
if ($errors > 0) {
print "Aborting conversion.\n";
exit(-1);
}
fix_proto_tree_add_text(\$fileContents, $fileName);
# Write out the hf data
output_hf_array($fileName);
output_hf($fileName);
# Write out the changed version to a file
open(FCO, ">", $fileName . ".proto_tree_add_text");
print FCO "$fileContents";
close(FCO);
}
if ($action eq "find-all") {
# Find all proto_tree_add_text() statements eligible for conversion
$found_total += find_all(\$fileContents, $fileName);
}
} # while
exit $found_total;
# ---------------------------------------------------------------------
# Sanity check the data in the .proto_tree_input file
sub verify_line {
my( @proto_tree_item) = @_;
my $errors = 0;
#do some basic error checking of the file
if ($proto_tree_item[0] eq "1") {
if (!($proto_tree_item[3] =~ /^hf_/)) {
print "$line_number: Poorly formed hf_ variable ($proto_tree_item[3])!\n";
$errors++;
}
foreach (split(/\|/, $proto_tree_item[7])) {
if (!exists($ENCODINGS{$_})) {
print "$line_number: Encoding value '$_' unknown!\n";
$errors++;
}
}
} elsif (($proto_tree_item[0] eq "10") ||
($proto_tree_item[0] eq "11") ||
($proto_tree_item[0] eq "12") ||
($proto_tree_item[0] eq "13")) {
#expert info conversions
if (!($proto_tree_item[3] =~ /^ei_/)) {
print "$line_number: Poorly formed ei_ variable ($proto_tree_item[3])!\n";
$errors++;
}
} elsif ($proto_tree_item[0] ne "0") {
print "Bad conversion value! Aborting conversion.\n";
$errors++;
}
if ($proto_tree_item[1] eq "1") {
if (!($proto_tree_item[3] =~ /^hf_/)) {
print "$line_number: Poorly formed hf_ variable ($proto_tree_item[3])!\n";
$errors++;
}
if (!exists($FIELD_TYPE{$proto_tree_item[9]})) {
print "$line_number: Field type '$proto_tree_item[9]' unknown!\n";
$errors++;
}
foreach (split(/\|/, $proto_tree_item[11])) {
if ((!exists($DISPLAY_BASE{$_})) &&
(!($proto_tree_item[11] =~ /\d+/))) {
print "$line_number: Display base '$proto_tree_item[11]' unknown!\n";
$errors++;
}
}
if (($proto_tree_item[9] eq "FT_UINT8") ||
($proto_tree_item[9] eq "FT_UINT16") ||
($proto_tree_item[9] eq "FT_UINT24") ||
($proto_tree_item[9] eq "FT_UINT32") ||
($proto_tree_item[9] eq "FT_UINT64") ||
($proto_tree_item[9] eq "FT_INT8") ||
($proto_tree_item[9] eq "FT_INT16") ||
($proto_tree_item[9] eq "FT_INT24") ||
($proto_tree_item[9] eq "FT_INT32") ||
($proto_tree_item[9] eq "FT_INT64")) {
if ($proto_tree_item[11] eq "BASE_NONE") {
print "$line_number: Interger type should not be BASE_NONE!\n";
$errors++;
}
}
} elsif ($proto_tree_item[1] eq "2") {
if (!($proto_tree_item[3] =~ /^ei_/)) {
print "$line_number: Poorly formed ei_ variable ($proto_tree_item[3])!\n";
$errors++;
}
if (!exists($EXPERT_SEVERITY{$proto_tree_item[9]})) {
print "$line_number: Expert severity value '$proto_tree_item[9]' unknown!\n";
$errors++;
}
if (!exists($EXPERT_GROUPS{$proto_tree_item[7]})) {
print "$line_number: Expert group value '$proto_tree_item[7]' unknown!\n";
$errors++;
}
} elsif ($proto_tree_item[1] ne "0") {
print "$line_number: Bad hf/ei variable generation value!\n";
$errors++;
}
return $errors;
}
sub generate_hfs {
my( $fileContentsRef, $fileName) = @_;
my @args;
my $num_items = 0;
my @temp;
my $str_temp;
my $pat;
if ($expert ne "") {
$pat = qr /
(
(?:proto_tree_add_text)\s* \(
(([^[\,;])*\,){4,}
[^;]*
\s* \) \s* ;
)
/xs;
} else {
$pat = qr /
(
(?:proto_tree_add_text)\s* \(
(([^[\,;])*\,){5,}
[^;]*
\s* \) \s* ;
)
/xs;
}
while ($$fileContentsRef =~ / $pat /xgso) {
my @proto_tree_item = (1, 1, "tree", "hf_name", "tvb", "offset", "length", "encoding",
"fieldfullname", "fieldtype", "fieldabbrevname", "BASE_NONE", "NULL", "0x0");
my $str = "${1}\n";
$str =~ tr/\t\n\r/ /d;
$str =~ s/ \s+ / /xg;
#print "$fileName: $str\n";
@args = split(/,/, $str);
#printf "ARGS(%d): %s\n", scalar @args, join("# ", @args);
$args[0] =~ s/proto_tree_add_text\s*\(\s*//;
$proto_tree_item[2] = $args[0]; #tree
$proto_tree_item[4] = trim($args[1]); #tvb
$proto_tree_item[5] = trim($args[2]); #offset
$proto_tree_item[6] = trim($args[3]); #length
if (scalar @args == 5) {
#remove the "); at the end
$args[4] =~ s/\"\s*\)\s*;$//;
}
#encoding
if (scalar @args > 5) {
if (($proto_tree_item[6] eq "1") ||
($args[5] =~ /tvb_get_guint8/) ||
($args[5] =~ /tvb_bytes_to_str/) ||
($args[5] =~ /tvb_ether_to_str/)) {
$proto_tree_item[7] = "ENC_NA";
} elsif ($args[5] =~ /tvb_get_ntoh/) {
$proto_tree_item[7] = "ENC_BIG_ENDIAN";
} elsif ($args[5] =~ /tvb_get_letoh/) {
$proto_tree_item[7] = "ENC_LITTLE_ENDIAN";
} elsif (($args[5] =~ /tvb_get_ephemeral_string/) ||
($args[5] =~ /tvb_format_text/)){
$proto_tree_item[7] = "ENC_NA|ENC_ASCII";
} elsif ($encoding ne "") {
$proto_tree_item[7] = $encoding;
}
}
#field full name
if (($expert ne "") || (scalar @args > 5)) {
my @arg_temp = split(/=|:/, $args[4]);
$proto_tree_item[8] = $arg_temp[0];
} else {
$proto_tree_item[8] = $args[4];
}
$proto_tree_item[8] =~ s/\"//;
$proto_tree_item[8] = trim($proto_tree_item[8]);
if ($proto_tree_item[8] eq "%s\"") {
#assume proto_tree_add_text will not be converted
$proto_tree_item[0] = 0;
$proto_tree_item[1] = 0;
$proto_tree_item[3] = sprintf("hf_%s_", $protabbrev);
$proto_tree_item[10] = sprintf("%s.", $protabbrev);
} else {
#hf variable name
$proto_tree_item[3] = sprintf("hf_%s_%s", $protabbrev, lc($proto_tree_item[8]));
$proto_tree_item[3] =~ s/\s+|-|:/_/g;
#field abbreviated name
$proto_tree_item[10] = sprintf("%s.%s", $protabbrev, lc($proto_tree_item[8]));
$proto_tree_item[10] =~ s/\s+|-|:/_/g;
}
#VALS
if ($str =~ /val_to_str(_const)?\(\s*tvb_get_[^\(]*\([^\,]*,[^\)]*\)\s*\,\s*([^\,]*)\s*\,\s*([^\)]*)\)/) {
$proto_tree_item[12] = sprintf("VALS(%s)", trim($2));
} elsif ($str =~ /val_to_str(_const)?\([^\,]*\,([^\,]*)\,/) {
$proto_tree_item[12] = sprintf("VALS(%s)", trim($2));
} elsif ($str =~ /val_to_str_ext(_const)?\(\s*tvb_get_[^\(]*\([^\,]*,[^\)]*\)\s*\,\s*([^\,]*)\s*\,\s*([^\)]*)\)/) {
$proto_tree_item[12] = trim($2);
} elsif ($str =~ /val_to_str_ext(_const)?\([^\,]*\,([^\,]*)\,/) {
$proto_tree_item[12] = trim($2);
}
#field type
if (scalar @args > 5) {
if ($args[5] =~ /tvb_get_guint8/) {
if ($args[4] =~ /%[0-9]*[i]/) {
$proto_tree_item[9] = "FT_INT8";
} else {
$proto_tree_item[9] = "FT_UINT8";
}
} elsif ($args[5] =~ /tvb_get_(n|le)tohs/) {
if ($args[4] =~ /%[0-9]*[i]/) {
$proto_tree_item[9] = "FT_INT16";
} else {
$proto_tree_item[9] = "FT_UINT16";
}
} elsif ($args[5] =~ /tvb_get_(n|le)toh24/) {
if ($args[4] =~ /%[0-9]*[i]/) {
$proto_tree_item[9] = "FT_INT24";
} else {
$proto_tree_item[9] = "FT_UINT24";
}
} elsif ($args[5] =~ /tvb_get_(n|le)tohl/) {
if ($args[4] =~ /%[0-9]*[i]/) {
$proto_tree_item[9] = "FT_INT32";
} else {
$proto_tree_item[9] = "FT_UINT32";
}
} elsif ($args[5] =~ /tvb_get_(n|le)toh(40|48|56|64)/) {
if ($args[4] =~ /%[0-9]*[i]/) {
$proto_tree_item[9] = "FT_INT64";
} else {
$proto_tree_item[9] = "FT_UINT64";
}
} elsif (($args[5] =~ /tvb_get_(n|le)tohieee_float/) ||
($args[4] =~ /%[0-9\.]*[fFeEgG]/)) {
$proto_tree_item[9] = "FT_FLOAT";
} elsif ($args[5] =~ /tvb_get_(n|le)tohieee_double/) {
$proto_tree_item[9] = "FT_DOUBLE";
} elsif (($args[5] =~ /tvb_get_ipv4/) ||
($args[5] =~ /tvb_ip_to_str/)) {
$proto_tree_item[9] = "FT_IPv4";
} elsif (($args[5] =~ /tvb_get_ipv6/) ||
($args[5] =~ /tvb_ip6_to_str/)) {
$proto_tree_item[9] = "FT_IPv6";
} elsif ($args[5] =~ /tvb_get_(n|le)tohguid/) {
$proto_tree_item[9] = "FT_GUID";
} elsif ($args[5] =~ /tvb_get_ephemeral_stringz/) {
$proto_tree_item[9] = "FT_STRINGZ";
} elsif (($args[5] =~ /tvb_get_ephemeral_string/) ||
($args[5] =~ /tvb_format_text/)){
$proto_tree_item[9] = "FT_STRING";
} elsif (($args[5] =~ /tvb_bytes_to_str/)) {
$proto_tree_item[9] = "FT_BYTES";
} elsif ($args[5] =~ /tvb_ether_to_str/) {
$proto_tree_item[9] = "FT_ETHER";
}
#if we still can't determine type, assume a constant length
#value means we have an unsigned value
if ($proto_tree_item[9] eq "fieldtype") {
my $len_str = trim($args[3]);
if ($len_str eq "1") {
$proto_tree_item[9] = "FT_UINT8";
} elsif ($len_str eq "2") {
$proto_tree_item[9] = "FT_UINT16";
} elsif ($len_str eq "3") {
$proto_tree_item[9] = "FT_UINT24";
} elsif ($len_str eq "4") {
$proto_tree_item[9] = "FT_UINT32";
} elsif ($len_str eq "8") {
$proto_tree_item[9] = "FT_UINT64";
}
}
}
#display base
if ($args[4] =~ /%[0-9]*[xX]/) {
$proto_tree_item[11] = "BASE_HEX";
} elsif ($args[4] =~ /%[0-9]*[uld]/) {
$proto_tree_item[11] = "BASE_DEC";
} elsif ($args[4] =~ /%[0-9]*o/) {
$proto_tree_item[11] = "BASE_OCT";
}
if ($str =~ /val_to_str_ext(_const)?\([^\,]*\,([^\,]*)\,/) {
$proto_tree_item[11] .= "|BASE_EXT_STRING";
}
if (($proto_tree_item[7] eq "encoding") && ($proto_tree_item[9] eq "FT_BYTES")) {
$proto_tree_item[7] = "ENC_NA";
}
push(@proto_tree_list, \@proto_tree_item);
$num_items += 1;
}
if ($num_items > 0) {
open(FCO, ">", $fileName . ".proto_tree_input");
for my $item (@proto_tree_list) {
print FCO join(";", @{$item}), "\n";
}
close(FCO);
}
}
# ---------------------------------------------------------------------
# Find all proto_tree_add_text calls and replace them with the data
# found in proto_tree_list
sub fix_proto_tree_add_text {
my( $fileContentsRef, $fileName) = @_;
my $found = 0;
my $pat;
if ($expert ne "") {
$pat = qr /
(
(?:proto_tree_add_text)\s* \(
(([^[\,;])*\,){4,}
[^;]*
\s* \) \s* ;
)
/xs;
} else {
$pat = qr /
(
(?:proto_tree_add_text)\s* \(
(([^[\,;])*\,){5,}
[^;]*
\s* \) \s* ;
)
/xs;
}
$$fileContentsRef =~ s/ $pat /patsub($found, $1)/xges;
}
# ---------------------------------------------------------------------
# Format proto_tree_add_item or expert info functions with proto_tree_list data
sub patsub {
my $item_str;
if ($proto_tree_list[$_[0]][0] eq "1") {
$item_str = sprintf("proto_tree_add_item(%s, %s, %s, %s, %s, %s);",
$proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3],
$proto_tree_list[$_[0]][4], $proto_tree_list[$_[0]][5],
$proto_tree_list[$_[0]][6], $proto_tree_list[$_[0]][7]);
} elsif ($proto_tree_list[$_[0]][0] eq "10") {
$item_str = sprintf("expert_add_info(pinfo, %s, &%s);",
$proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3]);
} elsif ($proto_tree_list[$_[0]][0] eq "11") {
$item_str = sprintf("expert_add_info_format(pinfo, %s, &%s, \"%s\"",
$proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3],
$proto_tree_list[$_[0]][8]);
if ($proto_tree_list[$_[0]][11] ne "") {
$item_str .= ", $proto_tree_list[$_[0]][11]";
}
$item_str .= ");";
} elsif ($proto_tree_list[$_[0]][0] eq "12") {
$item_str = sprintf("proto_tree_add_expert(%s, pinfo, &%s, %s, %s, %s);",
$proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3],
$proto_tree_list[$_[0]][4], $proto_tree_list[$_[0]][5],
$proto_tree_list[$_[0]][6]);
} elsif ($proto_tree_list[$_[0]][0] eq "13") {
$item_str = sprintf("proto_tree_add_expert_format(%s, pinfo, &%s, %s, %s, %s, \"%s\"",
$proto_tree_list[$_[0]][2], $proto_tree_list[$_[0]][3],
$proto_tree_list[$_[0]][4], $proto_tree_list[$_[0]][5],
$proto_tree_list[$_[0]][6], $proto_tree_list[$_[0]][8]);
if ($proto_tree_list[$_[0]][11] ne "") {
$item_str .= ", $proto_tree_list[$_[0]][11]";
}
$item_str .= ");";
} else {
$item_str = $1;
}
$_[0] += 1;
return $item_str;
}
# ---------------------------------------------------------------------
# Output the hf variable declarations. For now, write them to a file.
# XXX - Eventually find the right place to add it to the modified dissector file
sub output_hf {
my( $fileName) = @_;
my %hfs = ();
my %eis = ();
my $index;
my $key;
open(FCO, ">", $fileName . ".hf");
print FCO "/* Generated from convert_proto_tree_add_text.pl */\n";
#add hfs to hash table to prevent against (accidental) duplicates
for ($index=0;$index<@proto_tree_list;$index++) {
if ($proto_tree_list[$index][1] eq "1") {
$hfs{$proto_tree_list[$index][3]} = $proto_tree_list[$index][3];
print FCO "static int $proto_tree_list[$index][3] = -1;\n";
} elsif ($proto_tree_list[$index][1] eq "2") {
$eis{$proto_tree_list[$index][3]} = $proto_tree_list[$index][3];
}
}
if (scalar keys %hfs > 0) {
print FCO "\n\n";
}
print FCO "/* Generated from convert_proto_tree_add_text.pl */\n";
foreach $key (keys %eis) {
print FCO "static expert_field $key = EI_INIT;\n";
}
close(FCO);
}
# ---------------------------------------------------------------------
# Output the hf array items. For now, write them to a file.
# XXX - Eventually find the right place to add it to the modified dissector file
# (bonus points if formatting of hf array in dissector file is kept)
sub output_hf_array {
my( $fileName) = @_;
my $index;
my %hfs = ();
my %eis = ();
open(FCO, ">", $fileName . ".hf_array");
print FCO " /* Generated from convert_proto_tree_add_text.pl */\n";
for ($index=0;$index<@proto_tree_list;$index++) {
if ($proto_tree_list[$index][1] eq "1") {
if (exists($hfs{$proto_tree_list[$index][3]})) {
print "duplicate hf entry '$proto_tree_list[$index][3]' found! Aborting conversion.\n";
exit(-1);
}
$hfs{$proto_tree_list[$index][3]} = $proto_tree_list[$index][3];
print FCO " { &$proto_tree_list[$index][3], { \"$proto_tree_list[$index][8]\", \"$proto_tree_list[$index][10]\", ";
print FCO "$proto_tree_list[$index][9], $proto_tree_list[$index][11], $proto_tree_list[$index][12], $proto_tree_list[$index][13], NULL, HFILL }},\r\n";
}
}
if ($index > 0) {
print FCO "\n\n";
}
print FCO " /* Generated from convert_proto_tree_add_text.pl */\n";
for ($index=0;$index<@expert_list;$index++) {
if (exists($eis{$expert_list[$index][3]})) {
print "duplicate ei entry '$expert_list[$index][3]' found! Aborting conversion.\n";
exit(-1);
}
$eis{$expert_list[$index][3]} = $expert_list[$index][3];
print FCO " { &$expert_list[$index][3], { \"$expert_list[$index][10]\", $expert_list[$index][7], ";
print FCO "$expert_list[$index][9], \"$expert_list[$index][8]\", EXPFILL }},\r\n";
}
close(FCO);
}
# ---------------------------------------------------------------------
# Find all proto_tree_add_text calls that have parameters passed in them
# and output number found
sub find_all {
my( $fileContentsRef, $fileName) = @_;
my $found = 0;
my $tvb_found = 0;
my $pat;
my $tvb_percent;
if ($expert ne "") {
$pat = qr /
(
(?:proto_tree_add_text)\s* \(
(([^[\,;])*\,){4,}
[^;]*
\s* \) \s* ;
)
/xs;
} else {
$pat = qr /
(
(?:proto_tree_add_text)\s* \(
(([^[\,;])*\,){5,}
[^;]*
\s* \) \s* ;
)
/xs;
}
while ($$fileContentsRef =~ / $pat /xgso) {
my $str = "${1}\n";
my @args = split(/,/, ${1});
#cleanup whitespace to show proto_tree_add_text in single line (easier for seeing grep results)
$str =~ tr/\t\n\r/ /d;
$str =~ s/ \s+ / /xg;
#print "$fileName: $str\n";
#find all instances where proto_tree_add_text has a tvb_get (or similar) call, because
#convert_proto_tree_add_text.pl has an easier time determining hf_ field values with it
if (scalar @args > 5) {
my $tvb = trim($args[5]);
if ($tvb =~ /^tvb_/) {
$tvb_found += 1;
}
}
$found += 1;
}
if ($found > 0) {
if ($tvb_found > 0) {
$tvb_percent = 100*$tvb_found/$found;
printf "%s: Found %d proto_tree_add_text calls eligible for conversion, %d contain a \"tvb get\" call (%.2f%%).\n",
$fileName, $found, $tvb_found, $tvb_percent;
} else {
print "$fileName: Found $found proto_tree_add_text calls eligible for conversion, 0 \"tvb get\" calls.\n";
}
}
return $found;
} |
Shell Script | wireshark/tools/debian-nightly-package.sh | #!/bin/bash
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
set -e
if test -z "$1"; then
echo "Usage:"
echo " $0 <distribution>"
echo " e.g: $0 xenial"
exit 1
fi
DIST=$1
VERSION=$(git describe --tags | sed 's/v//;s/-/~/g;s/rc/~rc/')
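# e.g. (illustrative) "v4.1.0rc0-123-gabcdef0" becomes "4.1.0~rc0~123~gabcdef0"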
ln --symbolic --no-dereference --force packaging/debian ./debian
rm packaging/debian/changelog || true
EDITOR=touch dch -p --package wireshark --create --force-distribution -v${VERSION}~${DIST}1 -D $DIST
sed -i 's/\* Initial release.*/* Nightly build for '${DIST^}'/' packaging/debian/changelog
dpkg-buildpackage -S -d |
Shell Script | wireshark/tools/debian-setup.sh | #!/bin/bash
# Setup development environment on Debian and derivatives such as Ubuntu
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# We drag in tools that might not be needed by all users; it's easier
# that way.
#
set -e -u -o pipefail
function print_usage() {
printf "\\nUtility to setup a debian-based system for Wireshark Development.\\n"
printf "The basic usage installs the needed software\\n\\n"
printf "Usage: %s [--install-optional] [--install-deb-deps] [...other options...]\\n" "$0"
printf "\\t--install-optional: install optional software as well\\n"
printf "\\t--install-deb-deps: install packages required to build the .deb file\\n"
printf "\\t--install-test-deps: install packages required to run all tests\\n"
printf "\\t--install-qt5-deps: force installation of packages required to use Qt5\\n"
printf "\\t--install-qt6-deps: force installation of packages required to use Qt6\\n"
printf "\\t--install-all: install everything\\n"
printf "\\t[other]: other options are passed as-is to apt\\n"
}
ADDITIONAL=0
DEBDEPS=0
TESTDEPS=0
ADD_QT5=0
ADD_QT6=0
HAVE_ADD_QT=0
OPTIONS=
for arg; do
case $arg in
--help)
print_usage
exit 0
;;
--install-optional)
ADDITIONAL=1
;;
--install-deb-deps)
DEBDEPS=1
;;
--install-test-deps)
TESTDEPS=1
;;
--install-qt5-deps)
ADD_QT5=1
;;
--install-qt6-deps)
ADD_QT6=1
;;
--install-all)
ADDITIONAL=1
DEBDEPS=1
TESTDEPS=1
ADD_QT5=1
ADD_QT6=1
HAVE_ADD_QT=1
;;
*)
OPTIONS="$OPTIONS $arg"
;;
esac
done
# Check if the user is root
if [ "$(id -u)" -ne 0 ]
then
echo "You must be root."
exit 1
fi
BASIC_LIST="gcc \
g++ \
libglib2.0-dev \
libc-ares-dev \
libpcap-dev \
libpcre2-dev \
flex \
make \
python3 \
libgcrypt-dev \
libspeexdsp-dev"
QT5_LIST="qttools5-dev \
qttools5-dev-tools \
libqt5svg5-dev \
qtmultimedia5-dev \
qtbase5-dev \
qtchooser \
qt5-qmake \
qtbase5-dev-tools"
QT6_LIST="qt6-base-dev \
qt6-multimedia-dev \
qt6-tools-dev \
qt6-tools-dev-tools \
qt6-l10n-tools \
libqt6core5compat6-dev \
freeglut3-dev \
libvulkan-dev \
libxkbcommon-dev"
if [ $ADD_QT5 -ne 0 ]
then
BASIC_LIST="$BASIC_LIST $QT5_LIST"
HAVE_ADD_QT=1
fi
if [ $ADD_QT6 -ne 0 ]
then
BASIC_LIST="$BASIC_LIST $QT6_LIST"
HAVE_ADD_QT=1
fi
if [ $HAVE_ADD_QT -eq 0 ]
then
# Try to select Qt version from distro
test -e /etc/os-release && os_release='/etc/os-release' || os_release='/usr/lib/os-release'
# shellcheck disable=SC1090
. "${os_release}"
# Ubuntu 22.04 (jammy) or later
MAJOR=$(echo "$VERSION_ID" | cut -f1 -d.)
if [ "${ID:-linux}" = "ubuntu" ] && [ "${MAJOR:-0}" -ge "22" ]; then
echo "Installing Qt6."
BASIC_LIST="$BASIC_LIST $QT6_LIST"
else
echo "Installing Qt5."
BASIC_LIST="$BASIC_LIST $QT5_LIST"
fi
fi
ADDITIONAL_LIST="libnl-3-dev \
libkrb5-dev \
libsmi2-dev \
libsbc-dev \
liblua5.2-dev \
libnl-cli-3-dev \
libparse-yapp-perl \
libcap-dev \
liblz4-dev \
libsnappy-dev \
libzstd-dev \
libspandsp-dev \
libxml2-dev \
libminizip-dev \
git \
ninja-build \
perl \
xsltproc \
ccache \
doxygen"
# Uncomment to add PNG compression utilities used by compress-pngs:
# ADDITIONAL_LIST="$ADDITIONAL_LIST \
# advancecomp \
# optipng \
# pngcrush"
DEBDEPS_LIST="debhelper \
dh-python \
asciidoctor \
docbook-xml \
docbook-xsl \
libxml2-utils \
lintian \
lsb-release \
po-debconf \
python3-ply \
quilt"
TESTDEPS_LIST="python3-pytest \
python3-pytest-xdist"
# Adds package $2 to list variable $1 if the package is found.
# If $3 is given, then this version requirement must be satisfied.
add_package() {
local list="$1" pkgname="$2" versionreq="${3:-}" version
version=$(apt-cache show "$pkgname" 2>/dev/null |
awk '/^Version:/{ print $2; exit}')
# fail if the package is not known
if [ -z "$version" ]; then
return 1
elif [ -n "$versionreq" ]; then
# Require minimum version or fail.
# shellcheck disable=SC2086
dpkg --compare-versions $version $versionreq || return 1
fi
# package is found, append it to list
eval "${list}=\"\${${list}} \${pkgname}\""
}
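# e.g. add_package TESTDEPS_LIST softhsm2 '>= 2.3.0' appends softhsm2 to
# TESTDEPS_LIST only if apt knows a version satisfying the requirement.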
# apt-get update must be called before calling add_package
# otherwise available packages appear as unavailable
apt-get update || exit 2
# cmake3 3.5.1: Ubuntu 14.04
# cmake >= 3.5: Debian >= jessie-backports, Ubuntu >= 16.04
add_package BASIC_LIST cmake3 ||
BASIC_LIST="$BASIC_LIST cmake"
# Debian >= wheezy-backports, Ubuntu >= 16.04
add_package ADDITIONAL_LIST libnghttp2-dev ||
echo "libnghttp2-dev is unavailable" >&2
# libssh-gcrypt-dev: Debian >= jessie, Ubuntu >= 16.04
# libssh-dev (>= 0.6): Debian >= jessie, Ubuntu >= 14.04
add_package ADDITIONAL_LIST libssh-gcrypt-dev ||
add_package ADDITIONAL_LIST libssh-dev ||
echo "libssh-gcrypt-dev and libssh-dev are unavailable" >&2
# libgnutls28-dev: Debian >= wheezy-backports, Ubuntu >= 12.04
add_package ADDITIONAL_LIST libgnutls28-dev ||
echo "libgnutls28-dev is unavailable" >&2
# Debian >= jessie-backports, Ubuntu >= 16.04
add_package ADDITIONAL_LIST libmaxminddb-dev ||
echo "libmaxminddb-dev is unavailable" >&2
# Debian >= stretch-backports, Ubuntu >= 16.04
add_package ADDITIONAL_LIST libbrotli-dev ||
echo "libbrotli-dev is unavailable" >&2
# libsystemd-journal-dev: Ubuntu 14.04
# libsystemd-dev: Ubuntu >= 16.04
add_package ADDITIONAL_LIST libsystemd-dev ||
add_package ADDITIONAL_LIST libsystemd-journal-dev ||
echo "libsystemd-dev is unavailable"
# ilbc library from http://www.deb-multimedia.org
add_package ADDITIONAL_LIST libilbc-dev ||
echo "libilbc-dev is unavailable"
# opus library libopus-dev
add_package ADDITIONAL_LIST libopus-dev ||
echo "libopus-dev is unavailable"
# bcg729 library libbcg729-dev
add_package ADDITIONAL_LIST libbcg729-dev ||
echo "libbcg729-dev is unavailable"
# softhsm2 2.0.0: Ubuntu 16.04
# softhsm2 2.2.0: Debian >= jessie-backports, Ubuntu 18.04
# softhsm2 >= 2.4.0: Debian >= buster, Ubuntu >= 18.10
if ! add_package TESTDEPS_LIST softhsm2 '>= 2.3.0'; then
if add_package TESTDEPS_LIST softhsm2; then
# If SoftHSM 2.3.0 is unavailable, install p11tool.
TESTDEPS_LIST="$TESTDEPS_LIST gnutls-bin"
else
echo "softhsm2 is unavailable" >&2
fi
fi
ACTUAL_LIST=$BASIC_LIST
# Now arrange for optional support libraries
if [ $ADDITIONAL -ne 0 ]
then
ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
fi
if [ $DEBDEPS -ne 0 ]
then
ACTUAL_LIST="$ACTUAL_LIST $DEBDEPS_LIST"
fi
if [ $TESTDEPS -ne 0 ]
then
ACTUAL_LIST="$ACTUAL_LIST $TESTDEPS_LIST"
fi
# shellcheck disable=SC2086
apt-get install $ACTUAL_LIST $OPTIONS || exit 2
if [ $ADDITIONAL -eq 0 ]
then
printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n"
fi
if [ $DEBDEPS -eq 0 ]
then
printf "\n*** Debian packages build deps not installed. Rerun with --install-deb-deps to have them.\n"
fi
if [ $TESTDEPS -eq 0 ]
then
printf "\n*** Test deps not installed. Rerun with --install-test-deps to have them.\n"
fi |
wireshark/tools/debug-alloc.env | ##############################################################################
### Set up environment variables for testing ###
##############################################################################
# Use the Wmem strict allocator which does canaries and scrubbing etc.
export WIRESHARK_DEBUG_WMEM_OVERRIDE=strict
# Abort if a dissector adds too many items to the tree
export WIRESHARK_ABORT_ON_TOO_MANY_ITEMS=
# Turn on GLib memory debugging (since 2.13)
export G_SLICE=debug-blocks
# Cause glibc (Linux) to abort() if some memory errors are found
export MALLOC_CHECK_=3
# Cause FreeBSD (and other BSDs) to abort() on allocator warnings and
# initialize allocated memory (to 0xa5) and freed memory (to 0x5a). see:
# https://www.freebsd.org/cgi/man.cgi?query=malloc&apropos=0&sektion=0&manpath=FreeBSD+8.2-RELEASE&format=html
export MALLOC_OPTIONS=AJ
# macOS options; see https://developer.apple.com/library/archive/documentation/Performance/Conceptual/ManagingMemory/Articles/MallocDebug.html
# Initialize allocated memory to 0xAA and freed memory to 0x55
export MallocPreScribble=1
export MallocScribble=1
# Add guard pages before and after large allocations
export MallocGuardEdges=1
# Call abort() if heap corruption is detected. Heap is checked every 1000
# allocations (may need to be tuned!)
export MallocCheckHeapStart=1000
export MallocCheckHeapEach=1000
export MallocCheckHeapAbort=1
# Call abort() if an illegal free() call is made
export MallocBadFreeAbort=1 |
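# Example use (assuming a tshark binary under ./build/run):
#   . tools/debug-alloc.env && ./build/run/tshark -r example.pcap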
|
Python | wireshark/tools/delete_includes.py | #!/usr/bin/python3
# Martin Mathieson
# Look for and removes unnecessary includes in .cpp or .c files
# Run from wireshark source folder as e.g.,
# ./tools/delete_includes.py --build-folder ~/wireshark-build/ --folder epan/dissectors/
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
import subprocess
import os
import sys
import shutil
import argparse
import signal
import re
from pathlib import Path
# Try to exit soon after Ctrl-C is pressed.
should_exit = False
def signal_handler(sig, frame):
global should_exit
should_exit = True
print('You pressed Ctrl+C - exiting')
signal.signal(signal.SIGINT, signal_handler)
# For text colouring/highlighting.
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
ADDED = '\033[45m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# command-line args
#
# Controls which dissector files should be checked. If no args given, will just
# scan whole epan/dissectors folder.
parser = argparse.ArgumentParser(description='Remove unnecessary #includes from source files')
# required
parser.add_argument('--build-folder', action='store', required=True,
                    help='specify the CMake build folder to use for test builds')
parser.add_argument('--file', action='append',
help='specify individual dissector file to test')
parser.add_argument('--folder', action='store', default=os.path.join('epan', 'dissectors'),
help='specify folder to test, relative to current/wireshark folder')
parser.add_argument('--commits', action='store',
help='last N commits to check')
parser.add_argument('--open', action='store_true',
help='check open files')
parser.add_argument('--first-file', action='store',
help='first file in folder to test')
parser.add_argument('--last-file', action='store',
help='last file in folder to test')
args = parser.parse_args()
test_folder = os.path.join(os.getcwd(), args.folder)
# Usually only building one module, so no -j benefit?
make_command = ['cmake', '--build', args.build_folder]
if sys.platform.startswith('win'):
make_command += ['--config', 'RelWithDebInfo']
# A list of header files that it is not safe to uninclude, as doing so
# has been seen to cause link failures against implemented functions...
# TODO: some of these could probably be removed on more permissive platforms.
includes_to_keep = {
'config.h',
'epan/packet.h',
'stdlib.h',
'math.h',
'errno.h',
'string.h',
'prefs.h',
# These are probably mostly redundant in that they are now covered by the check
# for 'self-includes'...
'x11-keysym.h',
'packet-atm.h',
'packet-atalk.h',
'packet-ppp.h',
'packet-scsi-mmc.h',
'packet-tls.h'
}
# Build stats.
class BuildStats:
def __init__(self):
self.files_examined = 0
self.includes_tested = 0
self.includes_deleted = 0
self.files_not_built_list = []
self.generated_files_ignored = []
self.includes_to_keep_kept = 0
def showSummary(self):
print('\n\n')
print('Summary')
print('=========')
print('files examined: %d' % self.files_examined)
print('includes tested: %d' % self.includes_tested)
print('includes deleted: %d' % self.includes_deleted)
print('files not built: %d' % len(self.files_not_built_list))
for abandoned_file in self.files_not_built_list:
print(' %s' % abandoned_file)
print('generated files not tested: %d' % len(self.generated_files_ignored))
for generated_file in self.generated_files_ignored:
print(' %s' % generated_file)
print('includes kept as not safe to remove: %d' % self.includes_to_keep_kept)
stats = BuildStats()
# We want to confirm that this file is actually built as part of the build.
# To do this, add some nonsense to the front of the file and confirm that the
# build then fails. If it doesn't, we won't want to remove #includes from that file!
def test_file_is_built(filename):
print('test_file_is_built(', filename, ')')
temp_filename = filename + '.tmp'
f_read = open(filename, 'r')
write_filename = filename + '.new'
f_write = open(write_filename, 'w')
# Write the file with nonsense at start.
f_write.write('NO WAY THIS FILE BUILDS!!!!!')
# Copy remaining lines as-is.
for line in f_read:
f_write.write(line)
f_read.close()
f_write.close()
# Backup file, and do this build with the one we wrote.
shutil.copy(filename, temp_filename)
shutil.copy(write_filename, filename)
# Try the build.
result = subprocess.call(make_command)
# Restore proper file & delete temp files
shutil.copy(temp_filename, filename)
os.remove(temp_filename)
os.remove(write_filename)
if result == 0:
# Build succeeded so this file wasn't in it
return False
else:
# Build failed so this file *is* part of it
return True
# Function to test removal of each #include from a file in turn.
# At the end, only those that appear to be needed will be left.
def test_file(filename):
global stats
print('\n------------------------------')
print(bcolors.OKBLUE, bcolors.BOLD, 'Testing', filename, bcolors.ENDC)
temp_filename = filename + '.tmp'
# Test if file seems to be part of the build.
is_built = test_file_is_built(filename)
if not is_built:
print(bcolors.WARNING, '***** File not used in build, so ignore!!!!', bcolors.ENDC)
# TODO: should os.path.join with root before adding?
stats.files_not_built_list.append(filename)
return
else:
print('This file is part of the build')
# OK, we are going to test removing includes from this file.
tested_line_number = 0
# Don't want to delete 'self-includes', so prepare filename.
module_name = Path(filename).stem
extension = Path(filename).suffix
module_header = module_name + '.h'
# Loop around, finding all possible include lines to comment out
    while True:
if should_exit:
exit(1)
have_deleted_line = False
result = 0
# Open read & write files
f_read = open(filename, 'r')
write_filename = filename + '.new'
f_write = open(write_filename, 'w')
# Walk the file again looking for another place to comment out an include
this_line_number = 1
hash_if_level = 0
for line in f_read:
this_line_deleted = False
# Maintain view of how many #if or #ifdefs we are in.
# Don't want to remove any includes that may not be active in this build.
if line.startswith('#if'):
hash_if_level = hash_if_level + 1
            if line.startswith('#endif'):
                if hash_if_level > 0:
                    hash_if_level = hash_if_level - 1
            # Consider deleting this line if we haven't deleted one yet on this pass
            # and haven't already tried this line on a previous pass.
if (not have_deleted_line and (tested_line_number < this_line_number)):
# Test line for starting with #include, and eligible for deletion.
if line.startswith('#include ') and hash_if_level == 0 and line.find(module_header) == -1:
# Check that this isn't a header file that known unsafe to uninclude.
allowed_to_delete = True
                    for entry in includes_to_keep:
                        if line.find(entry) != -1:
                            allowed_to_delete = False
                            stats.includes_to_keep_kept += 1
                            break
if allowed_to_delete:
# OK, actually doing it.
have_deleted_line = True
this_line_deleted = True
tested_line_number = this_line_number
# Write line to output file, unless this very one was deleted.
if not this_line_deleted:
f_write.write(line)
this_line_number = this_line_number + 1
# Close both files.
f_read.close()
f_write.close()
# If we commented out a line, try to build file without it.
if (have_deleted_line):
# Test a build. 0 means success, others are failures.
shutil.copy(filename, temp_filename)
shutil.copy(write_filename, filename)
# Try build
result = subprocess.call(make_command)
if result == 0:
print(bcolors.OKGREEN +bcolors.BOLD + 'Good build' + bcolors.ENDC)
# Line was eliminated so decrement line counter
tested_line_number = tested_line_number - 1
# Inc successes counter
stats.includes_deleted += 1
# Good - promote this version by leaving it here!
# Occasionally fails so delete this file each time.
                # TODO: this is very particular to the dissector target on Windows...
                pdb_file = os.path.join(args.build_folder, 'vc100.pdb')
                if 'dissectors' in args.folder and os.path.isfile(pdb_file):
                    os.remove(pdb_file)
else:
print(bcolors.FAIL +bcolors.BOLD + 'Bad build' + bcolors.ENDC)
# Never mind, go back to previous building version
shutil.copy(temp_filename, filename)
# Inc counter of tried
stats.includes_tested += 1
else:
# Reached the end of the file without making changes, so nothing doing.
# Delete temporary files
if os.path.isfile(temp_filename):
os.remove(temp_filename)
if os.path.isfile(write_filename):
os.remove(write_filename)
return
# Test for whether the given file is under source control
def under_version_control(filename):
    # 'git ls-files --error-unmatch' exits with status 0 only for tracked files.
    try:
        return subprocess.call(['git', 'ls-files', '--error-unmatch', filename],
                               stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0
    except OSError:
        return True  # git itself is not available - assume the file is tracked
# Test for whether the given file was automatically generated.
def generated_file(filename):
# Special known case.
if filename == 'register.c':
return True
# Open file
f_read = open(filename, 'r')
lines_tested = 0
for line in f_read:
# The comment to say that its generated is near the top, so give up once
# get a few lines down.
if lines_tested > 10:
f_read.close()
return False
if (line.find('Generated automatically') != -1 or
line.find('Generated Automatically') != -1 or
line.find('Autogenerated from') != -1 or
line.find('is autogenerated') != -1 or
line.find('automatically generated by Pidl') != -1 or
line.find('Created by: The Qt Meta Object Compiler') != -1 or
line.find('This file was generated') != -1 or
line.find('This filter was automatically generated') != -1 or
line.find('This file is auto generated, do not edit!') != -1):
f_read.close()
return True
lines_tested = lines_tested + 1
# OK, looks like a hand-written file!
f_read.close()
return False
def isBuildableFile(filename):
return filename.endswith('.c') or filename.endswith('.cpp')
def findFilesInFolder(folder, recursive=False):
dissector_files = []
if recursive:
for root, subfolders, files in os.walk(folder):
for f in files:
if should_exit:
return
f = os.path.join(root, f)
dissector_files.append(f)
else:
for f in sorted(os.listdir(folder)):
if should_exit:
return
filename = os.path.join(folder, f)
dissector_files.append(filename)
return [x for x in filter(isBuildableFile, dissector_files)]
######################################################################################
# MAIN PROGRAM STARTS HERE
######################################################################################
# Work out which files we want to look at.
files = []
if args.file:
# Add specified file(s)
for f in args.file:
if not os.path.isfile(f):
print('Chosen file', f, 'does not exist.')
exit(1)
else:
files.append(f)
elif args.folder:
# Add all files from a given folder.
folder = args.folder
if not os.path.isdir(folder):
print('Folder', folder, 'not found!')
exit(1)
# Find files from folder.
print('Looking for files in', folder)
files = findFilesInFolder(folder, recursive=False)
# If first-file/last-file are given, will need to trim files accordingly
    # NB: list.index() raises ValueError rather than returning -1, so test membership first.
    if args.first_file:
        if args.first_file not in files:
            print('first-file entry', args.first_file, 'not in list of files to be checked')
            exit(1)
        files = files[files.index(args.first_file):]
    if args.last_file:
        if args.last_file not in files:
            print('last-file entry', args.last_file, 'not in list of files to be checked')
            exit(1)
        files = files[:files.index(args.last_file)+1]
# Confirm that the build is currently passing, if not give up now.
print(bcolors.OKBLUE,bcolors.BOLD,
'Doing an initial build to check we have a stable base.',
bcolors.ENDC)
result = subprocess.call(make_command)
if result != 0:
print(bcolors.FAIL, bcolors.BOLD, 'Initial build failed - give up now!!!!', bcolors.ENDC)
exit (-1)
# Test each file.
for filename in files:
# Want to filter out generated files that are not checked in.
if not generated_file(filename) and under_version_control(filename):
# OK, try this file
test_file(filename)
# Inc counter
stats.files_examined += 1
else:
if generated_file(filename):
reason = 'generated file...'
if not under_version_control(filename):
reason = 'not under source control'
print('Ignoring %s: %s' % (filename, reason))
# Show summary stats of run
stats.showSummary() |
Python | wireshark/tools/detect_bad_alloc_patterns.py | """
Detect and replace instances of g_malloc() and wmem_alloc() with
g_new() and wmem_new(), to improve the readability of Wireshark's code.
Also detect and replace instances of
g_malloc(sizeof(struct myobj) * foo)
with:
g_new(struct myobj, foo)
to better prevent integer overflows
SPDX-License-Identifier: MIT
"""
import os
import re
import sys
print_replacement_info = True
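# NB: in the patterns below, '[struct]{0,6}' is a loose match for an optional
# 'struct' keyword: it accepts any run of up to six characters drawn from the
# set {s, t, r, u, c}, which is precise enough for the code it targets.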
patterns = [
# Replace (myobj *)g_malloc(sizeof(myobj)) with g_new(myobj, 1)
# Replace (struct myobj *)g_malloc(sizeof(struct myobj)) with g_new(struct myobj, 1)
(re.compile(r'\(\s*([struct]{0,6}\s*[^\s\*]+)\s*\*\s*\)\s*g_malloc(0?)\s*\(\s*sizeof\s*\(\s*\1\s*\)\s*\)'), r'g_new\2(\1, 1)'),
# Replace (myobj *)g_malloc(sizeof(myobj) * foo) with g_new(myobj, foo)
# Replace (struct myobj *)g_malloc(sizeof(struct myobj) * foo) with g_new(struct myobj, foo)
(re.compile(r'\(\s*([struct]{0,6}\s*[^\s\*]+)\s*\*\s*\)\s*g_malloc(0?)\s*\(\s*sizeof\s*\(\s*\1\s*\)\s*\*\s*([^\s]+)\s*\)'), r'g_new\2(\1, \3)'),
# Replace (myobj *)g_malloc(foo * sizeof(myobj)) with g_new(myobj, foo)
# Replace (struct myobj *)g_malloc(foo * sizeof(struct myobj)) with g_new(struct myobj, foo)
(re.compile(r'\(\s*([struct]{0,6}\s*[^\s\*]+)\s*\*\s*\)\s*g_malloc(0?)\s*\(\s*([^\s]+)\s*\*\s*sizeof\s*\(\s*\1\s*\)\s*\)'), r'g_new\2(\1, \3)'),
# Replace (myobj *)wmem_alloc(wmem_file_scope(), sizeof(myobj)) with wmem_new(wmem_file_scope(), myobj)
# Replace (struct myobj *)wmem_alloc(wmem_file_scope(), sizeof(struct myobj)) with wmem_new(wmem_file_scope(), struct myobj)
(re.compile(r'\(\s*([struct]{0,6}\s*[^\s\*]+)\s*\*\s*\)\s*wmem_alloc(0?)\s*\(\s*([_a-z\(\)->]+),\s*sizeof\s*\(\s*\1\s*\)\s*\)'), r'wmem_new\2(\3, \1)'),
]
def replace_file(fpath):
with open(fpath, 'r') as fh:
fdata_orig = fh.read()
fdata = fdata_orig
for pattern, replacewith in patterns:
fdata_out = pattern.sub(replacewith, fdata)
if print_replacement_info and fdata != fdata_out:
for match in re.finditer(pattern, fdata):
replacement = re.sub(pattern, replacewith, match.group(0))
print("Bad malloc pattern in %s: Replace '%s' with '%s'" % (fpath, match.group(0), replacement))
fdata = fdata_out
if fdata_out != fdata_orig:
with open(fpath, 'w') as fh:
fh.write(fdata_out)
return fdata_out
def run_specific_files(fpaths):
for fpath in fpaths:
if not (fpath.endswith('.c') or fpath.endswith('.cpp')):
continue
replace_file(fpath)
def run_recursive(root_dir):
for root, dirs, files in os.walk(root_dir):
fpaths = []
for fname in files:
fpath = os.path.join(root, fname)
fpaths.append(fpath)
run_specific_files(fpaths)
def test_replacements():
test_string = """\
(if_info_t*) g_malloc0(sizeof(if_info_t))
(oui_info_t *)g_malloc(sizeof (oui_info_t))
(guint8 *)g_malloc(16 * sizeof(guint8))
(guint32 *)g_malloc(sizeof(guint32)*2)
(struct imf_field *)g_malloc (sizeof (struct imf_field))
(rtspstat_t *)g_malloc( sizeof(rtspstat_t) )
(proto_data_t *)wmem_alloc(scope, sizeof(proto_data_t))
(giop_sub_handle_t *)wmem_alloc(wmem_epan_scope(), sizeof (giop_sub_handle_t))
(mtp3_addr_pc_t *)wmem_alloc0(pinfo->pool, sizeof(mtp3_addr_pc_t))
(dcerpc_bind_value *)wmem_alloc(wmem_file_scope(), sizeof (dcerpc_bind_value))
(dcerpc_matched_key *)wmem_alloc(wmem_file_scope(), sizeof (dcerpc_matched_key));
(struct smtp_session_state *)wmem_alloc0(wmem_file_scope(), sizeof(struct smtp_session_state))
(struct batman_packet_v5 *)wmem_alloc(pinfo->pool, sizeof(struct batman_packet_v5))
(struct knx_keyring_mca_keys*) wmem_alloc( wmem_epan_scope(), sizeof( struct knx_keyring_mca_keys ) )
"""
expected_output = """\
g_new0(if_info_t, 1)
g_new(oui_info_t, 1)
g_new(guint8, 16)
g_new(guint32, 2)
g_new(struct imf_field, 1)
g_new(rtspstat_t, 1)
wmem_new(scope, proto_data_t)
wmem_new(wmem_epan_scope(), giop_sub_handle_t)
wmem_new0(pinfo->pool, mtp3_addr_pc_t)
wmem_new(wmem_file_scope(), dcerpc_bind_value)
wmem_new(wmem_file_scope(), dcerpc_matched_key);
wmem_new0(wmem_file_scope(), struct smtp_session_state)
wmem_new(pinfo->pool, struct batman_packet_v5)
wmem_new(wmem_epan_scope(), struct knx_keyring_mca_keys)
"""
output = test_string
for pattern, replacewith in patterns:
output = pattern.sub(replacewith, output)
assert(output == expected_output)
def main():
test_replacements()
if len(sys.argv) == 2:
root_dir = sys.argv[1]
run_recursive(root_dir)
else:
fpaths = []
for line in sys.stdin:
line = line.strip()
if line:
fpaths.append(line)
run_specific_files(fpaths)
if __name__ == "__main__":
main() |
Python | wireshark/tools/eti2wireshark.py | #!/usr/bin/env python3
# Generate Wireshark Dissectors for electronic trading/market data
# protocols such as ETI/EOBI.
#
# Targets Wireshark 3.5 or later.
#
# SPDX-FileCopyrightText: © 2021 Georg Sauthoff <[email protected]>
# SPDX-License-Identifier: GPL-2.0-or-later
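#
# Example invocation (hypothetical file names; the XML input is the ETI/EOBI
# protocol description published by the exchange):
#   ./tools/eti2wireshark.py --proto eti -d 'Enhanced Trading Interface' \
#       -o packet-eti.c eti_Derivatives.xml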
import argparse
import itertools
import re
import sys
import xml.etree.ElementTree as ET
# inlined from upstream's etimodel.py
import itertools
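# get_max_sizes()/get_min_sizes() compute the maximal/minimal encoded size of
# each structure in bytes: leaf data types contribute their fixed size (or 0
# when variable sized), each member contributes size * (min)cardinality, and
# non-Message structures are resolved before the messages that embed them.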
def get_max_sizes(st, dt):
h = {}
for name, e in dt.items():
v = e.get('size', '0')
h[name] = int(v)
for name, e in itertools.chain((i for i in st.items() if i[1].get('type') != 'Message'),
(i for i in st.items() if i[1].get('type') == 'Message')):
s = 0
for m in e:
x = h.get(m.get('type'), 0)
s += x * int(m.get('cardinality'))
h[name] = s
return h
def get_min_sizes(st, dt):
h = {}
for name, e in dt.items():
v = e.get('size', '0')
if e.get('variableSize') is None:
h[name] = int(v)
else:
h[name] = 0
for name, e in itertools.chain((i for i in st.items() if i[1].get('type') != 'Message'),
(i for i in st.items() if i[1].get('type') == 'Message')):
s = 0
for m in e:
x = h.get(m.get('type'), 0)
s += x * int(m.get('minCardinality', '1'))
h[name] = s
return h
# end # inlined from upstream's etimodel.py
def get_used_types(st):
xs = set(y.get('type') for _, x in st.items() for y in x)
return xs
def get_data_types(d):
r = d.getroot()
x = r.find('DataTypes')
h = {}
for e in x:
h[e.get('name')] = e
return h
def get_structs(d):
r = d.getroot()
x = r.find('Structures')
h = {}
for e in x:
h[e.get('name')] = e
return h
def get_templates(st):
ts = []
for k, v in st.items():
if v.get('type') == 'Message':
ts.append((int(v.get('numericID')), k))
ts.sort()
return ts
def gen_header(proto, desc, o=sys.stdout):
if proto.startswith('eti') or proto.startswith('xti'):
ph = '#include "packet-tcp.h" // tcp_dissect_pdus()'
else:
ph = '#include "packet-udp.h" // udp_dissect_pdus()'
print(f'''// auto-generated by Georg Sauthoff's eti2wireshark.py
/* packet-eti.c
* Routines for {proto.upper()} dissection
* Copyright 2021, Georg Sauthoff <[email protected]>
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <[email protected]>
* Copyright 1998 Gerald Combs
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
/*
* The {desc} ({proto.upper()}) is an electronic trading protocol
* that is used by a few exchanges (Eurex, Xetra, ...).
*
* It's a Length-Tag based protocol consisting of mostly fix sized
* request/response messages.
*
* Links:
* https://en.wikipedia.org/wiki/List_of_electronic_trading_protocols#Europe
* https://github.com/gsauthof/python-eti#protocol-descriptions
* https://github.com/gsauthof/python-eti#protocol-introduction
*
*/
#include <config.h>
#include <epan/packet.h> // Should be first Wireshark include (other than config.h)
{ph}
#include <epan/expert.h> // expert info
#include <inttypes.h>
#include <stdio.h> // snprintf()
/* Prototypes */
/* (Required to prevent [-Wmissing-prototypes] warnings */
void proto_reg_handoff_{proto}(void);
void proto_register_{proto}(void);
''', file=o)
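# Convert a CamelCase name into a snake_case identifier,
# e.g. name2ident('TemplateID') == 'template_id'.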
def name2ident(name):
ll = True
xs = []
for i, c in enumerate(name):
if c.isupper():
if i > 0 and ll:
xs.append('_')
xs.append(c.lower())
ll = False
else:
xs.append(c)
ll = True
return ''.join(xs)
def gen_enums(dt, ts, o=sys.stdout):
print('static const value_string template_id_vals[] = { // TemplateID', file=o)
min_tid, max_tid = ts[0][0], ts[-1][0]
xs = [None] * (max_tid - min_tid + 1)
for tid, name in ts:
xs[tid-min_tid] = name
for i, name in enumerate(xs):
if name is None:
print(f' {{ {min_tid + i}, "Unknown" }},', file=o)
else:
print(f' {{ {min_tid + i}, "{name}" }},', file=o)
print(''' { 0, NULL }
};
static value_string_ext template_id_vals_ext = VALUE_STRING_EXT_INIT(template_id_vals);''', file=o)
name2access = { 'TemplateID': '&template_id_vals_ext' }
dedup = {}
for name, e in dt.items():
vs = [ (x.get('value'), x.get('name')) for x in e.findall('ValidValue') ]
if not vs:
continue
if e.get('rootType') == 'String' and e.get('size') != '1':
continue
ident = name2ident(name)
nv = e.get('noValue')
ws = [ v[0] for v in vs ]
if nv not in ws:
if nv.startswith('0x0') and e.get('rootType') == 'String':
nv = '\0'
vs.append( (nv, 'NO_VALUE') )
if e.get('type') == 'int':
vs.sort(key = lambda x : int(x[0], 0))
else:
vs.sort(key = lambda x : ord(x[0]))
s = '-'.join(f'{v[0]}:{v[1]}' for v in vs)
x = dedup.get(s)
if x is None:
dedup[s] = name
else:
name2access[name] = name2access[x]
print(f'// {name} aliased by {x}', file=o)
continue
print(f'static const value_string {ident}_vals[] = {{ // {name}', file=o)
for i, v in enumerate(vs):
if e.get('rootType') == 'String':
k = f"'{v[0]}'" if ord(v[0]) != 0 else '0'
print(f''' {{ {k}, "{v[1]}" }},''', file=o)
else:
print(f' {{ {v[0]}, "{v[1]}" }},', file=o)
print(''' { 0, NULL }
};''', file=o)
if len(vs) > 7:
print(f'static value_string_ext {ident}_vals_ext = VALUE_STRING_EXT_INIT({ident}_vals);', file=o)
name2access[name] = f'&{ident}_vals_ext'
else:
name2access[name] = f'VALS({ident}_vals)'
return name2access
def get_fields(st, dt):
seen = {}
for name, e in st.items():
for m in e:
t = dt.get(m.get('type'))
if is_padding(t):
continue
if not (is_int(t) or is_fixed_string(t) or is_var_string(t)):
continue
name = m.get('name')
if name in seen:
if seen[name] != t:
raise RuntimeError(f'Mismatching type for: {name}')
else:
seen[name] = t
vs = list(seen.items())
vs.sort()
return vs
def gen_field_handles(st, dt, proto, o=sys.stdout):
print(f'''static expert_field ei_{proto}_counter_overflow = EI_INIT;
static expert_field ei_{proto}_invalid_template = EI_INIT;
static expert_field ei_{proto}_invalid_length = EI_INIT;''', file=o)
if not proto.startswith('eobi'):
print(f'static expert_field ei_{proto}_unaligned = EI_INIT;', file=o)
print(f'''static expert_field ei_{proto}_missing = EI_INIT;
static expert_field ei_{proto}_overused = EI_INIT;
''', file=o)
vs = get_fields(st, dt)
s = ', '.join('-1' for i in range(len(vs)))
print(f'static int hf_{proto}[] = {{ {s} }};', file=o)
print(f'''static int hf_{proto}_dscp_exec_summary = -1;
static int hf_{proto}_dscp_improved = -1;
static int hf_{proto}_dscp_widened = -1;''', file=o)
print('enum Field_Handle_Index {', file=o)
for i, (name, _) in enumerate(vs):
c = ' ' if i == 0 else ','
print(f' {c} {name.upper()}_FH_IDX', file=o)
print('};', file=o)
def type2ft(t):
if is_timestamp_ns(t):
return 'FT_ABSOLUTE_TIME'
if is_dscp(t):
return 'FT_UINT8'
if is_int(t):
if t.get('rootType') == 'String':
return 'FT_CHAR'
u = 'U' if is_unsigned(t) else ''
if t.get('size') is None:
raise RuntimeError(f'None size: {t.get("name")}')
size = int(t.get('size')) * 8
return f'FT_{u}INT{size}'
if is_fixed_string(t) or is_var_string(t):
# NB: technically, ETI fixed-strings are blank-padded,
# unless they are marked NO_VALUE, in that case
# the first byte is zero, followed by unspecified content.
# Also, some fixed-strings are zero-terminated, where again
# the bytes following the terminator are unspecified.
return 'FT_STRINGZTRUNC'
raise RuntimeError('unexpected type')
def type2enc(t):
if is_timestamp_ns(t):
return 'ABSOLUTE_TIME_UTC'
if is_dscp(t):
return 'BASE_HEX'
if is_int(t):
if t.get('rootType') == 'String':
# NB: basically only used when enum and value is unknown
return 'BASE_HEX'
else:
return 'BASE_DEC'
if is_fixed_string(t) or is_var_string(t):
# previously 'STR_ASCII', which was removed upstream
# cf. 19dcb725b61e384f665ad4b955f3b78f63e626d9
return 'BASE_NONE'
raise RuntimeError('unexpected type')
def gen_field_info(st, dt, n2enum, proto='eti', o=sys.stdout):
print(' static hf_register_info hf[] ={', file=o)
vs = get_fields(st, dt)
for i, (name, t) in enumerate(vs):
c = ' ' if i == 0 else ','
ft = type2ft(t)
enc = type2enc(t)
if is_enum(t) and not is_dscp(t):
vals = n2enum[t.get('name')]
if vals.startswith('&'):
extra_enc = '| BASE_EXT_STRING'
else:
extra_enc = ''
else:
vals = 'NULL'
extra_enc = ''
print(f''' {c} {{ &hf_{proto}[{name.upper()}_FH_IDX],
{{ "{name}", "{proto}.{name.lower()}",
{ft}, {enc}{extra_enc}, {vals}, 0x0,
NULL, HFILL }}
}}''', file=o)
print(f''' , {{ &hf_{proto}_dscp_exec_summary,
{{ "DSCP_ExecSummary", "{proto}.dscp_execsummary",
FT_BOOLEAN, 8, NULL, 0x10,
NULL, HFILL }}
}}
, {{ &hf_{proto}_dscp_improved,
{{ "DSCP_Improved", "{proto}.dscp_improved",
FT_BOOLEAN, 8, NULL, 0x20,
NULL, HFILL }}
}}
, {{ &hf_{proto}_dscp_widened,
{{ "DSCP_Widened", "{proto}.dscp_widened",
FT_BOOLEAN, 8, NULL, 0x40,
NULL, HFILL }}
}}''', file=o)
print(' };', file=o)
def gen_subtree_handles(st, proto='eti', o=sys.stdout):
ns = [ name for name, e in st.items() if e.get('type') != 'Message' ]
ns.sort()
s = ', '.join('-1' for i in range(len(ns) + 1))
h = dict( (n, i) for i, n in enumerate(ns, 1) )
print(f'static gint ett_{proto}[] = {{ {s} }};', file=o)
print(f'static gint ett_{proto}_dscp = -1;', file=o)
return h
def gen_subtree_array(st, proto='eti', o=sys.stdout):
n = sum(1 for name, e in st.items() if e.get('type') != 'Message')
n += 1
s = ', '.join(f'&ett_{proto}[{i}]' for i in range(n))
print(f' static gint * const ett[] = {{ {s}, &ett_{proto}_dscp }};', file=o)
def gen_fields_table(st, dt, sh, o=sys.stdout):
name2off = {}
off = 0
names = []
for name, e in st.items():
if e.get('type') == 'Message':
continue
if name.endswith('Comp'):
s = name[:-4]
name2off[name] = off
off += len(s) + 1
names.append(s)
s = '\\0'.join(names)
print(f' static const char struct_names[] = "{s}";', file=o)
xs = [ x for x in st.items() if x[1].get('type') != 'Message' ]
xs += [ x for x in st.items() if x[1].get('type') == 'Message' ]
print(' static const struct ETI_Field fields[] = {', file=o)
i = 0
fields2idx = {}
for name, e in xs:
fields2idx[name] = i
print(f' // {name}@{i}', file=o)
counters = {}
cnt = 0
for m in e:
t = dt.get(m.get('type'))
c = ' ' if i == 0 else ','
typ = ''
size = int(t.get('size')) if t is not None else 0
rep = ''
fh = f'{m.get("name").upper()}_FH_IDX'
sub = ''
if is_padding(t):
print(f' {c} {{ ETI_PADDING, 0, {size}, 0, 0 }}', file=o)
elif is_fixed_point(t):
if size != 8:
raise RuntimeError('only supporting 8 byte fixed point')
fraction = int(t.get('precision'))
if fraction > 16:
                    raise RuntimeError('unusually high precision in fixed point')
print(f' {c} {{ ETI_FIXED_POINT, {fraction}, {size}, {fh}, 0 }}', file=o)
elif is_timestamp_ns(t):
if size != 8:
raise RuntimeError('only supporting timestamps')
print(f' {c} {{ ETI_TIMESTAMP_NS, 0, {size}, {fh}, 0 }}', file=o)
elif is_dscp(t):
print(f' {c} {{ ETI_DSCP, 0, {size}, {fh}, 0 }}', file=o)
elif is_int(t):
u = 'U' if is_unsigned(t) else ''
if t.get('rootType') == 'String':
typ = 'ETI_CHAR'
else:
typ = f'ETI_{u}INT'
if is_enum(t):
typ += '_ENUM'
if t.get('type') == 'Counter':
counters[m.get('name')] = cnt
suf = f' // <- counter@{cnt}'
if cnt > 7:
raise RuntimeError(f'too many counters in message: {name}')
rep = cnt
cnt += 1
if typ != 'ETI_UINT':
raise RuntimeError('only unsigned counters supported')
if size > 2:
raise RuntimeError('only smaller counters supported')
typ = 'ETI_COUNTER'
ett_idx = t.get('maxValue')
else:
rep = 0
suf = ''
ett_idx = 0
print(f' {c} {{ {typ}, {rep}, {size}, {fh}, {ett_idx} }}{suf}', file=o)
elif is_fixed_string(t):
print(f' {c} {{ ETI_STRING, 0, {size}, {fh}, 0 }}', file=o)
elif is_var_string(t):
k = m.get('counter')
x = counters[k]
print(f' {c} {{ ETI_VAR_STRING, {x}, {size}, {fh}, 0 }}', file=o)
else:
a = m.get('type')
fields_idx = fields2idx[a]
k = m.get('counter')
if k:
counter_off = counters[k]
typ = 'ETI_VAR_STRUCT'
else:
counter_off = 0
typ = 'ETI_STRUCT'
names_off = name2off[m.get('type')]
ett_idx = sh[a]
print(f' {c} {{ {typ}, {counter_off}, {names_off}, {fields_idx}, {ett_idx} }} // {m.get("name")}', file=o)
i += 1
print(' , { ETI_EOF, 0, 0, 0, 0 }', file=o)
i += 1
print(' };', file=o)
return fields2idx
def gen_template_table(min_templateid, n, ts, fields2idx, o=sys.stdout):
xs = [ '-1' ] * n
for tid, name in ts:
xs[tid - min_templateid] = f'{fields2idx[name]} /* {name} */'
s = '\n , '.join(xs)
print(f' static const int16_t tid2fidx[] = {{\n {s}\n }};', file=o)
def gen_sizes_table(min_templateid, n, st, dt, ts, proto, o=sys.stdout):
is_eobi = proto.startswith('eobi')
xs = [ '0' if is_eobi else '{ 0, 0}' ] * n
min_s = get_min_sizes(st, dt)
max_s = get_max_sizes(st, dt)
if is_eobi:
for tid, name in ts:
xs[tid - min_templateid] = f'{max_s[name]} /* {name} */'
else:
for tid, name in ts:
xs[tid - min_templateid] = f'{{ {min_s[name]}, {max_s[name]} }} /* {name} */'
s = '\n , '.join(xs)
if is_eobi:
print(f' static const uint32_t tid2size[] = {{\n {s}\n }};', file=o)
else:
print(f' static const uint32_t tid2size[{n}][2] = {{\n {s}\n }};', file=o)
# NB: the usage attribute of a single field depends on the message context;
# otherwise, we could just put the information into the fields table.
# Example: EOBI.PacketHeader.MessageHeader.MsgSeqNum is unused whereas
# it's required in the EOBI ExecutionSummary and other messages
def gen_usage_table(min_templateid, n, ts, ams, o=sys.stdout):
def map_usage(m):
x = m.get('usage')
if x == 'mandatory':
return 0
elif x == 'optional':
return 1
elif x == 'unused':
return 2
else:
raise RuntimeError(f'unknown usage value: {x}')
h = {}
i = 0
print(' static const unsigned char usages[] = {', file=o)
for am in ams:
name = am.get("name")
tid = int(am.get('numericID'))
print(f' // {name}', file=o)
h[tid] = i
for e in am:
if e.tag == 'Group':
print(f' //// {e.get("type")}', file=o)
for m in e:
if m.get('hidden') == 'true' or pad_re.match(m.get('name')):
continue
k = ' ' if i == 0 else ','
print(f' {k} {map_usage(m)} // {m.get("name")}#{i}', file=o)
i += 1
print(' ///', file=o)
else:
if e.get('hidden') == 'true' or pad_re.match(e.get('name')):
continue
k = ' ' if i == 0 else ','
print(f' {k} {map_usage(e)} // {e.get("name")}#{i}', file=o)
i += 1
# NB: the last element is a filler to simplify the out-of-bounds check
    # (cf. the uidx DISSECTOR_ASSERT_CMPUINT() checks before the switch statement)
# when the ETI_EOF of the message whose usage information comes last
# is reached
print(f' , 0 // filler', file=o)
print(' };', file=o)
xs = [ '-1' ] * n
t2n = dict(ts)
for tid, uidx in h.items():
name = t2n[tid]
xs[tid - min_templateid] = f'{uidx} /* {name} */'
s = '\n , '.join(xs)
print(f' static const int16_t tid2uidx[] = {{\n {s}\n }};', file=o)
def gen_dscp_table(proto, o=sys.stdout):
print(f''' static int * const dscp_bits[] = {{
&hf_{proto}_dscp_exec_summary,
&hf_{proto}_dscp_improved,
&hf_{proto}_dscp_widened,
NULL
}};''', file=o)
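# Emit one C 'case' label that dissects a little-endian (un)signed integer of
# the given byte size, rendering the type's NO_VALUE sentinel (INTn_MIN for
# signed, UINTn_MAX for unsigned) specially and attaching expert info for
# missing required values and set-but-unused values.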
def mk_int_case(size, signed, proto):
signed_str = 'i' if signed else ''
unsigned_str = '' if signed else 'u'
fmt_str = 'i' if signed else 'u'
if size == 2:
size_str = 's'
elif size == 4:
size_str = 'l'
elif size == 8:
size_str = '64'
type_str = f'g{unsigned_str}int{size * 8}'
no_value_str = f'INT{size * 8}_MIN' if signed else f'UINT{size * 8}_MAX'
pt_size = '64' if size == 8 else ''
if signed:
hex_str = '0x80' + '00' * (size - 1)
else:
hex_str = '0x' + 'ff' * size
if size == 1:
fn = f'tvb_get_g{unsigned_str}int8'
else:
fn = f'tvb_get_letoh{signed_str}{size_str}'
s = f'''case {size}:
{{
{type_str} x = {fn}(tvb, off);
if (x == {no_value_str}) {{
proto_item *e = proto_tree_add_{unsigned_str}int{pt_size}_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "NO_VALUE ({hex_str})");
if (!usages[uidx])
expert_add_info_format(pinfo, e, &ei_{proto}_missing, "required value is missing");
}} else {{
proto_item *e = proto_tree_add_{unsigned_str}int{pt_size}_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "%" PRI{fmt_str}{size * 8}, x);
if (usages[uidx] == 2)
expert_add_info_format(pinfo, e, &ei_{proto}_overused, "unused value is set");
}}
}}
break;'''
return s
def gen_dissect_structs(o=sys.stdout):
print('''
enum ETI_Type {
ETI_EOF,
ETI_PADDING,
ETI_UINT,
ETI_INT,
ETI_UINT_ENUM,
ETI_INT_ENUM,
ETI_COUNTER,
ETI_FIXED_POINT,
ETI_TIMESTAMP_NS,
ETI_CHAR,
ETI_STRING,
ETI_VAR_STRING,
ETI_STRUCT,
ETI_VAR_STRUCT,
ETI_DSCP
};
struct ETI_Field {
uint8_t type;
uint8_t counter_off; // offset into counter array
// if ETI_COUNTER => storage
// if ETI_VAR_STRING or ETI_VAR_STRUCT => load
// to get length or repeat count
// if ETI_FIXED_POINT: #fractional digits
uint16_t size; // or offset into struct_names if ETI_STRUCT/ETI_VAR_STRUCT
    uint16_t field_handle_idx; // or index into fields array if ETI_STRUCT/ETI_VAR_STRUCT
uint16_t ett_idx; // index into ett array if ETI_STRUCT/ETI_VAR_STRUCT
// or max value if ETI_COUNTER
};
''', file=o)
def gen_dissect_fn(st, dt, ts, sh, ams, proto, o=sys.stdout):
if proto.startswith('eti') or proto.startswith('xti'):
bl_fn = 'tvb_get_letohl'
template_off = 4
else:
bl_fn = 'tvb_get_letohs'
template_off = 2
print(f'''/* This method dissects fully reassembled messages */
static int
dissect_{proto}_message(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data _U_)
{{
col_set_str(pinfo->cinfo, COL_PROTOCOL, "{proto.upper()}");
col_clear(pinfo->cinfo, COL_INFO);
guint16 templateid = tvb_get_letohs(tvb, {template_off});
const char *template_str = val_to_str_ext(templateid, &template_id_vals_ext, "Unknown {proto.upper()} template: 0x%04x");
col_add_fstr(pinfo->cinfo, COL_INFO, "%s", template_str);
/* create display subtree for the protocol */
proto_item *ti = proto_tree_add_item(tree, proto_{proto}, tvb, 0, -1, ENC_NA);
guint32 bodylen= {bl_fn}(tvb, 0);
proto_item_append_text(ti, ", %s (%" PRIu16 "), BodyLen: %u", template_str, templateid, bodylen);
proto_tree *root = proto_item_add_subtree(ti, ett_{proto}[0]);
''', file=o)
min_templateid = ts[0][0]
max_templateid = ts[-1][0]
n = max_templateid - min_templateid + 1
fields2idx = gen_fields_table(st, dt, sh, o)
gen_template_table(min_templateid, n, ts, fields2idx, o)
gen_sizes_table(min_templateid, n, st, dt, ts, proto, o)
gen_usage_table(min_templateid, n, ts, ams, o)
gen_dscp_table(proto, o)
print(f''' if (templateid < {min_templateid} || templateid > {max_templateid}) {{
proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_template, tvb, {template_off}, 4,
"Template ID out of range: %" PRIu16, templateid);
return tvb_captured_length(tvb);
}}
int fidx = tid2fidx[templateid - {min_templateid}];
if (fidx == -1) {{
proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_template, tvb, {template_off}, 4,
"Unallocated Template ID: %" PRIu16, templateid);
return tvb_captured_length(tvb);
}}''', file=o)
if proto.startswith('eobi'):
print(f''' if (bodylen != tid2size[templateid - {min_templateid}]) {{
proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_length, tvb, 0, {template_off},
"Unexpected BodyLen value of %" PRIu32 ", expected: %" PRIu32, bodylen, tid2size[templateid - {min_templateid}]);
}}''', file=o)
else:
print(f''' if (bodylen < tid2size[templateid - {min_templateid}][0] || bodylen > tid2size[templateid - {min_templateid}][1]) {{
if (tid2size[templateid - {min_templateid}][0] != tid2size[templateid - {min_templateid}][1])
proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_length, tvb, 0, {template_off},
"Unexpected BodyLen value of %" PRIu32 ", expected: %" PRIu32 "..%" PRIu32, bodylen, tid2size[templateid - {min_templateid}][0], tid2size[templateid - {min_templateid}][1]);
else
proto_tree_add_expert_format(root, pinfo, &ei_{proto}_invalid_length, tvb, 0, {template_off},
"Unexpected BodyLen value of %" PRIu32 ", expected: %" PRIu32, bodylen, tid2size[templateid - {min_templateid}][0]);
}}
if (bodylen % 8)
proto_tree_add_expert_format(root, pinfo, &ei_{proto}_unaligned, tvb, 0, {template_off},
"BodyLen value of %" PRIu32 " is not divisible by 8", bodylen);
''', file=o)
print(f''' int uidx = tid2uidx[templateid - {min_templateid}];
DISSECTOR_ASSERT_CMPINT(uidx, >=, 0);
DISSECTOR_ASSERT_CMPUINT(((size_t)uidx), <, (sizeof usages / sizeof usages[0]));
''', file=o)
print(f''' int old_fidx = 0;
int old_uidx = 0;
unsigned top = 1;
unsigned counter[8] = {{0}};
unsigned off = 0;
unsigned struct_off = 0;
unsigned repeats = 0;
proto_tree *t = root;
while (top) {{
DISSECTOR_ASSERT_CMPINT(fidx, >=, 0);
DISSECTOR_ASSERT_CMPUINT(((size_t)fidx), <, (sizeof fields / sizeof fields[0]));
DISSECTOR_ASSERT_CMPINT(uidx, >=, 0);
DISSECTOR_ASSERT_CMPUINT(((size_t)uidx), <, (sizeof usages / sizeof usages[0]));
switch (fields[fidx].type) {{
case ETI_EOF:
DISSECTOR_ASSERT_CMPUINT(top, >=, 1);
DISSECTOR_ASSERT_CMPUINT(top, <=, 2);
if (t != root)
proto_item_set_len(t, off - struct_off);
if (repeats) {{
--repeats;
fidx = fields[old_fidx].field_handle_idx;
uidx = old_uidx;
t = proto_tree_add_subtree(root, tvb, off, -1, ett_{proto}[fields[old_fidx].ett_idx], NULL, &struct_names[fields[old_fidx].size]);
struct_off = off;
}} else {{
fidx = old_fidx + 1;
t = root;
--top;
}}
break;
case ETI_VAR_STRUCT:
case ETI_STRUCT:
DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, <, sizeof counter / sizeof counter[0]);
repeats = fields[fidx].type == ETI_VAR_STRUCT ? counter[fields[fidx].counter_off] : 1;
if (repeats) {{
--repeats;
t = proto_tree_add_subtree(root, tvb, off, -1, ett_{proto}[fields[fidx].ett_idx], NULL, &struct_names[fields[fidx].size]);
struct_off = off;
old_fidx = fidx;
old_uidx = uidx;
fidx = fields[fidx].field_handle_idx;
DISSECTOR_ASSERT_CMPUINT(top, ==, 1);
++top;
}} else {{
++fidx;
}}
break;
case ETI_PADDING:
off += fields[fidx].size;
++fidx;
break;
case ETI_CHAR:
proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, ENC_ASCII);
off += fields[fidx].size;
++fidx;
++uidx;
break;
case ETI_STRING:
{{
guint8 c = tvb_get_guint8(tvb, off);
if (c)
proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, ENC_ASCII);
else {{
proto_item *e = proto_tree_add_string(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, "NO_VALUE ('0x00...')");
if (!usages[uidx])
expert_add_info_format(pinfo, e, &ei_{proto}_missing, "required value is missing");
}}
}}
off += fields[fidx].size;
++fidx;
++uidx;
break;
case ETI_VAR_STRING:
DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, <, sizeof counter / sizeof counter[0]);
proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, counter[fields[fidx].counter_off], ENC_ASCII);
off += counter[fields[fidx].counter_off];
++fidx;
++uidx;
break;
case ETI_COUNTER:
DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, <, sizeof counter / sizeof counter[0]);
DISSECTOR_ASSERT_CMPUINT(fields[fidx].size, <=, 2);
{{
switch (fields[fidx].size) {{
case 1:
{{
guint8 x = tvb_get_guint8(tvb, off);
if (x == UINT8_MAX) {{
proto_tree_add_uint_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "NO_VALUE (0xff)");
counter[fields[fidx].counter_off] = 0;
}} else {{
proto_item *e = proto_tree_add_uint_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "%" PRIu8, x);
if (x > fields[fidx].ett_idx) {{
counter[fields[fidx].counter_off] = fields[fidx].ett_idx;
expert_add_info_format(pinfo, e, &ei_{proto}_counter_overflow, "Counter overflow: %" PRIu8 " > %" PRIu16, x, fields[fidx].ett_idx);
}} else {{
counter[fields[fidx].counter_off] = x;
}}
}}
}}
break;
case 2:
{{
guint16 x = tvb_get_letohs(tvb, off);
if (x == UINT16_MAX) {{
proto_tree_add_uint_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "NO_VALUE (0xffff)");
counter[fields[fidx].counter_off] = 0;
}} else {{
proto_item *e = proto_tree_add_uint_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "%" PRIu16, x);
if (x > fields[fidx].ett_idx) {{
counter[fields[fidx].counter_off] = fields[fidx].ett_idx;
expert_add_info_format(pinfo, e, &ei_{proto}_counter_overflow, "Counter overflow: %" PRIu16 " > %" PRIu16, x, fields[fidx].ett_idx);
}} else {{
counter[fields[fidx].counter_off] = x;
}}
}}
}}
break;
}}
}}
off += fields[fidx].size;
++fidx;
++uidx;
break;
case ETI_UINT:
switch (fields[fidx].size) {{
{mk_int_case(1, False, proto)}
{mk_int_case(2, False, proto)}
{mk_int_case(4, False, proto)}
{mk_int_case(8, False, proto)}
}}
off += fields[fidx].size;
++fidx;
++uidx;
break;
case ETI_INT:
switch (fields[fidx].size) {{
{mk_int_case(1, True, proto)}
{mk_int_case(2, True, proto)}
{mk_int_case(4, True, proto)}
{mk_int_case(8, True, proto)}
}}
off += fields[fidx].size;
++fidx;
++uidx;
break;
case ETI_UINT_ENUM:
case ETI_INT_ENUM:
proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, ENC_LITTLE_ENDIAN);
off += fields[fidx].size;
++fidx;
++uidx;
break;
case ETI_FIXED_POINT:
DISSECTOR_ASSERT_CMPUINT(fields[fidx].size, ==, 8);
DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, >, 0);
DISSECTOR_ASSERT_CMPUINT(fields[fidx].counter_off, <=, 16);
{{
gint64 x = tvb_get_letohi64(tvb, off);
if (x == INT64_MIN) {{
proto_item *e = proto_tree_add_int64_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "NO_VALUE (0x8000000000000000)");
if (!usages[uidx])
expert_add_info_format(pinfo, e, &ei_{proto}_missing, "required value is missing");
}} else {{
unsigned slack = fields[fidx].counter_off + 1;
if (x < 0)
slack += 1;
char s[21];
int n = snprintf(s, sizeof s, "%0*" PRIi64, slack, x);
DISSECTOR_ASSERT_CMPUINT(n, >, 0);
unsigned k = n - fields[fidx].counter_off;
proto_tree_add_int64_format_value(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, x, "%.*s.%s", k, s, s + k);
}}
}}
off += fields[fidx].size;
++fidx;
++uidx;
break;
case ETI_TIMESTAMP_NS:
DISSECTOR_ASSERT_CMPUINT(fields[fidx].size, ==, 8);
proto_tree_add_item(t, hf_{proto}[fields[fidx].field_handle_idx], tvb, off, fields[fidx].size, ENC_LITTLE_ENDIAN | ENC_TIME_NSECS);
off += fields[fidx].size;
++fidx;
++uidx;
break;
case ETI_DSCP:
DISSECTOR_ASSERT_CMPUINT(fields[fidx].size, ==, 1);
proto_tree_add_bitmask(t, tvb, off, hf_{proto}[fields[fidx].field_handle_idx], ett_{proto}_dscp, dscp_bits, ENC_LITTLE_ENDIAN);
off += fields[fidx].size;
++fidx;
++uidx;
break;
}}
}}
''', file=o)
print(''' return tvb_captured_length(tvb);
}
''', file=o)
print(f'''/* determine PDU length of protocol {proto.upper()} */
static guint
get_{proto}_message_len(packet_info *pinfo _U_, tvbuff_t *tvb, int offset, void *data _U_)
{{
return (guint){bl_fn}(tvb, offset);
}}
''', file=o)
if proto.startswith('eobi'):
print(f'''static int
dissect_{proto}(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
void *data)
{{
return udp_dissect_pdus(tvb, pinfo, tree, 4, NULL,
get_{proto}_message_len, dissect_{proto}_message, data);
}}
''', file=o)
else:
print(f'''static int
dissect_{proto}(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
void *data)
{{
tcp_dissect_pdus(tvb, pinfo, tree, TRUE, 4 /* bytes to read for bodylen */,
get_{proto}_message_len, dissect_{proto}_message, data);
return tvb_captured_length(tvb);
}}
''', file=o)
def gen_register_fn(st, dt, n2enum, proto, desc, o=sys.stdout):
print(f'''void
proto_register_{proto}(void)
{{''', file=o)
gen_field_info(st, dt, n2enum, proto, o)
print(f''' static ei_register_info ei[] = {{
{{
&ei_{proto}_counter_overflow,
{{ "{proto}.counter_overflow", PI_PROTOCOL, PI_WARN, "Counter Overflow", EXPFILL }}
}},
{{
&ei_{proto}_invalid_template,
{{ "{proto}.invalid_template", PI_PROTOCOL, PI_ERROR, "Invalid Template ID", EXPFILL }}
}},
{{
&ei_{proto}_invalid_length,
{{ "{proto}.invalid_length", PI_PROTOCOL, PI_ERROR, "Invalid Body Length", EXPFILL }}
}},''', file=o)
if not proto.startswith('eobi'):
print(f''' {{
&ei_{proto}_unaligned,
{{ "{proto}.unaligned", PI_PROTOCOL, PI_ERROR, "A Body Length not divisible by 8 leads to unaligned followup messages", EXPFILL }}
}},''', file=o)
print(f''' {{
&ei_{proto}_missing,
{{ "{proto}.missing", PI_PROTOCOL, PI_WARN, "A required value is missing", EXPFILL }}
}},
{{
&ei_{proto}_overused,
{{ "{proto}.overused", PI_PROTOCOL, PI_WARN, "An unused value is set", EXPFILL }}
}}
}};''', file=o)
print(f''' proto_{proto} = proto_register_protocol("{desc}",
"{proto.upper()}", "{proto}");''', file=o)
print(f''' expert_module_t *expert_{proto} = expert_register_protocol(proto_{proto});
expert_register_field_array(expert_{proto}, ei, array_length(ei));''', file=o)
print(f' proto_register_field_array(proto_{proto}, hf, array_length(hf));',
file=o)
gen_subtree_array(st, proto, o)
print(' proto_register_subtree_array(ett, array_length(ett));', file=o)
if proto.startswith('eobi'):
print(f' proto_disable_by_default(proto_{proto});', file=o)
print('}\n', file=o)
def gen_handoff_fn(proto, o=sys.stdout):
print(f'''void
proto_reg_handoff_{proto}(void)
{{
dissector_handle_t {proto}_handle = create_dissector_handle(dissect_{proto},
proto_{proto});
// cf. N7 Network Access Guide, e.g.
// https://www.xetra.com/xetra-en/technology/t7/system-documentation/release10-0/Release-10.0-2692700?frag=2692724
// https://www.xetra.com/resource/blob/2762078/388b727972b5122945eedf0e63c36920/data/N7-Network-Access-Guide-v2.0.59.pdf
''', file=o)
if proto.startswith('eti'):
print(f''' // NB: can only be called once for a port/handle pair ...
// dissector_add_uint_with_preference("tcp.port", 19006 /* LF PROD */, eti_handle);
dissector_add_uint("tcp.port", 19006 /* LF PROD */, {proto}_handle);
dissector_add_uint("tcp.port", 19043 /* PS PROD */, {proto}_handle);
dissector_add_uint("tcp.port", 19506 /* LF SIMU */, {proto}_handle);
dissector_add_uint("tcp.port", 19543 /* PS SIMU */, {proto}_handle);''', file=o)
elif proto.startswith('xti'):
print(f''' // NB: unfortunately, Cash-ETI shares the same ports as Derivatives-ETI ...
// We thus can't really add a well-know port for XTI.
// Use Wireshark's `Decode As...` or tshark's `-d tcp.port=19043,xti` feature
// to switch from ETI to XTI dissection.
dissector_add_uint_with_preference("tcp.port", 19042 /* dummy */, {proto}_handle);''', file=o)
else:
print(f''' static const int ports[] = {{
59000, // Snapshot EUREX US-allowed PROD
59001, // Incremental EUREX US-allowed PROD
59032, // Snapshot EUREX US-restricted PROD
59033, // Incremental EUREX US-restricted PROD
59500, // Snapshot EUREX US-allowed SIMU
59501, // Incremental EUREX US-allowed SIMU
59532, // Snapshot EUREX US-restricted SIMU
59533, // Incremental EUREX US-restricted SIMU
57000, // Snapshot FX US-allowed PROD
57001, // Incremental FX US-allowed PROD
57032, // Snapshot FX US-restricted PROD
57033, // Incremental FX US-restricted PROD
57500, // Snapshot FX US-allowed SIMU
57501, // Incremental FX US-allowed SIMU
57532, // Snapshot FX US-restricted SIMU
57533, // Incremental FX US-restricted SIMU
59000, // Snapshot Xetra PROD
59001, // Incremental Xetra PROD
59500, // Snapshot Xetra SIMU
59501, // Incremental Xetra SIMU
56000, // Snapshot Boerse Frankfurt PROD
56001, // Incremental Boerse Frankfurt PROD
56500, // Snapshot Boerse Frankfurt SIMU
56501 // Incremental Boerse Frankfurt SIMU
}};
for (unsigned i = 0; i < sizeof ports / sizeof ports[0]; ++i)
dissector_add_uint("udp.port", ports[i], {proto}_handle);''', file=o)
print('}', file=o)
def is_int(t):
if t is not None:
r = t.get('rootType')
return r in ('int', 'floatDecimal') or (r == 'String' and t.get('size') == '1')
return False
def is_enum(t):
if t is not None:
r = t.get('rootType')
if r == 'int' or (r == 'String' and t.get('size') == '1'):
return t.find('ValidValue') is not None
return False
def is_fixed_point(t):
return t is not None and t.get('rootType') == 'floatDecimal'
def is_timestamp_ns(t):
return t is not None and t.get('type') == 'UTCTimestamp'
def is_dscp(t):
return t is not None and t.get('name') == 'DSCP'
pad_re = re.compile('Pad[1-9]')
def is_padding(t):
if t is not None:
return t.get('rootType') == 'String' and pad_re.match(t.get('name'))
return False
def is_fixed_string(t):
if t is not None:
return t.get('rootType') in ('String', 'data') and not t.get('variableSize')
return False
def is_var_string(t):
if t is not None:
return t.get('rootType') in ('String', 'data') and t.get('variableSize') is not None
return False
def is_unsigned(t):
v = t.get('minValue')
return v is not None and not v.startswith('-')
def is_counter(t):
return t.get('type') == 'Counter'
def type_to_fmt(t):
if is_padding(t):
return f'{t.get("size")}x'
elif is_int(t):
n = int(t.get('size'))
if n == 1:
return 'B'
else:
if n == 2:
c = 'h'
elif n == 4:
c = 'i'
elif n == 8:
c = 'q'
else:
raise ValueError(f'unknown int size {n}')
if is_unsigned(t):
c = c.upper()
return c
elif is_fixed_string(t):
return f'{t.get("size")}s'
else:
return '?'
def pp_int_type(t):
if not is_int(t):
return None
s = 'i'
if is_unsigned(t):
s = 'u'
n = int(t.get('size'))
s += str(n)
return s
def is_elementary(t):
return t is not None and t.get('counter') is None
def group_members(e, dt):
xs = []
ms = []
for m in e:
t = dt.get(m.get('type'))
if is_elementary(t):
ms.append(m)
else:
if ms:
xs.append(ms)
ms = []
xs.append([m])
if ms:
xs.append(ms)
return xs
def parse_args():
    p = argparse.ArgumentParser(description='Generate Wireshark Dissector for ETI/EOBI style protocol specifications')
p.add_argument('filename', help='protocol description XML file')
p.add_argument('--proto', default='eti',
help='short protocol name (default: %(default)s)')
p.add_argument('--desc', '-d',
default='Enhanced Trading Interface',
help='protocol description (default: %(default)s)')
p.add_argument('--output', '-o', default='-',
help='output filename (default: stdout)')
args = p.parse_args()
return args
def main():
args = parse_args()
filename = args.filename
d = ET.parse(filename)
o = sys.stdout if args.output == '-' else open(args.output, 'w')
proto = args.proto
version = (d.getroot().get('version'), d.getroot().get('subVersion'))
desc = f'{args.desc} {version[0]}'
dt = get_data_types(d)
st = get_structs(d)
used = get_used_types(st)
for k in list(dt.keys()):
if k not in used:
del dt[k]
ts = get_templates(st)
ams = d.getroot().find('ApplicationMessages')
gen_header(proto, desc, o)
print(f'static int proto_{proto} = -1;', file=o)
gen_field_handles(st, dt, proto, o)
n2enum = gen_enums(dt, ts, o)
gen_dissect_structs(o)
sh = gen_subtree_handles(st, proto, o)
gen_dissect_fn(st, dt, ts, sh, ams, proto, o)
gen_register_fn(st, dt, n2enum, proto, desc, o)
gen_handoff_fn(proto, o)
if __name__ == '__main__':
sys.exit(main()) |
Perl | wireshark/tools/extract_asn1_from_spec.pl | #!/usr/bin/perl
#
# This script extracts the ASN1 definition from TS 36.331/36.355/25.331/38.331/37.355/36.413/38.413/36.423/38.423
# /38.463/38.473 , and generates asn files that can be processed by asn2wrs
# First download the specification from 3gpp.org as a word document and open it
# Then in "view" menu, select normal, draft or web layout (any kind that removes page header and footers)
# Finally save the document as a text file
# Example with TS 36.331: "perl extract_asn1_from_spec.pl 36331-xxx.txt"
# It should generate: EUTRA-RRC-Definitions.asn, EUTRA-UE-Variables.asn and EUTRA-InterNodeDefinitions
#
# Copyright 2011 Vincent Helfre and Erwan Yvin
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
use warnings;
$input_file = $ARGV[0];
$version = 0;
sub extract_spec_version;
sub extract_asn1;
open(INPUT_FILE, "< $input_file") or die "Can not open file $input_file";
extract_spec_version();
extract_asn1();
close(INPUT_FILE);
# This subroutine extracts the version of the specification
sub extract_spec_version {
my $line;
while($line = <INPUT_FILE>){
if($line =~ m/3GPP TS ((25|36|38)\.331|(36|37)\.355|(36|38)\.413|(36|38)\.423|36\.(443|444)|(36|38)\.455|38\.463|38\.473|37\.483) V/){
$version = $line;
return;
}
}
}
# This subroutine copies the text delimited by -- ASN1START and -- ASN1STOP in INPUT_FILE
# and copies it into OUTPUT_FILE.
# The OUTPUT_FILE is opened on encounter of the keyword "DEFINITIONS AUTOMATIC TAGS"
# and closed on encounter of the keyword "END"
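# For example, a spec fragment such as (illustrative content):
#   -- ASN1START
#   RRCConnectionRequest ::= SEQUENCE {...}
#   -- ASN1STOP
# has the lines between the two markers copied into the current output file.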
sub extract_asn1 {
my $line;
my $prev_line;
my $is_asn1 = 0;
my $output_file_name = 0;
my $file_name_found = 0;
while($line = <INPUT_FILE>){
if ($line =~ m/-- ASN1STOP/) {
$is_asn1 = 0;
}
if(($file_name_found == 0) && ($line =~ m/^LPP-PDU-Definitions/)){
$output_file_name = "LPP-PDU-Definitions.asn";
print "generating $output_file_name\n";
open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
$file_name_found = 1;
syswrite OUTPUT_FILE,"-- "."$version"."\n";
}
if(($file_name_found == 0) && ($line =~ m/^LPP-Broadcast-Definitions/)){
$output_file_name = "LPP-Broadcast-Definitions.asn";
print "generating $output_file_name\n";
open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
$file_name_found = 1;
syswrite OUTPUT_FILE,"-- "."$version"."\n";
}
if(($file_name_found == 0) && ($line =~ m/SonTransfer-IEs/)){
$output_file_name = "S1AP-SonTransfer-IEs.asn";
print "generating $output_file_name\n";
open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
$is_asn1 = 1;
$file_name_found = 1;
syswrite OUTPUT_FILE,"-- "."$version"."\n";
}
if(($file_name_found == 0) && ($line =~ m/itu-t \(0\) identified-organization \(4\) etsi \(0\) mobileDomain \(0\)/)){
($output_file_name) = ($prev_line =~ m/^([a-zA-Z0-9\-]+)\s/);
$output_file_name = "$output_file_name".".asn";
print "generating $output_file_name\n";
open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
$is_asn1 = 1;
$file_name_found = 1;
syswrite OUTPUT_FILE,"-- "."$version"."\n";
syswrite OUTPUT_FILE,"$prev_line";
}
if(($file_name_found == 0) && ($line =~ m/DEFINITIONS AUTOMATIC TAGS ::=/)){
($output_file_name) = ($line =~ m/^([a-zA-Z0-9\-]+)\s+DEFINITIONS AUTOMATIC TAGS ::=/);
$output_file_name = "$output_file_name".".asn";
print "generating $output_file_name\n";
open(OUTPUT_FILE, "> $output_file_name") or die "Can not open file $output_file_name";
$is_asn1 = 1;
$file_name_found = 1;
syswrite OUTPUT_FILE,"-- "."$version"."\n";
}
if (($line =~ /^END[\r\n]/) && (defined fileno OUTPUT_FILE)){
syswrite OUTPUT_FILE,"$line";
close(OUTPUT_FILE);
$is_asn1 = 0;
$file_name_found = 0;
}
if (($is_asn1 == 1) && (defined fileno OUTPUT_FILE)){
syswrite OUTPUT_FILE,"$line";
}
if ($line =~ m/-- ASN1START/) {
$is_asn1 = 1;
}
$prev_line = $line;
}
} |
Perl | wireshark/tools/fix-encoding-args.pl | #!/usr/bin/env perl
#
# Copyright 2011, William Meier <wmeier[AT]newsguy.com>
#
# A program to fix encoding args for certain Wireshark API function calls
# from TRUE/FALSE to ENC_?? as appropriate (and possible)
# - proto_tree_add_item
# - proto_tree_add_bits_item
# - proto_tree_add_bits_ret_val
# - proto_tree_add_bitmask
# - proto_tree_add_bitmask_text !! ToDo: encoding arg not last arg
# - tvb_get_bits
# - tvb_get_bits16
# - tvb_get_bits24
# - tvb_get_bits32
# - tvb_get_bits64
# - ptvcursor_add
# - ptvcursor_add_no_advance
# - ptvcursor_add_with_subtree !! ToDo: encoding arg not last arg
#
# ToDo: Rework program so that it can better be used to *validate* encoding-args
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
use strict;
use warnings;
use Getopt::Long;
# Conversion "Requests"
# Standard conversions
my $searchReplaceFalseTrueHRef =
{
"FALSE" => "ENC_BIG_ENDIAN",
"0" => "ENC_BIG_ENDIAN",
"TRUE" => "ENC_LITTLE_ENDIAN",
"1" => "ENC_LITTLE_ENDIAN"
};
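# Example: for an hf[] entry whose field type selects the hash above (hf_foo
# being a placeholder), the call
#   proto_tree_add_item(tree, hf_foo, tvb, offset, 2, TRUE);
# is rewritten as
#   proto_tree_add_item(tree, hf_foo, tvb, offset, 2, ENC_LITTLE_ENDIAN);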
my $searchReplaceEncNAHRef =
{
"FALSE" => "ENC_NA",
"0" => "ENC_NA",
"TRUE" => "ENC_NA",
"1" => "ENC_NA",
"ENC_LITTLE_ENDIAN" => "ENC_NA",
"ENC_BIG_ENDIAN" => "ENC_NA",
"ENC_ASCII|ENC_NA" => "ENC_NA",
"ENC_ASCII | ENC_NA" => "ENC_NA"
};
my $searchReplaceDissectorTable =
{
"FALSE" => "STRING_CASE_SENSITIVE",
"0" => "STRING_CASE_SENSITIVE",
"BASE_NONE" => "STRING_CASE_SENSITIVE",
"TRUE" => "STRING_CASE_INSENSITIVE",
"1" => "STRING_CASE_INSENSITIVE"
};
# ---------------------------------------------------------------------
# Conversion "request" structure
# (
# [ <list of field types for which this conversion request applies> ],
# { <hash of desired encoding arg conversions> }
# )
my @types_NA =
(
[ qw (FT_NONE FT_BYTES FT_ETHER FT_IPv6 FT_IPXNET FT_OID FT_REL_OID)],
$searchReplaceEncNAHRef
);
my @types_INT =
(
[ qw (FT_UINT8 FT_UINT16 FT_UINT24 FT_UINT32 FT_UINT64 FT_INT8
FT_INT16 FT_INT24 FT_INT32 FT_INT64 FT_FLOAT FT_DOUBLE)],
$searchReplaceFalseTrueHRef
);
my @types_MISC =
(
[ qw (FT_BOOLEAN FT_IPv4 FT_GUID FT_EUI64)],
$searchReplaceFalseTrueHRef
);
my @types_STRING =
(
[qw (FT_STRING FT_STRINGZ)],
{
"FALSE" => "ENC_ASCII",
"0" => "ENC_ASCII",
"TRUE" => "ENC_ASCII",
"1" => "ENC_ASCII",
"ENC_LITTLE_ENDIAN" => "ENC_ASCII",
"ENC_BIG_ENDIAN" => "ENC_ASCII",
"ENC_NA" => "ENC_ASCII",
"ENC_ASCII|ENC_LITTLE_ENDIAN" => "ENC_ASCII",
"ENC_ASCII|ENC_BIG_ENDIAN" => "ENC_ASCII",
"ENC_UTF_8|ENC_LITTLE_ENDIAN" => "ENC_UTF_8",
"ENC_UTF_8|ENC_BIG_ENDIAN" => "ENC_UTF_8",
"ENC_EBCDIC|ENC_LITTLE_ENDIAN" => "ENC_EBCDIC",
"ENC_EBCDIC|ENC_BIG_ENDIAN" => "ENC_EBCDIC",
}
);
my @types_UINT_STRING =
(
[qw (FT_UINT_STRING)],
{
"FALSE" => "ENC_ASCII|ENC_BIG_ENDIAN",
"0" => "ENC_ASCII|ENC_BIG_ENDIAN",
"TRUE" => "ENC_ASCII|ENC_LITTLE_ENDIAN",
"1" => "ENC_ASCII|ENC_LITTLE_ENDIAN",
"ENC_BIG_ENDIAN" => "ENC_ASCII|ENC_BIG_ENDIAN",
"ENC_LITTLE_ENDIAN" => "ENC_ASCII|ENC_LITTLE_ENDIAN",
"ENC_ASCII|ENC_NA" => "ENC_ASCII|ENC_BIG_ENDIAN",
"ENC_ASCII" => "ENC_ASCII|ENC_BIG_ENDIAN",
"ENC_NA" => "ENC_ASCII|ENC_BIG_ENDIAN"
}
);
my @types_REG_PROTO =
(
[ qw (REG_PROTO)],
$searchReplaceEncNAHRef
);
# ---------------------------------------------------------------------
my @findAllFunctionList =
## proto_tree_add_bitmask_text !! ToDo: encoding arg not last arg
## ptvcursor_add_with_subtree !! ToDo: encoding arg not last arg
qw (
proto_tree_add_item
proto_tree_add_bits_item
proto_tree_add_bits_ret_val
proto_tree_add_bitmask
proto_tree_add_bitmask_with_flags
tvb_get_bits
tvb_get_bits16
tvb_get_bits24
tvb_get_bits32
tvb_get_bits64
ptvcursor_add
ptvcursor_add_no_advance
register_dissector_table
);
# ---------------------------------------------------------------------
#
# MAIN
#
my $writeFlag = '';
my $helpFlag = '';
my $action = 'fix-all';
my $result = GetOptions(
'action=s' => \$action,
'write' => \$writeFlag,
'help|?' => \$helpFlag
);
if (!$result || $helpFlag || !$ARGV[0]) {
usage();
}
if (($action ne 'fix-all') && ($action ne 'find-all')) {
usage();
}
sub usage {
print "\nUsage: $0 [--action=fix-all|find-all] [--write] FILENAME [...]\n\n";
print " --action = fix-all (default)\n";
print " Fix <certain-fcn-names>() encoding arg when possible in FILENAME(s)\n";
print " Fixes (if any) are listed on stdout)\n\n";
print " --write create FILENAME.encoding-arg-fixes (original file with fixes)\n";
print " (effective only for fix-all)\n";
print "\n";
print " --action = find-all\n";
print " Find all occurrences of <certain-fcn-names>() statements)\n";
print " highlighting the 'encoding' arg\n";
exit(1);
}
# Read through the files; fix up encoding parameter of proto_tree_add_item() calls
# Essentially:
# For each file {
# . Create a hash of the hf_index_names & associated field types from the entries in hf[]
# . For each requested "conversion request" {
# . . For each hf[] entry hf_index_name with a field type in a set of specified field types {
# . . . For each proto_tree_add_item() statement
# . . . . - replace specific encoding arg values with new values in
# . . . .   proto_tree_add_item(..., hf_index_name, ..., 'encoding-arg')
# . . . . - print the statement showing the change
# . . . }
# . . }
# . }
# . If requested and if replacements done: write new file "orig-filename.encoding-arg-fixes"
# }
#
# Note: The proto_tree_add_item() encoding arg will be converted only if
# the hf_index_name referenced is in one of the entries in hf[] in the same file
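#
# Illustrative before/after (hypothetical field hf_foo_flags, declared with
# type FT_UINT16 in this file's hf[] array):
#   proto_tree_add_item(tree, hf_foo_flags, tvb, offset, 2, TRUE);
# is rewritten to
#   proto_tree_add_item(tree, hf_foo_flags, tvb, offset, 2, ENC_LITTLE_ENDIAN);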
my $found_total = 0;
while (my $fileName = $ARGV[0]) {
shift;
my $fileContents = '';
die "No such file: \"$fileName\"\n" if (! -e $fileName);
# delete leading './'
$fileName =~ s{ ^ \. / } {}xo;
##print "$fileName\n";
# Read in the file (ouch, but it's easier that way)
open(FCI, "<", $fileName) || die("Couldn't open $fileName");
while (<FCI>) {
$fileContents .= $_;
}
close(FCI);
# Create a hash of the hf[] entries (name_index_name=>field_type)
my $hfArrayEntryFieldTypeHRef = find_hf_array_entries(\$fileContents, $fileName);
if ($action eq "fix-all") {
# Find and replace: <fcn_name_pattern>() encoding arg in $fileContents for:
# - hf[] entries with specified field types;
# - 'proto' as returned from proto_register_protocol()
my $fcn_name = "(?:proto_tree_add_item|ptvcursor_add(?:_no_advance)?)";
my $found = 0;
$found += fix_encoding_args_by_hf_type(1, \@types_NA, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
$found += fix_encoding_args_by_hf_type(1, \@types_INT, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
$found += fix_encoding_args_by_hf_type(1, \@types_MISC, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
$found += fix_encoding_args_by_hf_type(1, \@types_STRING, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
$found += fix_encoding_args_by_hf_type(1, \@types_UINT_STRING, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
$found += fix_encoding_args_by_hf_type(1, \@types_REG_PROTO, $fcn_name, \$fileContents, $hfArrayEntryFieldTypeHRef, $fileName);
# Find and replace: alters <fcn_name>() encoding arg in $fileContents
$found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "proto_tree_add_bits_(?:item|ret_val)", \$fileContents, $fileName);
$found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "proto_tree_add_bitmask", \$fileContents, $fileName);
$found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "proto_tree_add_bitmask_with_flags", \$fileContents, $fileName);
$found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "tvb_get_bits(?:16|24|32|64)?", \$fileContents, $fileName);
$found += fix_encoding_args(1, $searchReplaceFalseTrueHRef, "tvb_get_(?:ephemeral_)?unicode_string[z]?", \$fileContents, $fileName);
$found += fix_dissector_table_args(1, $searchReplaceDissectorTable, "register_dissector_table", \$fileContents, $fileName);
# If desired and if any changes, write out the changed version to a file
if (($writeFlag) && ($found > 0)) {
open(FCO, ">", $fileName . ".encoding-arg-fixes");
# open(FCO, ">", $fileName );
print FCO "$fileContents";
close(FCO);
}
$found_total += $found;
}
if ($action eq "find-all") {
# Find all proto_tree_add_item() statements
# and output same highlighting the encoding arg
$found_total += find_all(\@findAllFunctionList, \$fileContents, $fileName);
}
} # while
exit $found_total;
# ---------------------------------------------------------------------
# Create a hash containing an entry (hf_index_name => field_type) for each hf[]entry.
# also: create an entry in the hash for the 'protocol name' variable (proto... => FT_PROTOCOL)
# returns: ref to the hash
sub find_hf_array_entries {
my ($fileContentsRef, $fileName) = @_;
# The below Regexp is based on one from:
# https://web.archive.org/web/20080614012925/http://aspn.activestate.com/ASPN/Cookbook/Rx/Recipe/59811
# It is in the public domain.
# A complicated regex which matches C-style comments.
my $CCommentRegEx = qr{ / [*] [^*]* [*]+ (?: [^/*] [^*]* [*]+ )* / }xo;
# hf[] entry regex (to extract an hf_index_name and associated field type)
my $hfArrayFieldTypeRegEx = qr {
\{
\s*
&\s*([A-Z0-9_\[\]-]+) # &hf
\s*,\s*
\{\s*
.+? # (a bit dangerous)
\s*,\s*
(FT_[A-Z0-9_]+) # field type
\s*,\s*
.+?
\s*,\s*
HFILL # HFILL
}xios;
# create a copy of $fileContents with comments removed
my $fileContentsWithoutComments = $$fileContentsRef;
$fileContentsWithoutComments =~ s {$CCommentRegEx} []xg;
# find all the hf[] entries (searching $fileContentsWithoutComments).
# Create a hash keyed by the hf_index_name with the associated value being the field_type
my %hfArrayEntryFieldType;
while ($fileContentsWithoutComments =~ m{ $hfArrayFieldTypeRegEx }xgis) {
# print "$1 $2\n";
if (exists $hfArrayEntryFieldType{$1}) {
printf "%-35.35s: ? duplicate hf[] entry: no fixes done for: $1; manual action may be req'd\n", $fileName;
$hfArrayEntryFieldType{$1} = "???"; # prevent any substitutions for this hf_index_name
} else {
$hfArrayEntryFieldType{$1} = $2;
}
}
# pre-process contents to fold multiple lines and speed up matching.
$fileContentsWithoutComments =~ s/\s*=\s*/=/gs;
$fileContentsWithoutComments =~ s/^\s+//g;
# RegEx to get "proto" variable name
my $protoRegEx = qr /
^ # note m modifier below
(
[a-zA-Z0-9_]+
)
=
proto_register_protocol\b
/xom;
# Find all registered protocols
while ($fileContentsWithoutComments =~ m { $protoRegEx }xgom ) {
##print "$1\n";
if (exists $hfArrayEntryFieldType{$1}) {
printf "%-35.35s: ? duplicate 'proto': no fixes done for: $1; manual action may be req'd\n", $fileName;
$hfArrayEntryFieldType{$1} = "???"; # prevent any substitutions for this protocol
} else {
$hfArrayEntryFieldType{$1} = "REG_PROTO";
}
}
return \%hfArrayEntryFieldType;
}
# ---------------------------------------------------------------------
# fix_encoding_args
# Substitute new values for the specified <fcn_name>() encoding arg values
# when the encoding arg is the *last* arg of the call to fcn_name
# args:
# substitute_flag: 1: replace specified encoding arg values by a new value (keys/values in search hash);
# ref to hash containing search (keys) and replacement (values) for encoding arg
# fcn_name string
# ref to string containing file contents
# filename string
#
{ # block begin
# shared variables
my $fileName;
my $searchReplaceHRef;
my $found;
sub fix_encoding_args {
(my $subFlag, $searchReplaceHRef, my $fcn_name, my $fileContentsRef, $fileName) = @_;
my $encArgPat;
if ($subFlag == 1) {
# just match for <fcn_name>() statements which have an encoding arg matching one of the
# keys in the searchReplace hash.
# Escape any "|" characters in the keys
# and then create "alternatives" string containing all the resulting key strings. Ex: "A|B|C\|D|..."
$encArgPat = join "|", map { my $copy = $_; $copy =~ s{ ( \| ) }{\\$1}gx; $copy } keys %$searchReplaceHRef;
} elsif ($subFlag == 3) {
# match for <fcn_name>() statements for any value of the encoding parameter
# IOW: find all the <fcn_name> statements
$encArgPat = qr / [^,)]+? /x;
}
# build the complete pattern
my $patRegEx = qr /
# part 1: $1
(
(?:^|=) # don't try to handle fcn_name call when arg of another fcn call
\s*
$fcn_name \s* \(
[^;]+? # a bit dangerous
,\s*
)
# part 2: $2
# exact match of pattern (including spaces)
((?-x)$encArgPat)
# part 3: $3
(
\s* \)
\s* ;
)
/xms; # m for ^ above
##print "$patRegEx\n";
## Match and substitute as specified
$found = 0;
$$fileContentsRef =~ s/ $patRegEx /patsubx($1,$2,$3)/xges;
return $found;
}
# Called from fix_encoding_args to determine replacement string when a regex match is encountered
# $_[0]: part 1
# $_[1]: part 2: encoding arg
# $_[2]: part 3
# lookup the desired replacement value for the encoding arg
# print match string showing and highlighting the encoding arg replacement
# return "replacement" string
sub patsubx {
$found += 1;
my $substr = exists $$searchReplaceHRef{$_[1]} ? $$searchReplaceHRef{$_[1]} : "???";
my $str = sprintf("%s[[%s]-->[%s]]%s", $_[0], $_[1], $substr, $_[2]);
$str =~ tr/\t\n\r/ /d;
printf "%s: $str\n", $fileName;
return $_[0] . $substr . $_[2];
}
} # block end
# ---------------------------------------------------------------------
# fix_encoding_args_by_hf_type
#
# Substitute new values for certain proto_tree_add_item() encoding arg
# values (for specified hf field types)
# Variants: search for and display for "exceptions" to allowed encoding arg values;
# search for and display all encoding arg values
# args:
# substitute_flag: 1: replace specified encoding arg values by a new value (keys/values in search hash);
# 2: search for "exceptions" to allowed encoding arg values (values in search hash);
# 3: search for all encoding arg values
# ref to array containing two elements:
# - ref to array containing hf[] types to be processed (FT_STRING, etc)
# - ref to hash containing search (keys) and replacement (values) for encoding arg
# fcn_name string
# ref to string containing file contents
# ref to hfArrayEntries hash (key: hf name; value: field type)
# filename string
{ # block begin
# shared variables
my $fileName;
my $searchReplaceHRef;
my $found;
my $hf_field_type;
sub fix_encoding_args_by_hf_type {
(my $subFlag, my $mapArg, my $fcn_name, my $fileContentsRef, my $hfArrayEntryFieldTypeHRef, $fileName) = @_;
my $hf_index_name;
my $hfTypesARef;
my $encArgPat;
$hfTypesARef = $$mapArg[0];
$searchReplaceHRef = $$mapArg[1];
my %hfTypes;
@hfTypes{@$hfTypesARef}=();
# set up the encoding arg match pattern
if ($subFlag == 1) {
# just match for <fcn_name>() statements which have an encoding arg matching one of the
# keys in the searchReplace hash.
# Escape any "|" characters in the keys
# and then create "alternatives" string containing all the resulting key strings. Ex: "A|B|C\|D|..."
$encArgPat = join "|", map { my $copy = $_; $copy =~ s{ ( \| ) }{\\$1}gx; $copy } keys %$searchReplaceHRef;
} elsif ($subFlag == 2) {
# Find all the <fcn_name>() statements wherein the encoding arg is a value other than
# one of the "replace" values.
# Uses zero-length negative-lookahead to find <fcn_name>() statements for which the encoding
# arg is something other than one of the provided replace values.
# Escape any "|" characters in the values to be matched
# and then create "alternatives" string containing all the value strings. Ex: "A|B|C\|D|..."
my $match_str = join "|", map { my $copy = $_; $copy =~ s{ ( \| ) }{\\$1}gx; $copy } values %$searchReplaceHRef;
$encArgPat = qr /
(?! # negative zero-length look-ahead
\s*
(?: $match_str ) # alternatives we don't want to match
\s*
)
[^,)]+?        # OK: encoding arg is other than one of the alternatives:
# match to end of the arg
/x;
} elsif ($subFlag == 3) {
# match for <fcn_name>() statements for any value of the encoding parameter
# IOW: find all the proto_tree_add_item statements with an hf entry of the desired types
$encArgPat = qr / [^,)]+? /x;
}
my @hf_index_names;
# For each hf[] entry which matches a type in %hfTypes do replacements
$found = 0;
foreach my $key (keys %$hfArrayEntryFieldTypeHRef) {
$hf_index_name = $key;
$hf_field_type = $$hfArrayEntryFieldTypeHRef{$key};
##printf "--> %-35.35s: %s\n", $hf_index_name, $hf_field_type;
next unless exists $hfTypes{$hf_field_type}; # Do we want to process for this hf[] entry type ?
##print "\n$hf_index_name $hf_field_type\n";
push @hf_index_names, $hf_index_name;
}
if (@hf_index_names) {
# build the complete pattern
my $hf_index_names_re = join('|', @hf_index_names);
$hf_index_names_re =~ s/\[|\]/\\$&/g; # escape any "[" or "]" characters
my $patRegEx = qr /
# part 1: $1
(
$fcn_name \s* \(
[^;]+?
,\s*
(?:$hf_index_names_re)
\s*,
[^;]+
,\s*
)
# part 2: $2
# exact match of pattern (including spaces)
((?-x)$encArgPat)
# part 3: $3
(
\s* \)
\s* ;
)
/xs;
##print "\n$patRegEx\n";
## Match and substitute as specified
$$fileContentsRef =~ s/ $patRegEx /patsub($1,$2,$3)/xges;
}
return $found;
}
# Called from fix_encoding_args to determine replacement string when a regex match is encountered
# $_[0]: part 1
# $_[1]: part 2: encoding arg
# $_[2]: part 3
# lookup the desired replacement value for the encoding arg
# print match string showing and highlighting the encoding arg replacement
# return "replacement" string
sub patsub {
$found += 1;
my $substr = exists $$searchReplaceHRef{$_[1]} ? $$searchReplaceHRef{$_[1]} : "???";
my $str = sprintf("%s[[%s]-->[%s]]%s", $_[0], $_[1], $substr, $_[2]);
$str =~ tr/\t\n\r/ /d;
printf "%s: %-17.17s $str\n", $fileName, $hf_field_type . ":";
return $_[0] . $substr . $_[2];
}
} # block end
# ---------------------------------------------------------------------
# fix_dissector_table_args
# Substitute new values for the specified <fcn_name>() encoding arg values
# when the encoding arg is the *last* arg of the call to fcn_name
# args:
# substitute_flag: 1: replace specified encoding arg values by a new value (keys/values in search hash);
# ref to hash containing search (keys) and replacement (values) for encoding arg
# fcn_name string
# ref to string containing file contents
# filename string
#
{ # block begin
# shared variables
my $fileName;
my $searchReplaceHRef;
my $found;
sub fix_dissector_table_args {
(my $subFlag, $searchReplaceHRef, my $fcn_name, my $fileContentsRef, $fileName) = @_;
my $encArgPat;
if ($subFlag == 1) {
# just match for <fcn_name>() statements which have an encoding arg matching one of the
# keys in the searchReplace hash.
# Escape any "|" characters in the keys
# and then create "alternatives" string containing all the resulting key strings. Ex: "A|B|C\|D|..."
$encArgPat = join "|", map { my $copy = $_; $copy =~ s{ ( \| ) }{\\$1}gx; $copy } keys %$searchReplaceHRef;
} elsif ($subFlag == 3) {
# match for <fcn_name>() statements for any value of the encoding parameter
# IOW: find all the <fcn_name> statements
$encArgPat = qr / [^,)]+? /x;
}
# build the complete pattern
my $patRegEx = qr /
# part 1: $1
(
(?:^|=) # don't try to handle fcn_name call when arg of another fcn call
\s*
$fcn_name \s* \(
[^;]+? # a bit dangerous
,\s*
FT_STRING[A-Z]*
,\s*
)
# part 2: $2
# exact match of pattern (including spaces)
((?-x)$encArgPat)
# part 3: $3
(
\s* \)
\s* ;
)
/xms; # m for ^ above
##print "$patRegEx\n";
## Match and substitute as specified
$found = 0;
$$fileContentsRef =~ s/ $patRegEx /patsuby($1,$2,$3)/xges;
return $found;
}
# Called from fix_encoding_args to determine replacement string when a regex match is encountered
# $_[0]: part 1
# $_[1]: part 2: encoding arg
# $_[2]: part 3
# lookup the desired replacement value for the encoding arg
# print match string showing and highlighting the encoding arg replacement
# return "replacement" string
sub patsuby {
$found += 1;
my $substr = exists $$searchReplaceHRef{$_[1]} ? $$searchReplaceHRef{$_[1]} : "???";
my $str = sprintf("%s[[%s]-->[%s]]%s", $_[0], $_[1], $substr, $_[2]);
$str =~ tr/\t\n\r/ /d;
printf "%s: $str\n", $fileName;
return $_[0] . $substr . $_[2];
}
} # block end
# ---------------------------------------------------------------------
# Find all <fcnList> statements
# and output same highlighting the encoding arg
# Currently: encoding arg is matched as the *last* arg of the function call
sub find_all {
my( $fcnListARef, $fileContentsRef, $fileName) = @_;
my $found = 0;
my $fcnListPat = join "|", @$fcnListARef;
my $pat = qr /
(
(?:$fcnListPat) \s* \(
[^;]+
, \s*
)
(
[^ \t,)]+?
)
(
\s* \)
\s* ;
)
/xs;
while ($$fileContentsRef =~ / $pat /xgso) {
my $str = "${1}[[${2}]]${3}\n";
$str =~ tr/\t\n\r/ /d;
$str =~ s/ \s+ / /xg;
print "$fileName: $str\n";
$found += 1;
}
return $found;
} |
Shell Script | wireshark/tools/fuzz-test.sh | #!/bin/bash
#
# Fuzz-testing script for TShark
#
# This script uses Editcap to add random errors ("fuzz") to a set of
# capture files specified on the command line. It runs TShark on
# each fuzzed file and checks for errors. The files are processed
# repeatedly until an error is found.
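#
# Example invocation (illustrative file names; -e sets the editcap error
# probability, -p limits the number of passes):
#   tools/fuzz-test.sh -e 0.03 -p 10 ~/captures/*.pcapng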
#
# Copyright 2013 Gerald Combs <[email protected]>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
TEST_TYPE="fuzz"
# shellcheck source=tools/test-common.sh
. "$( dirname "$0" )"/test-common.sh || exit 1
# Sanity check to make sure we can find our plugins. Zero or less disables.
MIN_PLUGINS=0
# Did we catch a signal or time out?
DONE=false
# Currently running children
RUNNER_PIDS=
# Perform a two-pass analysis on the capture file?
TWO_PASS=
# Specific config profile ?
CONFIG_PROFILE=
# Run under valgrind ?
VALGRIND=0
# Abort on UTF-8 encoding errors
CHECK_UTF_8="--log-fatal-domains=UTF-8 "
# Run under AddressSanitizer ?
ASAN=$CONFIGURED_WITH_ASAN
# Don't skip any byte from being changed
CHANGE_OFFSET=0
# The maximum permitted amount of memory leaked. Eventually this should be
# worked down to zero, but right now that would fail on every single capture.
# Only has effect when running under valgrind.
MAX_LEAK=$(( 1024 * 100 ))
# Our maximum run time.
RUN_START_SECONDS=$SECONDS
RUN_MAX_SECONDS=$(( RUN_START_SECONDS + 86400 ))
# To do: add options for file names and limits
while getopts "2b:C:d:e:agp:P:o:t:U" OPTCHAR ; do
case $OPTCHAR in
a) ASAN=1 ;;
2) TWO_PASS="-2 " ;;
b) WIRESHARK_BIN_DIR=$OPTARG ;;
C) CONFIG_PROFILE="-C $OPTARG " ;;
d) TMP_DIR=$OPTARG ;;
e) ERR_PROB=$OPTARG ;;
g) VALGRIND=1 ;;
p) MAX_PASSES=$OPTARG ;;
P) MIN_PLUGINS=$OPTARG ;;
o) CHANGE_OFFSET=$OPTARG ;;
t) RUN_MAX_SECONDS=$(( RUN_START_SECONDS + OPTARG )) ;;
U) CHECK_UTF_8= ;; # disable
*) printf "Unknown option %s\n" "$OPTCHAR"
esac
done
shift $((OPTIND - 1))
### usually you won't have to change anything below this line ###
ws_bind_exec_paths
ws_check_exec "$TSHARK" "$EDITCAP" "$CAPINFOS" "$DATE" "$TMP_DIR"
COMMON_ARGS="${CONFIG_PROFILE}${TWO_PASS}${CHECK_UTF_8}"
KEEP=
PACKET_RANGE=
if [ $VALGRIND -eq 1 ]; then
RUNNER=$( dirname "$0" )"/valgrind-wireshark.sh"
COMMON_ARGS="-b $WIRESHARK_BIN_DIR $COMMON_ARGS"
declare -a RUNNER_ARGS=("" "-T")
# Valgrind requires more resources, so permit 1.5x memory and 3x time
# (1.5x time is too small for a few large captures in the menagerie)
MAX_CPU_TIME=$(( 3 * MAX_CPU_TIME ))
MAX_VMEM=$(( 3 * MAX_VMEM / 2 ))
# Valgrind is slow. Trim captures to the first 10k packets so that
# we don't time out.
KEEP=-r
PACKET_RANGE=1-10000
else
# Not using valgrind, use regular tshark.
# TShark arguments (you won't have to change these)
# n Disable network object name resolution
# V Print a view of the details of the packet rather than a one-line summary of the packet
# x Cause TShark to print a hex and ASCII dump of the packet data after printing the summary or details
# r Read packet data from the following infile
RUNNER="$TSHARK"
declare -a RUNNER_ARGS=("-nVxr" "-nr")
# Running with a read filter but without generating the tree exposes some
# "More than 100000 items in tree" bugs.
# Not sure if we want to add even more cycles to the fuzz bot's work load...
#declare -a RUNNER_ARGS=("${CONFIG_PROFILE}${TWO_PASS}-nVxr" "${CONFIG_PROFILE}${TWO_PASS}-nr" "-Yframe ${CONFIG_PROFILE}${TWO_PASS}-nr")
fi
# Make sure we have a valid test set
FOUND=0
for CF in "$@" ; do
if [ "$OSTYPE" == "cygwin" ] ; then
CF=$( cygpath --windows "$CF" )
fi
"$CAPINFOS" "$CF" > /dev/null 2>&1 && FOUND=1
if [ $FOUND -eq 1 ] ; then break ; fi
done
if [ $FOUND -eq 0 ] ; then
cat <<FIN
Error: No valid capture files found.
Usage: $( basename "$0" ) [-2] [-b bin_dir] [-C config_profile] [-d work_dir] [-e error probability] [-o changes offset] [-g] [-a] [-p passes] capture file 1 [capture file 2]...
FIN
exit 1
fi
PLUGIN_COUNT=$( $TSHARK -G plugins | grep -c dissector )
if [ "$MIN_PLUGINS" -gt 0 ] && [ "$PLUGIN_COUNT" -lt "$MIN_PLUGINS" ] ; then
echo "Warning: Found fewer plugins than expected ($PLUGIN_COUNT vs $MIN_PLUGINS)."
exit 1
fi
if [ $ASAN -ne 0 ]; then
echo -n "ASan enabled. Virtual memory limit is "
ulimit -v
else
echo "ASan disabled. Virtual memory limit is $MAX_VMEM"
fi
HOWMANY="forever"
if [ "$MAX_PASSES" -gt 0 ]; then
HOWMANY="$MAX_PASSES passes"
fi
echo -n "Running $RUNNER $COMMON_ARGS with args: "
printf "\"%s\"\n" "${RUNNER_ARGS[@]}"
echo "($HOWMANY)"
echo ""
# Clean up on <ctrl>C, etc
trap_all() {
printf '\n\nCaught signal. Exiting.\n'
rm -f "$TMP_DIR/$TMP_FILE" "$TMP_DIR/$ERR_FILE"*
exit 0
}
trap_abrt() {
for RUNNER_PID in $RUNNER_PIDS ; do
kill -ABRT "$RUNNER_PID"
done
trap_all
}
trap trap_all HUP INT TERM
trap trap_abrt ABRT
# Iterate over our capture files.
PASS=0
while { [ $PASS -lt "$MAX_PASSES" ] || [ "$MAX_PASSES" -lt 1 ]; } && ! $DONE ; do
PASS=$(( PASS+1 ))
echo "Pass $PASS:"
RUN=0
for CF in "$@" ; do
if $DONE; then
break # We caught a signal or timed out
fi
RUN=$(( RUN + 1 ))
if [ $(( RUN % 50 )) -eq 0 ] ; then
echo " [Pass $PASS]"
fi
if [ "$OSTYPE" == "cygwin" ] ; then
CF=$( cygpath --windows "$CF" )
fi
"$CAPINFOS" "$CF" > /dev/null 2> "$TMP_DIR/$ERR_FILE"
RETVAL=$?
if [ $RETVAL -eq 1 ] || [ $RETVAL -eq 2 ] ; then
echo "Not a valid capture file"
rm -f "$TMP_DIR/$ERR_FILE"
continue
elif [ $RETVAL -ne 0 ] && ! $DONE ; then
# Some other error
ws_exit_error
fi
# Choose a random subset of large captures.
KEEP=
PACKET_RANGE=
CF_PACKETS=$( "$CAPINFOS" -T -r -c "$CF" | cut -f2 )
if [[ $CF_PACKETS -gt $MAX_FUZZ_PACKETS ]] ; then
START_PACKET=$(( CF_PACKETS - MAX_FUZZ_PACKETS ))
START_PACKET=$( shuf --input-range=1-$START_PACKET --head-count=1 )
END_PACKET=$(( START_PACKET + MAX_FUZZ_PACKETS ))
KEEP=-r
PACKET_RANGE="$START_PACKET-$END_PACKET"
printf " Fuzzing packets %d-%d of %d\n" "$START_PACKET" "$END_PACKET" "$CF_PACKETS"
fi
DISSECTOR_BUG=0
VG_ERR_CNT=0
printf " %s: " "$( basename "$CF" )"
# shellcheck disable=SC2086
"$EDITCAP" -E "$ERR_PROB" -o "$CHANGE_OFFSET" $KEEP "$CF" "$TMP_DIR/$TMP_FILE" $PACKET_RANGE > /dev/null 2>&1
RETVAL=$?
if [ $RETVAL -ne 0 ] ; then
# shellcheck disable=SC2086
"$EDITCAP" -E "$ERR_PROB" -o "$CHANGE_OFFSET" $KEEP -T ether "$CF" "$TMP_DIR/$TMP_FILE" $PACKET_RANGE \
> /dev/null 2>&1
RETVAL=$?
if [ $RETVAL -ne 0 ] ; then
echo "Invalid format for editcap"
continue
fi
fi
FILE_START_SECONDS=$SECONDS
RUNNER_PIDS=
RUNNER_ERR_FILES=
for ARGS in "${RUNNER_ARGS[@]}" ; do
if $DONE; then
break # We caught a signal
fi
echo -n "($ARGS) "
# Run in a child process with limits.
(
# Set some limits to the child processes, e.g. stop it if
# it's running longer than MAX_CPU_TIME seconds. (ulimit
# is not supported well on cygwin - it shows some warnings -
# and the features we use may not all be supported on some
# UN*X platforms.)
ulimit -S -t "$MAX_CPU_TIME" -s "$MAX_STACK"
# Allow core files to be generated
ulimit -c unlimited
# Don't enable ulimit -v when using ASAN. See
# https://github.com/google/sanitizers/wiki/AddressSanitizer#ulimit--v
if [ $ASAN -eq 0 ]; then
ulimit -S -v "$MAX_VMEM"
fi
# shellcheck disable=SC2016
SUBSHELL_PID=$($SHELL -c 'echo $PPID')
printf 'Command and args: %s %s %s\n' "$RUNNER" "$COMMON_ARGS" "$ARGS" > "$TMP_DIR/$ERR_FILE.$SUBSHELL_PID"
# shellcheck disable=SC2086
"$RUNNER" $COMMON_ARGS $ARGS "$TMP_DIR/$TMP_FILE" \
> /dev/null 2>> "$TMP_DIR/$ERR_FILE.$SUBSHELL_PID"
) &
RUNNER_PID=$!
RUNNER_PIDS="$RUNNER_PIDS $RUNNER_PID"
RUNNER_ERR_FILES="$RUNNER_ERR_FILES $TMP_DIR/$ERR_FILE.$RUNNER_PID"
if [ $SECONDS -ge $RUN_MAX_SECONDS ] ; then
printf "\nStopping after %d seconds.\n" $(( SECONDS - RUN_START_SECONDS ))
DONE=true
fi
done
for RUNNER_PID in $RUNNER_PIDS ; do
wait "$RUNNER_PID"
RUNNER_RETVAL=$?
mv "$TMP_DIR/$ERR_FILE.$RUNNER_PID" "$TMP_DIR/$ERR_FILE"
# Uncomment the next two lines to enable dissector bug
# checking.
#grep -i "dissector bug" $TMP_DIR/$ERR_FILE \
# > /dev/null 2>&1 && DISSECTOR_BUG=1
if [ $VALGRIND -eq 1 ] && ! $DONE; then
VG_ERR_CNT=$( grep "ERROR SUMMARY:" "$TMP_DIR/$ERR_FILE" | cut -f4 -d' ' )
VG_DEF_LEAKED=$( grep "definitely lost:" "$TMP_DIR/$ERR_FILE" | cut -f7 -d' ' | tr -d , )
VG_IND_LEAKED=$( grep "indirectly lost:" "$TMP_DIR/$ERR_FILE" | cut -f7 -d' ' | tr -d , )
VG_TOTAL_LEAKED=$(( VG_DEF_LEAKED + VG_IND_LEAKED ))
if [ $RUNNER_RETVAL -ne 0 ] ; then
echo "General Valgrind failure."
VG_ERR_CNT=1
elif [ "$VG_TOTAL_LEAKED" -gt "$MAX_LEAK" ] ; then
echo "Definitely + indirectly ($VG_DEF_LEAKED + $VG_IND_LEAKED) exceeds max ($MAX_LEAK)."
echo "Definitely + indirectly ($VG_DEF_LEAKED + $VG_IND_LEAKED) exceeds max ($MAX_LEAK)." >> "$TMP_DIR/$ERR_FILE"
VG_ERR_CNT=1
fi
if grep -q "Valgrind cannot continue" "$TMP_DIR/$ERR_FILE" ; then
echo "Valgrind unable to continue."
VG_ERR_CNT=-1
fi
fi
if ! $DONE && { [ $RUNNER_RETVAL -ne 0 ] || [ $DISSECTOR_BUG -ne 0 ] || [ $VG_ERR_CNT -ne 0 ]; } ; then
# shellcheck disable=SC2086
rm -f $RUNNER_ERR_FILES
ws_exit_error
fi
done
printf " OK (%s seconds)\\n" $(( SECONDS - FILE_START_SECONDS ))
rm -f "$TMP_DIR/$TMP_FILE" "$TMP_DIR/$ERR_FILE"
done
done |
Shell Script | wireshark/tools/gen-bugnote | #!/bin/bash
#
# Given a Wireshark issue ID, fetch its title and prepare an entry suitable
# for pasting into the release notes. Requires curl and jq.
#
# Usage: gen-bugnote <issue number>
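#
# Example (illustrative issue number and title): "gen-bugnote 12345" copies
# a release-note line such as
#   * Fix dissection of example packets. wsbuglink:12345[].
# to the clipboard.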
#
# Copyright 2013 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
gitlab_issue_url_pfx="https://gitlab.com/api/v4/projects/wireshark%2Fwireshark/issues"
issue_id="${1#\#}" # Strip leading "#"
case "$OSTYPE" in
darwin*)
clipboard_cmd="pbcopy -Pascii"
;;
cygwin*)
clipboard_cmd="cat > /dev/clipboard"
;;
linux*)
clipboard_cmd="xsel --clipboard"
;;
*)
echo "Unable to copy to clipboard"
clipboard_cmd="cat > /dev/null"
;;
esac
if [ -z "$issue_id" ] ; then
echo "Usage: $( basename "$0" ) <issue id>"
exit 1
fi
issue_title=$(
curl -s -o - "${gitlab_issue_url_pfx}/$issue_id" \
| jq '.title'
)
# We can escape backslashes in jq's --raw-output or we can trim quotes off
# its plain output.
issue_title="${issue_title%\"}"
issue_title="${issue_title#\"}"
trailing_period=""
if [[ ! ${issue_title: -1} =~ [[:punct:]] ]] ; then
trailing_period="."
fi
printf "* %s%s wsbuglink:${issue_id}[].\\n" "$issue_title" "$trailing_period" \
| $clipboard_cmd
echo "Copied $issue_id: $issue_title" |
|
Python | wireshark/tools/generate-bacnet-vendors.py | #!/usr/bin/env python2
'''
Copyright 2014 Anish Bhatt <[email protected]>
SPDX-License-Identifier: GPL-2.0-or-later
'''
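# Scrapes the BACnet vendor ID table and prints a C value_string array;
# a typical output row looks like this (illustrative):
#   {   0, "ASHRAE" },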
from bs4 import BeautifulSoup
import urllib
import sys
import string
# Required to convert accents/diaeresis etc.
import translitcodec
f = urllib.urlopen("http://www.bacnet.org/VendorID/BACnet%20Vendor%20IDs.htm")
html = f.read()
soup = BeautifulSoup(''.join(html))
entry = "static const value_string\nBACnetVendorIdentifiers [] = {"
table = soup.find('table')
rows = table.findAll('tr')
for tr in rows:
cols = tr.findAll('td')
for index,td in enumerate(cols[0:2]):
text = ''.join(td.find(text=True))
if index == 0:
entry = " { %3s" % text
else:
entry += ", \"%s\" }," % text.rstrip()
# Required specially for "Dorsette's Inc." due to malformed html
entry = entry.replace(u'\u0092', u'\'')
# Required to convert accents/diaeresis etc.
entry = entry.encode('translit/long')
# Encode to ascii so we can out to file
entry = entry.encode("ascii",'ignore')
print entry
entry = " { 0, NULL }\n};"
print entry.encode("ascii") |
Python | wireshark/tools/generate-dissector.py | #!/usr/bin/env python3
#
# Copyright 2019, Dario Lombardo <[email protected]>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# This script generates a Wireshark skeleton dissector, based on the example in the doc/ directory.
#
# Example usage:
#
# generate-dissector.py --name "My Self" --email "[email protected]" --protoname "The dumb protocol"
# --protoshortname DUMB --protoabbrev dumb --license GPL-2.0-or-later --years "2019-2020"
#
import argparse
from datetime import datetime
import os
parser = argparse.ArgumentParser(description='The Wireshark Dissector Generator')
parser.add_argument("--name", help="The author of the dissector", required=True)
parser.add_argument("--email", help="The email address of the author", required=True)
parser.add_argument("--protoname", help="The name of the protocol", required=True)
parser.add_argument("--protoshortname", help="The protocol short name", required=True)
parser.add_argument("--protoabbrev", help="The protocol abbreviation", required=True)
parser.add_argument("--license", help="The license for this dissector (please use a SPDX-License-Identifier). If omitted, %(default)s will be used", default="GPL-2.0-or-later")
parser.add_argument("--years", help="Years of validity for the license. If omitted, the current year will be used", default=str(datetime.now().year))
parser.add_argument("-f", "--force", action='store_true', help="Force overwriting the dissector file if it already exists")
parser.add_argument("-p", "--plugin", action='store_true', help="Create as a plugin. Default is to create in epan")
def wsdir():
return os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
def output_dir(args):
if args.plugin:
os.makedirs(os.path.join(wsdir(), "plugins/epan/" + args.protoabbrev), exist_ok=True)
return os.path.join(wsdir(), "plugins/epan/" + args.protoabbrev)
return os.path.join(wsdir(), "epan/dissectors")
def output_file(args):
return os.path.join(output_dir(args), "packet-" + args.protoabbrev + ".c")
def read_skeleton(filename):
skeletonfile = os.path.join(wsdir(), "doc/" + filename)
print("Reading skeleton file: " + skeletonfile)
return open(skeletonfile).read()
def replace_fields(buffer, args):
print("Replacing fields in skeleton")
output = buffer\
.replace("YOUR_NAME", args.name)\
.replace("YOUR_EMAIL_ADDRESS", args.email)\
.replace("PROTONAME", args.protoname)\
.replace("PROTOSHORTNAME", args.protoshortname)\
.replace("PROTOABBREV", args.protoabbrev)\
.replace("FIELDNAME", "Sample Field")\
.replace("FIELDABBREV", "sample_field")\
.replace("FT_FIELDTYPE", "FT_STRING")\
.replace("FIELDDISPLAY", "BASE_NONE")\
.replace("FIELDCONVERT", "NULL")\
.replace("BITMASK", "0x0")\
.replace("FIELDDESCR", "NULL")\
.replace("MAX_NEEDED_FOR_HEURISTICS", "1")\
.replace("TEST_HEURISTICS_FAIL", "0")\
.replace("ENC_xxx", "ENC_NA")\
.replace("EXPERTABBREV", "expert")\
.replace("PI_GROUP", "PI_PROTOCOL")\
.replace("PI_SEVERITY", "PI_ERROR")\
.replace("TEST_EXPERT_condition", "0")\
.replace("const char *subtree", "\"\"")\
.replace("LICENSE", args.license)\
.replace("YEARS", args.years)
return output
def write_dissector(buffer, args):
ofile = output_file(args)
if os.path.isfile(ofile) and not args.force:
raise Exception("The file " + ofile + " already exists. You're likely overwriting an existing dissector.")
print("Writing output file: " + ofile)
return open(ofile, "w").write(buffer)
def patch_makefile(args):
if args.plugin:
cmakefile = os.path.join(wsdir(), "CMakeLists.txt")
patchline = "\t\tplugins/epan/" + args.protoabbrev
groupstart = "set(PLUGIN_SRC_DIRS"
else:
cmakefile = os.path.join(wsdir(), "epan/dissectors/CMakeLists.txt")
patchline = "\t${CMAKE_CURRENT_SOURCE_DIR}/packet-" + args.protoabbrev + ".c"
groupstart = "set(DISSECTOR_SRC"
print("Patching makefile: " + cmakefile)
output = ""
in_group = False
patched = False
for line in open(cmakefile):
line_strip = line.strip()
if in_group and line_strip == ")":
in_group = False
if in_group and not patched and line_strip > patchline:
output += patchline + "\n"
patched = True
if line_strip == groupstart:
in_group = True
if line_strip != patchline:
output += line
open(cmakefile, "w").write(output)
def write_plugin_makefile(args):
if not args.plugin:
return True
buffer = replace_fields(read_skeleton("CMakeLists-PROTOABBREV.txt"), args)
ofile = os.path.join(output_dir(args), "CMakeLists.txt")
print("Writing output file: " + ofile)
return open(ofile, "w").write(buffer)
def print_header():
print("")
print("**************************************************")
print("* Wireshark skeleton dissector generator *")
print("* *")
print("* Generate a new dissector for your protocol *")
print("* starting from the skeleton provided in the *")
print("* doc directory. *")
print("* *")
print("* Copyright 2019 Dario Lombardo *")
print("**************************************************")
print("")
def print_trailer(args):
print("")
print("The skeleton for the dissector of the " + args.protoshortname + " protocol has been generated.")
print("Please review/extend it to match your specific criterias.")
print("")
if __name__ == '__main__':
print_header()
args = parser.parse_args()
buffer = replace_fields(read_skeleton("packet-PROTOABBREV.c"), args)
write_dissector(buffer, args)
patch_makefile(args)
write_plugin_makefile(args)
print_trailer(args) |
Python | wireshark/tools/generate-nl80211-fields.py | #!/usr/bin/env python3
# Parses the nl80211.h interface and generate appropriate enums and fields
# (value_string) for packet-netlink-nl80211.c
#
# Copyright (c) 2017, Peter Wu <[email protected]>
# Copyright (c) 2018, Mikael Kanstrup <[email protected]>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
#
# To update the dissector source file, run this from the source directory:
#
# python tools/generate-nl80211-fields.py --update
#
import argparse
import re
import requests
import sys
# Begin of comment, followed by the actual array definition
HEADER = "/* Definitions from linux/nl80211.h {{{ */\n"
FOOTER = "/* }}} */\n"
# Enums to extract from the header file
EXPORT_ENUMS = {
# 'enum_name': ('field_name', field_type', 'field_blurb')
'nl80211_commands': ('Command', 'FT_UINT8', '"Generic Netlink Command"'),
'nl80211_attrs': (None, None, None),
'nl80211_iftype': (None, None, None),
'nl80211_sta_flags': (None, None, None),
'nl80211_sta_p2p_ps_status': ('Attribute Value', 'FT_UINT8', None),
'nl80211_he_gi': (None, None, None),
'nl80211_he_ru_alloc': (None, None, None),
'nl80211_rate_info': (None, None, None),
'nl80211_sta_bss_param': (None, None, None),
'nl80211_sta_info': (None, None, None),
'nl80211_tid_stats': (None, None, None),
'nl80211_txq_stats': (None, None, None),
'nl80211_mpath_flags': (None, None, None),
'nl80211_mpath_info': (None, None, None),
'nl80211_band_iftype_attr': (None, None, None),
'nl80211_band_attr': (None, None, None),
'nl80211_wmm_rule': (None, None, None),
'nl80211_frequency_attr': (None, None, None),
'nl80211_bitrate_attr': (None, None, None),
'nl80211_reg_initiator': ('Attribute Value', 'FT_UINT8', None),
'nl80211_reg_type': ('Attribute Value', 'FT_UINT8', None),
'nl80211_reg_rule_attr': (None, None, None),
'nl80211_sched_scan_match_attr': (None, None, None),
'nl80211_reg_rule_flags': (None, None, None),
'nl80211_dfs_regions': ('Attribute Value', 'FT_UINT8', None),
'nl80211_user_reg_hint_type': ('Attribute Value', 'FT_UINT32', None),
'nl80211_survey_info': (None, None, None),
'nl80211_mntr_flags': (None, None, None),
'nl80211_mesh_power_mode': ('Attribute Value', 'FT_UINT32', None),
'nl80211_meshconf_params': (None, None, None),
'nl80211_mesh_setup_params': (None, None, None),
'nl80211_txq_attr': (None, None, None),
'nl80211_ac': (None, None, None),
'nl80211_channel_type': ('Attribute Value', 'FT_UINT32', None),
'nl80211_key_mode': (None, None, None),
'nl80211_chan_width': ('Attribute Value', 'FT_UINT32', None),
'nl80211_bss_scan_width': ('Attribute Value', 'FT_UINT32', None),
'nl80211_bss': (None, None, None),
'nl80211_bss_status': ('Attribute Value', 'FT_UINT32', None),
'nl80211_auth_type': ('Attribute Value', 'FT_UINT32', None),
'nl80211_key_type': ('Attribute Value', 'FT_UINT32', None),
'nl80211_mfp': ('Attribute Value', 'FT_UINT32', None),
'nl80211_wpa_versions': (None, None, None),
'nl80211_key_default_types': (None, None, None),
'nl80211_key_attributes': (None, None, None),
'nl80211_tx_rate_attributes': (None, None, None),
'nl80211_txrate_gi': (None, None, None),
'nl80211_band': (None, None, None),
'nl80211_ps_state': ('Attribute Value', 'FT_UINT32', None),
'nl80211_attr_cqm': (None, None, None),
'nl80211_cqm_rssi_threshold_event': (None, None, None),
'nl80211_tx_power_setting': ('Attribute Value', 'FT_UINT32', None),
'nl80211_packet_pattern_attr': (None, None, None),
'nl80211_wowlan_triggers': (None, None, None),
'nl80211_wowlan_tcp_attrs': (None, None, None),
'nl80211_attr_coalesce_rule': (None, None, None),
'nl80211_coalesce_condition': (None, None, None),
'nl80211_iface_limit_attrs': (None, None, None),
'nl80211_if_combination_attrs': (None, None, None),
'nl80211_plink_state': ('Attribute Value', 'FT_UINT8', None),
'plink_actions': ('Attribute Value', 'FT_UINT8', None),
'nl80211_rekey_data': (None, None, None),
'nl80211_hidden_ssid': (None, None, None),
'nl80211_sta_wme_attr': (None, None, None),
'nl80211_pmksa_candidate_attr': (None, None, None),
'nl80211_tdls_operation': ('Attribute Value', 'FT_UINT8', None),
#Reserved for future use 'nl80211_ap_sme_features': (None, None, None),
'nl80211_feature_flags': (None, None, None),
'nl80211_ext_feature_index': (None, None, None),
'nl80211_probe_resp_offload_support_attr': (None, None, None),
'nl80211_connect_failed_reason': ('Attribute Value', 'FT_UINT32', None),
'nl80211_timeout_reason': ('Attribute Value', 'FT_UINT32', None),
'nl80211_scan_flags': (None, None, None),
'nl80211_acl_policy': ('Attribute Value', 'FT_UINT32', None),
'nl80211_smps_mode': ('Attribute Value', 'FT_UINT8', None),
'nl80211_radar_event': ('Attribute Value', 'FT_UINT32', None),
'nl80211_dfs_state': (None, None, None),
'nl80211_protocol_features': (None, None, None),
'nl80211_crit_proto_id': ('Attribute Value', 'FT_UINT16', None),
'nl80211_rxmgmt_flags': (None, None, None),
'nl80211_tdls_peer_capability': (None, None, None),
'nl80211_sched_scan_plan': (None, None, None),
'nl80211_bss_select_attr': (None, None, None),
'nl80211_nan_function_type': (None, None, None),
'nl80211_nan_publish_type': (None, None, None),
'nl80211_nan_func_term_reason': (None, None, None),
'nl80211_nan_func_attributes': (None, None, None),
'nl80211_nan_srf_attributes': (None, None, None),
'nl80211_nan_match_attributes': (None, None, None),
'nl80211_external_auth_action': ('Attribute Value', 'FT_UINT32', None),
'nl80211_ftm_responder_attributes': (None, None, None),
'nl80211_ftm_responder_stats': (None, None, None),
'nl80211_preamble': (None, None, None),
'nl80211_peer_measurement_type': (None, None, None),
'nl80211_peer_measurement_status': (None, None, None),
'nl80211_peer_measurement_req': (None, None, None),
'nl80211_peer_measurement_resp': (None, None, None),
'nl80211_peer_measurement_peer_attrs': (None, None, None),
'nl80211_peer_measurement_attrs': (None, None, None),
'nl80211_peer_measurement_ftm_capa': (None, None, None),
'nl80211_peer_measurement_ftm_req': (None, None, None),
'nl80211_peer_measurement_ftm_failure_reasons': (None, None, None),
'nl80211_peer_measurement_ftm_resp': (None, None, None),
'nl80211_obss_pd_attributes': (None, None, None),
}
# File to be patched
SOURCE_FILE = "epan/dissectors/packet-netlink-nl80211.c"
# URL where the latest version can be found
URL = "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/plain/include/uapi/linux/nl80211.h"
def make_enum(name, values, expressions, indent):
code = 'enum ws_%s {\n' % name
for value, expression in zip(values, expressions):
if expression and 'NL80211' in expression:
expression = 'WS_%s' % expression
if expression:
code += '%sWS_%s = %s,\n' % (indent, value, expression)
else:
code += '%sWS_%s,\n' % (indent, value)
code += '};\n'
return code
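# A sketch of make_enum() output for 'nl80211_commands' (illustrative, abridged):
#   enum ws_nl80211_commands {
#       WS_NL80211_CMD_UNSPEC,
#       WS_NL80211_CMD_GET_WIPHY,
#   };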
def make_value_string(name, values, indent):
code = 'static const value_string ws_%s_vals[] = {\n' % name
align = 40
for value in values:
code += indent + ('{ WS_%s,' % value).ljust(align - 1) + ' '
code += '"%s" },\n' % value
code += '%s{ 0, NULL }\n' % indent
code += '};\n'
code += 'static value_string_ext ws_%s_vals_ext =' % name
code += ' VALUE_STRING_EXT_INIT(ws_%s_vals);\n' % name
return code
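# A sketch of make_value_string() output (illustrative, abridged):
#   static const value_string ws_nl80211_commands_vals[] = {
#       { WS_NL80211_CMD_UNSPEC,           "NL80211_CMD_UNSPEC" },
#       { 0, NULL }
#   };
#   static value_string_ext ws_nl80211_commands_vals_ext = VALUE_STRING_EXT_INIT(ws_nl80211_commands_vals);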
def remove_prefix(prefix, text):
if text.startswith(prefix):
return text[len(prefix):]
return text
def make_hf_defs(name, indent):
code = 'static gint hf_%s = -1;' % name
return code
def make_hf(name, indent):
(field_name, field_type, field_blurb) = EXPORT_ENUMS.get(name)
field_abbrev = name
# Fill in default values
if not field_name:
field_name = 'Attribute Type'
if not field_type:
field_type = 'FT_UINT16'
if not field_blurb:
field_blurb = 'NULL'
# Special treatment of already existing field names
rename_fields = {
'nl80211_attrs': 'nl80211_attr_type',
'nl80211_commands': 'nl80211_cmd'
}
if rename_fields.get(name):
field_abbrev = rename_fields[name]
field_abbrev = remove_prefix('nl80211_', field_abbrev)
code = indent + indent + '{ &hf_%s,\n' % name
code += indent*3 + '{ "%s", "nl80211.%s",\n' % (field_name, field_abbrev)
code += indent*3 + ' %s, BASE_DEC | BASE_EXT_STRING,\n' % (field_type)
code += indent*3 + ' VALS_EXT_PTR(&ws_%s_vals_ext), 0x00,\n' % (name)
code += indent*3 + ' %s, HFILL },\n' % (field_blurb)
code += indent + indent + '},'
return code
def make_ett_defs(name, indent):
code = 'static gint ett_%s = -1;' % name
return code
def make_ett(name, indent):
code = indent + indent + '&ett_%s,' % name
return code
class EnumStore(object):
__RE_ENUM_VALUE = re.compile(
r'\s+?(?P<value>\w+)(?:\ /\*.*?\*\/)?(?:\s*=\s*(?P<expression>.*?))?(?:\s*,|$)',
re.MULTILINE | re.DOTALL)
def __init__(self, name, values):
self.name = name
self.values = []
self.expressions = []
self.active = True
self.parse_values(values)
def parse_values(self, values):
for m in self.__RE_ENUM_VALUE.finditer(values):
value, expression = m.groups()
if value.startswith('NUM_'):
break
if value.endswith('_AFTER_LAST'):
break
if value.endswith('_LAST'):
break
if value.startswith('__') and value.endswith('_NUM'):
break
if expression and expression in self.values:
# Skip aliases
continue
self.values.append(value)
self.expressions.append(expression)
def finish(self):
return self.name, self.values, self.expressions
RE_ENUM = re.compile(
r'enum\s+?(?P<enum>\w+)\s+?\{(?P<values>.*?)\}\;',
re.MULTILINE | re.DOTALL)
RE_COMMENT = re.compile(r'/\*.*?\*/', re.MULTILINE | re.DOTALL)
def parse_header(content):
# Strip comments
content = re.sub(RE_COMMENT, '', content)
enums = []
for m in RE_ENUM.finditer(content):
enum = m.group('enum')
values = m.group('values')
if enum in EXPORT_ENUMS:
enums.append(EnumStore(enum, values).finish())
return enums
def parse_source():
"""
    Reads the source file and tries to split it into the parts before, inside and
after the block.
"""
begin, block, end = '', '', ''
parts = []
# Stages: 1 (before block), 2 (in block, skip), 3 (after block)
stage = 1
with open(SOURCE_FILE) as f:
for line in f:
if line == FOOTER and stage == 2:
stage = 3 # End of block
if stage == 1:
begin += line
elif stage == 2:
block += line
elif stage == 3:
end += line
if line == HEADER and stage == 1:
stage = 2 # Begin of block
if line == HEADER and stage == 3:
stage = 2 # Begin of next code block
parts.append((begin, block, end))
begin, block, end = '', '', ''
parts.append((begin, block, end))
if stage != 3 or len(parts) != 3:
raise RuntimeError("Could not parse file (in stage %d) (parts %d)" % (stage, len(parts)))
return parts
parser = argparse.ArgumentParser()
parser.add_argument("--update", action="store_true",
help="Update %s as needed instead of writing to stdout" % SOURCE_FILE)
parser.add_argument("--indent", default=" " * 4,
help="indentation (use \\t for tabs, default 4 spaces)")
parser.add_argument("header_file", nargs="?", default=URL,
help="nl80211.h header file (use - for stdin or a HTTP(S) URL, "
"default %(default)s)")
def main():
args = parser.parse_args()
indent = args.indent.replace("\\t", "\t")
    if any(args.header_file.startswith(proto) for proto in ('http:', 'https:')):
r = requests.get(args.header_file)
r.raise_for_status()
enums = parse_header(r.text)
elif args.header_file == "-":
enums = parse_header(sys.stdin.read())
else:
with open(args.header_file) as f:
enums = parse_header(f.read())
assert len(enums) == len(EXPORT_ENUMS), \
"Could not parse data, found %d/%d results" % \
(len(enums), len(EXPORT_ENUMS))
code_enums, code_vals, code_hf_defs, code_ett_defs, code_hf, code_ett = '', '', '', '', '', ''
for enum_name, enum_values, expressions in enums:
code_enums += make_enum(enum_name, enum_values, expressions, indent) + '\n'
code_vals += make_value_string(enum_name, enum_values, indent) + '\n'
code_hf_defs += make_hf_defs(enum_name, indent) + '\n'
code_ett_defs += make_ett_defs(enum_name, indent) + '\n'
code_hf += make_hf(enum_name, indent) + '\n'
code_ett += make_ett(enum_name, indent) + '\n'
code_top = code_enums + code_vals + code_hf_defs + '\n' + code_ett_defs
code_top = code_top.rstrip("\n") + "\n"
code = [code_top, code_hf, code_ett]
update = False
if args.update:
parts = parse_source()
# Check if file needs update
for (begin, old_code, end), new_code in zip(parts, code):
if old_code != new_code:
update = True
break
if not update:
print("File is up-to-date")
return
# Update file
with open(SOURCE_FILE, "w") as f:
for (begin, old_code, end), new_code in zip(parts, code):
f.write(begin)
f.write(new_code)
f.write(end)
print("Updated %s" % SOURCE_FILE)
else:
for new_code in code:
print(new_code)
if __name__ == '__main__':
main()
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
# |
Python | wireshark/tools/generate-sysdig-event.py | #!/usr/bin/env python3
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
'''\
Generate Sysdig event dissector sections from the sysdig sources.
Reads driver/event_table.c and driver/ppm_events_public.h and generates
corresponding dissection code in packet-sysdig-event.c. Updates are
performed in-place in the dissector code.
Requires an Internet connection. Assets are loaded from GitHub over HTTPS, from falcosecurity/libs master.
'''
import logging
import os
import os.path
import re
import urllib.request, urllib.error, urllib.parse
import sys
sysdig_repo_pfx = 'https://raw.githubusercontent.com/falcosecurity/libs/master/'
def exit_msg(msg=None, status=1):
if msg is not None:
sys.stderr.write(msg + '\n\n')
sys.stderr.write(__doc__ + '\n')
sys.exit(status)
def get_url_lines(url):
'''Open a URL.
Returns the URL body as a list of lines.
'''
req_headers = { 'User-Agent': 'Wireshark generate-sysdig-event' }
try:
req = urllib.request.Request(url, headers=req_headers)
response = urllib.request.urlopen(req)
lines = response.read().decode().splitlines()
response.close()
except urllib.error.HTTPError as err:
exit_msg("HTTP error fetching {0}: {1}".format(url, err.reason))
except urllib.error.URLError as err:
exit_msg("URL error fetching {0}: {1}".format(url, err.reason))
except OSError as err:
exit_msg("OS error fetching {0}".format(url, err.strerror))
except Exception:
exit_msg("Unexpected error:", sys.exc_info()[0])
return lines
ppm_ev_pub_lines = get_url_lines(sysdig_repo_pfx + 'driver/ppm_events_public.h')
ppme_re = re.compile(r'^\s+PPME_([A-Z0-9_]+_[EX])\s*=\s*([0-9]+)\s*,')
ppm_sc_x_re = re.compile(r'^\s+PPM_SC_X\s*\(\s*(\S+)\s*,\s*(\d+)\s*\)')
event_info_d = {}
def get_event_defines():
event_d = {}
for line in ppm_ev_pub_lines:
m = ppme_re.match(line)
if m:
event_d[int(m.group(2))] = m.group(1)
return event_d
def get_syscall_code_defines():
sc_d = {}
for line in ppm_ev_pub_lines:
m = ppm_sc_x_re.match(line)
if m:
sc_d[int(m.group(2))] = m.group(1)
return sc_d
ppm_ev_table_lines = get_url_lines(sysdig_repo_pfx + 'driver/event_table.c')
hf_d = {}
event_info_re = re.compile(r'^\s+\[\s*PPME_.*\]\s*=\s*{\s*"([A-Za-z0-9_]+)"\s*,[^,]+,[^,]+,\s*([0-9]+)\s*[,{}]')
event_param_re = re.compile(r'{\s*"([A-Za-z0-9_ ]+)"\s*,\s*PT_([A-Z0-9_]+)\s*,\s*PF_([A-Z0-9_]+)\s*[,}]')
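# Both regexes are applied line by line, so an event_table.c entry is expected
# on a single line, roughly of this shape (illustrative):
#   [PPME_GENERIC_E] = {"syscall", EC_OTHER, EF_NONE, 2, {{"ID", PT_SYSCALLID, PF_DEC}, {"nativeID", PT_UINT16, PF_DEC} } },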
def get_event_names():
'''Return a contiguous list of event names. Names are lower case.'''
event_name_l = []
for line in ppm_ev_table_lines:
ei = event_info_re.match(line)
if ei:
event_name_l.append(ei.group(1))
return event_name_l
# PT_xxx to FT_xxx
pt_to_ft = {
'BYTEBUF': 'BYTES',
'CHARBUF': 'STRING',
'ERRNO': 'INT64',
'FD': 'INT64',
'FLAGS8': 'INT8',
'FLAGS16': 'INT16',
'FLAGS32': 'INT32',
'FSPATH': 'STRING',
'FSRELPATH': 'STRING',
'GID': 'INT32',
'MODE': 'INT32',
'PID': 'INT64',
'UID': 'INT32',
'SYSCALLID': 'UINT16',
}
# FT_xxx to BASE_xxx
force_param_formats = {
'STRING': 'NONE',
'INT.*': 'DEC',
}
def get_event_params():
'''Return a list of dictionaries containing event names and parameter info.'''
event_param_l = []
event_num = 0
force_string_l = ['args', 'env']
for line in ppm_ev_table_lines:
ei = event_info_re.match(line)
ep = event_param_re.findall(line)
if ei and ep:
event_name = ei.group(1)
src_param_count = int(ei.group(2))
if len(ep) != src_param_count:
err_msg = '{}: found {} parameters. Expected {}. Params: {}'.format(
event_name, len(ep), src_param_count, repr(ep))
if len(ep) > src_param_count:
logging.warning(err_msg)
del ep[src_param_count:]
else:
raise NameError(err_msg)
for p in ep:
if p[0] in force_string_l:
param_type = 'STRING'
elif p[1] in pt_to_ft:
param_type = pt_to_ft[p[1]]
elif p[0] == 'flags' and p[1].startswith('INT') and 'HEX' in p[2]:
param_type = 'U' + p[1]
elif 'INT' in p[1]:
# Ints
param_type = p[1]
else:
print(f"p fallback {p}")
# Fall back to bytes
param_type = 'BYTES'
if p[2] == 'NA':
if 'INT' in param_type:
param_format = 'DEC'
else:
param_format = 'NONE'
elif param_type == 'BYTES':
param_format = 'NONE'
else:
param_format = p[2]
for pt_pat, force_pf in force_param_formats.items():
if re.match(pt_pat, param_type) and param_format != force_pf:
err_msg = 'Forcing {} {} format to {}. Params: {}'.format(
event_name, param_type, force_pf, repr(ep))
logging.warning(err_msg)
param_format = force_pf
param_d = {
'event_name': event_name,
'event_num': event_num,
# use replace() to account for "plugin ID" param name (ie: param names with space)
'param_name': p[0].replace(" ", "_"),
'param_type': param_type,
'param_format': param_format,
}
event_param_l.append(param_d)
if ei:
event_num += 1
return event_param_l
def param_to_hf_name(param):
return 'hf_param_{}_{}'.format(param['param_name'], param['param_type'].lower())
def param_to_value_string_name(param):
return '{}_{}_vals'.format(param['param_name'], param['param_type'].lower())
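# e.g. (illustrative) a parameter named 'fd' of type INT64 yields the field
# variable name 'hf_param_fd_int64' and the value_string name 'fd_int64_vals'.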
def get_param_desc(param):
# Try to coerce event names and parameters into human-friendly
# strings.
# XXX This could use some work.
# Specific descriptions. Event name + parameter name.
param_descs = {
'accept.queuepct': 'Accept queue per connection',
'execve.args': 'Program arguments',
'execve.comm': 'Command',
'execve.cwd': 'Current working directory',
}
# General descriptions. Event name only.
event_descs = {
'ioctl': 'I/O control',
}
event_name = param['event_name']
param_id = '{}.{}'.format(event_name, param['param_name'])
if param_id in param_descs:
param_desc = param_descs[param_id]
elif event_name in event_descs:
param_desc = '{}: {}'.format(event_descs[event_name], param['param_name'])
else:
param_desc = param['param_name']
return param_desc
def main():
logging.basicConfig(format='%(levelname)s: %(message)s')
# Event list
event_d = get_event_defines()
event_nums = list(event_d.keys())
event_nums.sort()
event_name_l = get_event_names()
event_param_l = get_event_params()
hf_d = {}
for param in event_param_l:
hf_name = param_to_hf_name(param)
hf_d[hf_name] = param
idx_id_to_name = { '': 'no' }
parameter_index_l = []
for en in range (0, len(event_nums)):
param_id = ''
param_l = []
event_var = event_d[en].lower()
for param in event_param_l:
if param['event_num'] == en:
hf_name = param_to_hf_name(param)
param_l.append(hf_name)
param_id += ':' + param['param_name'] + '_' + param['param_type']
ei_str = ''
if param_id not in idx_id_to_name:
idx_id_to_name[param_id] = event_var
ei_str = 'static int * const {}_indexes[] = {{ &{}, NULL }};'.format(
event_var,
', &'.join(param_l)
)
else:
ei_str = '#define {}_indexes {}_indexes'.format(event_var, idx_id_to_name[param_id])
parameter_index_l.append(ei_str)
dissector_path = os.path.join(os.path.dirname(__file__),
'..', 'epan', 'dissectors', 'packet-sysdig-event.c')
dissector_f = open(dissector_path, 'r')
dissector_lines = list(dissector_f)
dissector_f = open(dissector_path, 'w+')
# Strip out old content
strip_re_l = []
    strip_re_l.append(re.compile(r'^static\s+int\s+hf_param_.*;'))
    strip_re_l.append(re.compile(r'^#define\s+EVT_STR_[A-Z0-9_]+\s+"[A-Za-z0-9_]+"'))
    strip_re_l.append(re.compile(r'^#define\s+EVT_[A-Z0-9_]+\s+[0-9]+'))
    strip_re_l.append(re.compile(r'^\s*\{\s*EVT_[A-Z0-9_]+\s*,\s*EVT_STR_[A-Z0-9_]+\s*}'))
    strip_re_l.append(re.compile(r'^static\s+const\s+int\s+\*\s*[a-z0-9_]+_[ex]_indexes\[\]\s*=\s*\{\s*&hf_param_.*NULL\s*\}\s*;'))
    strip_re_l.append(re.compile(r'^static\s+int\s*\*\s+const\s+[a-z0-9_]+_[ex]_indexes\[\]\s*=\s*\{\s*&hf_param_.*NULL\s*\}\s*;'))
    strip_re_l.append(re.compile(r'^\s*#define\s+[a-z0-9_]+_[ex]_indexes\s+[a-z0-9_]+_indexes'))
    strip_re_l.append(re.compile(r'^\s*\{\s*EVT_[A-Z0-9_]+_[EX]\s*,\s*[a-z0-9_]+_[ex]_indexes\s*}\s*,'))
    strip_re_l.append(re.compile(r'^\s*\{\s*\d+\s*,\s*"\S+"\s*}\s*,\s*//\s*PPM_SC_\S+'))
    strip_re_l.append(re.compile(r'^\s*{\s*&hf_param_.*},')) # Must all be on one line
for strip_re in strip_re_l:
dissector_lines = [l for l in dissector_lines if not strip_re.search(l)]
# Find our value strings
    value_string_re = re.compile(r'static\s+const\s+value_string\s+([A-Za-z0-9_]+_vals)')
value_string_l = []
for line in dissector_lines:
vs = value_string_re.match(line)
if vs:
value_string_l.append(vs.group(1))
# Add in new content after comments.
header_fields_c = 'Header fields'
    header_fields_re = re.compile(r'/\*\s+' + header_fields_c, flags = re.IGNORECASE)
header_fields_l = []
for hf_name in sorted(hf_d.keys()):
header_fields_l.append('static int {} = -1;'.format(hf_name))
event_names_c = 'Event names'
    event_names_re = re.compile(r'/\*\s+' + event_names_c, flags = re.IGNORECASE)
event_names_l = []
event_str_l = list(set(event_name_l))
event_str_l.sort()
for evt_str in event_str_l:
event_names_l.append('#define EVT_STR_{0:24s} "{1:s}"'.format(evt_str.upper(), evt_str))
event_definitions_c = 'Event definitions'
    event_definitions_re = re.compile(r'/\*\s+' + event_definitions_c, flags=re.IGNORECASE)
event_definitions_l = []
for evt in event_nums:
event_definitions_l.append('#define EVT_{0:24s} {1:3d}'.format(event_d[evt], evt))
value_strings_c = 'Value strings'
    value_strings_re = re.compile(r'/\*\s+' + value_strings_c, flags=re.IGNORECASE)
value_strings_l = []
for evt in event_nums:
evt_num = 'EVT_{},'.format(event_d[evt])
evt_str = 'EVT_STR_' + event_name_l[evt].upper()
value_strings_l.append(' {{ {0:<32s} {1:s} }},'.format(evt_num, evt_str))
parameter_index_c = 'Parameter indexes'
    parameter_index_re = re.compile(r'/\*\s+' + parameter_index_c, flags=re.IGNORECASE)
# parameter_index_l defined above.
event_tree_c = 'Event tree'
    event_tree_re = re.compile(r'/\*\s+' + event_tree_c, flags=re.IGNORECASE)
event_tree_l = []
for evt in event_nums:
evt_num = 'EVT_{}'.format(event_d[evt])
evt_idx = '{}_indexes'.format(event_d[evt].lower())
event_tree_l.append(' {{ {}, {} }},'.format(evt_num, evt_idx))
# Syscall codes
syscall_code_d = get_syscall_code_defines()
syscall_code_c = 'Syscall codes'
    syscall_code_re = re.compile(r'/\*\s+' + syscall_code_c, flags=re.IGNORECASE)
syscall_code_l = []
for sc_num in syscall_code_d:
syscall_code_l.append(f' {{ {sc_num:3}, "{syscall_code_d[sc_num].lower()}" }}, // PPM_SC_{syscall_code_d[sc_num]}')
header_field_reg_c = 'Header field registration'
    header_field_reg_re = re.compile(r'/\*\s+' + header_field_reg_c, flags=re.IGNORECASE)
header_field_reg_l = []
for hf_name in sorted(hf_d.keys()):
param = hf_d[hf_name]
event_name = param['event_name']
param_desc = get_param_desc(param)
param_name = param['param_name']
param_type = param['param_type']
param_format = param['param_format']
fieldconvert = 'NULL'
vs_name = param_to_value_string_name(param)
if vs_name in value_string_l and 'INT' in param_type:
fieldconvert = 'VALS({})'.format(vs_name)
header_field_reg_l.append(' {{ &{}, {{ "{}", "sysdig.param.{}.{}", FT_{}, BASE_{}, {}, 0, NULL, HFILL }} }},'.format(
hf_name,
param_desc,
event_name,
param_name,
param_type,
param_format,
fieldconvert
))
for line in dissector_lines:
fill_comment = None
fill_l = []
if header_fields_re.match(line):
fill_comment = header_fields_c
fill_l = header_fields_l
elif event_names_re.match(line):
fill_comment = event_names_c
fill_l = event_names_l
elif event_definitions_re.match(line):
fill_comment = event_definitions_c
fill_l = event_definitions_l
elif value_strings_re.match(line):
fill_comment = value_strings_c
fill_l = value_strings_l
elif parameter_index_re.match(line):
fill_comment = parameter_index_c
fill_l = parameter_index_l
elif event_tree_re.match(line):
fill_comment = event_tree_c
fill_l = event_tree_l
elif syscall_code_re.match(line):
fill_comment = syscall_code_c
fill_l = syscall_code_l
elif header_field_reg_re.match(line):
fill_comment = header_field_reg_c
fill_l = header_field_reg_l
if fill_comment is not None:
# Write our comment followed by the content
            print('Generating {}, {:d} lines'.format(fill_comment, len(fill_l)))
dissector_f.write('/* {}. Automatically generated by tools/{} */\n'.format(
fill_comment,
os.path.basename(__file__)
))
            for fill_line in fill_l:
                dissector_f.write('{}\n'.format(fill_line))
# Fill each section only once
del fill_l[:]
else:
# Existing content
dissector_f.write(line)
dissector_f.close()
#
# On with the show
#
if __name__ == "__main__":
sys.exit(main()) |
Python | wireshark/tools/generate_authors.py | #!/usr/bin/env python3
#
# Generate the AUTHORS file combining existing AUTHORS file with
# git commit log.
#
# Usage: generate_authors.py AUTHORS.src
# Copyright 2022 Moshe Kaplan
# Based on generate_authors.pl by Michael Mann
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
import argparse
import io
import re
import subprocess
import sys
def get_git_authors():
'''
Sample line:
# 4321 Navin R. Johnson <[email protected]>
'''
GIT_LINE_REGEX = r"^\s*\d+\s+([^<]*)\s*<([^>]*)>"
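    # Sanity check of the regex on a hypothetical shortlog line:
    #   re.match(GIT_LINE_REGEX, '4321 Jane Doe <jane@example.org>')
    #   captures group(1) == 'Jane Doe ' and group(2) == 'jane@example.org'.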
cmd = "git --no-pager shortlog --email --summary HEAD".split(' ')
    # check_output is used for Python 3.4 compatibility
git_cmd_output = subprocess.check_output(cmd, universal_newlines=True, encoding='utf-8')
git_authors = []
for line in git_cmd_output.splitlines():
# Check if this is needed:
line = line.strip()
match = re.match(GIT_LINE_REGEX, line)
name = match.group(1).strip()
email = match.group(2).strip()
# Try to lower how much spam people get:
email = email.replace('@', '[AT]')
git_authors.append((name, email))
return git_authors
def extract_contributors(authors_content):
# Extract names and email addresses from the AUTHORS file Contributors
contributors_content = authors_content.split("= Contributors =", 1)[1]
CONTRIBUTOR_LINE_REGEX = r"^([\w\.\-\'\x80-\xff]+(\s*[\w+\.\-\'\x80-\xff])*)\s+<([^>]*)>"
contributors = []
state = ""
for line in contributors_content.splitlines():
contributor_match = re.match(CONTRIBUTOR_LINE_REGEX, line)
if re.search(r'([^\{]*)\{', line):
if contributor_match:
name = contributor_match.group(1)
email = contributor_match.group(3)
contributors.append((name, email))
state = "s_in_bracket"
elif state == "s_in_bracket":
if re.search(r'([^\}]*)\}', line):
state = ""
elif re.search('<', line):
if contributor_match:
name = contributor_match.group(1)
email = contributor_match.group(3)
contributors.append((name, email))
elif re.search(r"(e-mail address removed at contributor's request)", line):
if contributor_match:
name = contributor_match.group(1)
email = contributor_match.group(3)
contributors.append((name, email))
else:
pass
return contributors
def generate_git_contributors_text(contributors_emails, git_authors_emails):
# Track the email addresses seen to avoid including the same email address twice
emails_addresses_seen = set()
for name, email in contributors_emails:
emails_addresses_seen.add(email.lower())
output_lines = []
for name, email in git_authors_emails:
if email.lower() in emails_addresses_seen:
continue
# Skip Gerald, since he's part of the header:
if email == "gerald[AT]wireshark.org":
continue
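        # Pad names shorter than 24 characters (3 tab stops of 8) out to
        # column 24 with tabs; longer names get a single separating space.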
ntab = 3
if len(name) >= 8*ntab:
line = "{name} <{email}>".format(name=name, email=email)
else:
ntab -= len(name)/8
if len(name) % 8:
ntab += 1
tabs = '\t'*int(ntab)
line = "{name}{tabs}<{email}>".format(name=name, tabs=tabs, email=email)
emails_addresses_seen.add(email.lower())
output_lines += [line]
return "\n".join(output_lines)
# Read the AUTHORS file until we find the git log entries, then stop
def read_authors(parsed_args):
lines = []
with open(parsed_args.authors[0], 'r', encoding='utf-8') as fh:
for line in fh.readlines():
if '= From git log =' in line:
break
lines.append(line)
return ''.join(lines)
def main():
parser = argparse.ArgumentParser(description="Generate the AUTHORS file combining existing AUTHORS file with git commit log.")
parser.add_argument("authors", metavar='authors', nargs=1, help="path to AUTHORS file")
parsed_args = parser.parse_args()
author_content = read_authors(parsed_args)
# Collect the listed contributors emails so that we don't duplicate them
# in the listing of git contributors
contributors_emails = extract_contributors(author_content)
git_authors_emails = get_git_authors()
# Then generate the text output for git contributors
git_contributors_text = generate_git_contributors_text(contributors_emails, git_authors_emails)
# Now we can write our output:
git_contributor_header = '= From git log =\n\n'
output = author_content + git_contributor_header + git_contributors_text + '\n'
with open(parsed_args.authors[0], 'w', encoding='utf-8') as fh:
fh.write(output)
if __name__ == '__main__':
main() |
Python | wireshark/tools/generate_cbor_pcap.py | #!/usr/bin/env python3
'''
Convert a CBOR diagnostic notation file into an HTTP request
for the encoded CBOR.
This allows straightforward testing and debugging of simple pcap files.
Copyright 2021 Brian Sipos <[email protected]>
SPDX-License-Identifier: LGPL-2.1-or-later
'''
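# Example invocation (assumes diag2cbor.rb from the Ruby 'cbor-diag' gem is
# on PATH, as the default 'cbordiag' input type below requires):
#   echo '{1: "test"}' | tools/generate_cbor_pcap.py --outfile cbor.pcap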
from argparse import ArgumentParser
from io import BytesIO
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, TCP
from scapy.layers.http import HTTP, HTTPRequest
from scapy.packet import Raw
from scapy.utils import wrpcap
from subprocess import check_output
import sys
def main():
parser = ArgumentParser()
parser.add_argument('--content-type', default='application/cbor',
help='The request content-type header')
parser.add_argument('--infile', default='-',
help='The diagnostic text input file, or "-" for stdin')
parser.add_argument('--outfile', default='-',
help='The PCAP output file, or "-" for stdout')
parser.add_argument('--intype', default='cbordiag',
choices=['cbordiag', 'raw'],
help='The input data type.')
args = parser.parse_args()
# First get the CBOR data itself
infile_name = args.infile.strip()
if infile_name != '-':
infile = open(infile_name, 'rb')
else:
infile = sys.stdin.buffer
if args.intype == 'raw':
cbordata = infile.read()
elif args.intype == 'cbordiag':
cbordata = check_output('diag2cbor.rb', stdin=infile)
# Now synthesize an HTTP request with that body
req = HTTPRequest(
Method='POST',
Host='example.com',
User_Agent='scapy',
Content_Type=args.content_type,
Content_Length=str(len(cbordata)),
) / Raw(cbordata)
# Write the request directly into pcap
outfile_name = args.outfile.strip()
if outfile_name != '-':
outfile = open(outfile_name, 'wb')
else:
outfile = sys.stdout.buffer
pkt = Ether()/IP()/TCP()/HTTP()/req
wrpcap(outfile, pkt)
if __name__ == '__main__':
sys.exit(main()) |
wireshark/tools/Get-HardenFlags.ps1 | #
# Get-HardenFlags - Checks hardening flags on the binaries.
#
# Copyright 2015 Graham Bloice <[email protected]>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#requires -version 2
# Get-HardenFlags calls the dumpbin utility to get the binary header flags
# on all the binaries in the distribution, and then filters
# for the NXCOMPAT and DYNAMICBASE flags.
# This script will probably fail for the foreseeable future.
#
# Many of our third-party libraries are compiled using MinGW-w64. Its version
# of `ld` doesn't enable the dynamicbase, nxcompat, or high-entropy-va flags
# by default. When you *do* pass --dynamicbase it strips the relocation
# section of the executable:
#
# https://sourceware.org/bugzilla/show_bug.cgi?id=19011
#
# As a result, none of the distributions that produce Windows applications
# and libraries have any sort of hardening flags enabled:
#
# https://mingw-w64.org/doku.php/download
#
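# For reference, dumpbin /HEADERS prints "Dynamic base" and "NX compatible"
# under "DLL characteristics" for a hardened binary; those are the strings
# the Select-String filter below looks for.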
<#
.SYNOPSIS
Checks the NXCOMPAT and DYNAMICBASE flags on all the binaries.
.DESCRIPTION
This script checks that each binary in the given directory was built with
the NXCOMPAT and DYNAMICBASE hardening flags, reporting any that were not.
.PARAMETER BinaryDir
Specifies the directory where the binaries may be found.
.INPUTS
-BinaryDir Directory containing the binaries to be checked.
.OUTPUTS
Any binary that doesn't have the flags is written to the error stream
.EXAMPLE
C:\PS> .\tools\Get-HardenFlags.ps1 -BinaryDir run\RelWithDebInfo
#>
Param(
[Parameter(Mandatory=$true, Position=0)]
[String]
$BinaryDir
)
# Create a list of 3rd party binaries that are not hardened
$SoftBins = (
"libpixmap.dll",
"libwimp.dll",
"libgail.dll",
"airpcap.dll",
"comerr32.dll",
"k5sprt32.dll",
"krb5_32.dll",
"libatk-1.0-0.dll",
"libcairo-2.dll",
"libffi-6.dll",
"libfontconfig-1.dll",
"libfreetype-6.dll",
"libgcc_s_sjlj-1.dll",
"libgcrypt-20.dll",
"libgdk-win32-2.0-0.dll",
"libgdk_pixbuf-2.0-0.dll",
"libgio-2.0-0.dll",
"libglib-2.0-0.dll",
"libgmodule-2.0-0.dll",
"libgmp-10.dll",
"libgnutls-28.dll",
"libgobject-2.0-0.dll",
"libgpg-error-0.dll",
"libgtk-win32-2.0-0.dll",
"libharfbuzz-0.dll",
"libhogweed-2-4.dll",
"libintl-8.dll",
"libjasper-1.dll",
"libjpeg-8.dll",
"liblzma-5.dll",
"libmaxminddb.dll",
"libnettle-4-6.dll",
"libp11-kit-0.dll",
"libpango-1.0-0.dll",
"libpangocairo-1.0-0.dll",
"libpangoft2-1.0-0.dll",
"libpangowin32-1.0-0.dll",
"libpixman-1-0.dll",
"libpng15-15.dll",
"libtasn1-6.dll",
"libtiff-5.dll",
"libxml2-2.dll",
# The x64 ones that are different
"comerr64.dll",
"k5sprt64.dll",
"krb5_64.dll",
"libgcc_s_seh-1.dll",
"libgpg-error6-0.dll",
"libpng16-16.dll",
# Unfortunately the nsis uninstaller is not hardened.
"uninstall.exe"
)
# CD into the bindir, allows Resolve-Path to work in relative mode.
Push-Location $BinaryDir
[Console]::Error.WriteLine("Checking in $BinaryDir for unhardened binaries:")
# Retrieve the list of binaries. -Filter is quicker than -Include, but can only handle one item
$Binaries = Get-ChildItem -Path $BinaryDir -Recurse -Include *.exe,*.dll
# Number of "soft" binaries found
$Count = 0;
# Iterate over the list
$Binaries | ForEach-Object {
# Get the flags
$flags = dumpbin $_ /HEADERS;
# Check for the required flags
$match = $flags | Select-String -Pattern "NX compatible", "Dynamic base"
if ($match.Count -ne 2) {
# Write-Error outputs error records, we simply want the filename
[Console]::Error.WriteLine((Resolve-Path $_ -Relative))
# Don't count files that won't ever be OK
if ($SoftBins -notcontains (Split-Path $_ -Leaf)) {
$Count++
}
}
}
exit $Count |
|
Python | wireshark/tools/html2text.py | #!/usr/bin/env python3
#
# html2text.py - converts HTML to text
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
from __future__ import unicode_literals
__author__ = "Peter Wu <[email protected]>"
__copyright__ = "Copyright 2015, Peter Wu"
__license__ = "GPL (v2 or later)"
# TODO:
# multiple list indentation levels (modify bullets?)
# maybe allow for ascii output instead of utf-8?
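# Minimal usage sketch (close() writes the accumulated text to stdout):
#   parser = TextHTMLParser()
#   parser.feed('<p>See <code>html2text.py</code></p>')
#   parser.close()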
import sys
from textwrap import TextWrapper
try:
from HTMLParser import HTMLParser
from htmlentitydefs import name2codepoint
except ImportError: # Python 3
from html.parser import HTMLParser
from html.entities import name2codepoint
unichr = chr # for html entity handling
class TextHTMLParser(HTMLParser):
"""Converts a HTML document to text."""
def __init__(self):
try:
# Python 3.4
            HTMLParser.__init__(self, convert_charrefs=True)
except Exception:
            HTMLParser.__init__(self)
# All text, concatenated
self.output_buffer = ''
# The current text block which is being constructed
self.text_block = ''
# Whether the previous element was terminated with whitespace
self.need_space = False
# Whether to prevent word-wrapping the contents (for "pre" tag)
self.skip_wrap = False
# Quoting
self.need_quote = False
self.quote_stack = []
# Suffixes
self.need_suffix = False
self.suffix_stack = []
# track list items
self.list_item_prefix = None
self.ordered_list_index = None
self.stack_list_item_prefix = []
self.stack_ordered_list_index = []
self.list_indent_level = 0
self.list_item_indent = ""
# Indentation (for heading and paragraphs)
self.indent_levels = [0, 0]
# Don't dump CSS, scripts, etc.
self.ignore_tags = ('head', 'style', 'script')
self.ignore_level = 0
# href footnotes.
self.footnotes = []
self.href = None
def _wrap_text(self, text):
"""Wraps text, but additionally indent list items."""
initial_indent = indent = sum(self.indent_levels) * ' '
if self.list_item_prefix:
initial_indent += self.list_item_prefix
indent += ' '
kwargs = {
'width': 72,
'initial_indent': initial_indent,
'subsequent_indent': indent
}
kwargs['break_on_hyphens'] = False
wrapper = TextWrapper(**kwargs)
return '\n'.join(wrapper.wrap(text))
def _commit_block(self, newline='\n\n'):
text = self.text_block
if text:
if not self.skip_wrap:
text = self._wrap_text(text)
self.output_buffer += text + newline
self.text_block = ''
self.need_space = False
def handle_starttag(self, tag, attrs):
# end a block of text on <br>, but also flush list items which are not
# terminated.
if tag == 'br' or tag == 'li':
self._commit_block('\n')
if tag == 'code':
self.need_quote = True
self.quote_stack.append('`')
if tag == 'pre':
self.skip_wrap = True
if tag in ('ol', 'ul'):
self.list_indent_level += 1
self.list_item_indent = " " * (self.list_indent_level - 1)
self.stack_ordered_list_index.append(self.ordered_list_index)
self.stack_list_item_prefix.append(self.list_item_prefix)
# Following list items are numbered.
if tag == 'ol':
self.ordered_list_index = 1
if tag == 'ul':
self.list_item_prefix = self.list_item_indent + ' • '
if tag == 'li' and self.ordered_list_index:
self.list_item_prefix = self.list_item_indent + ' %d. ' % (self.ordered_list_index)
self.ordered_list_index += 1
if tag[0] == 'h' and len(tag) == 2 and \
(tag[1] >= '1' and tag[1] <= '6'):
self.indent_levels = [int(tag[1]) - 1, 0]
if tag == 'p':
self.indent_levels[1] = 1
if tag == 'a':
try:
href = [attr[1] for attr in attrs if attr[0] == 'href'][0]
if '://' in href: # Skip relative URLs and links.
self.href = href
except IndexError:
self.href = None
if tag == 'span':
try:
el_class = [attr[1] for attr in attrs if attr[0] == 'class'][0]
if 'menuseq' in el_class:
self.need_quote = True
self.quote_stack.append('"')
except IndexError:
pass
if tag == 'div':
try:
el_class = [attr[1] for attr in attrs if attr[0] == 'class'][0]
if 'title' in el_class.split(' '):
self.need_suffix = True
self.suffix_stack.append(':')
except IndexError:
pass
if tag in self.ignore_tags:
self.ignore_level += 1
def handle_data(self, data):
quote = ''
if self.need_quote:
quote = self.quote_stack[-1]
suffix = ''
if self.need_suffix:
suffix = self.suffix_stack.pop()
if self.ignore_level > 0:
return
elif self.skip_wrap:
block = data
else:
if self.href and data == self.href:
# This is a self link. Don't create a footnote.
self.href = None
# For normal text, fold multiple whitespace and strip
# leading and trailing spaces for the whole block (but
# keep spaces in the middle).
block = quote
if data.strip() and data[:1].isspace():
# Keep spaces in the middle
self.need_space = True
if self.need_space and data.strip() and self.text_block:
block = ' ' + quote
block += ' '.join(data.split()) + suffix
self.need_space = data[-1:].isspace()
self.text_block += block
self.need_quote = False
self.need_suffix = False
def handle_endtag(self, tag):
block_elements = 'p li ul pre ol h1 h2 h3 h4 h5 h6 tr'
#block_elements += ' dl dd dt'
if tag in block_elements.split():
self._commit_block()
        if tag in ('code', 'span') and self.quote_stack:
            # XXX This span isn't guaranteed to match its opening.
            self.text_block += self.quote_stack.pop()
if tag in ('ol', 'ul'):
self.list_indent_level -= 1
self.list_item_indent = " " * (self.list_indent_level - 1)
self.ordered_list_index = self.stack_ordered_list_index.pop()
self.list_item_prefix = self.stack_list_item_prefix.pop()
if tag == 'pre':
self.skip_wrap = False
if tag == 'a' and self.href:
self.footnotes.append(self.href)
self.text_block += '[{0}]'.format(len(self.footnotes))
if tag in self.ignore_tags:
self.ignore_level -= 1
def handle_charref(self, name):
self.handle_data(unichr(int(name)))
def handle_entityref(self, name):
self.handle_data(unichr(name2codepoint[name]))
def close(self):
HTMLParser.close(self)
self._commit_block()
if len(self.footnotes) > 0:
self.list_item_prefix = None
self.indent_levels = [1, 0]
self.text_block = 'References'
self._commit_block()
self.indent_levels = [1, 1]
footnote_num = 1
for href in self.footnotes:
self.text_block += '{0:>2}. {1}\n'.format(footnote_num, href)
footnote_num += 1
self._commit_block('\n')
byte_output = self.output_buffer.encode('utf-8')
if hasattr(sys.stdout, 'buffer'):
sys.stdout.buffer.write(byte_output)
else:
sys.stdout.write(byte_output)
def main():
htmlparser = TextHTMLParser()
if len(sys.argv) > 1 and sys.argv[1] != '-':
filename = sys.argv[1]
f = open(filename, 'rb')
else:
filename = None
f = sys.stdin
try:
if hasattr(f, 'buffer'):
# Access raw (byte) buffer in Python 3 instead of decoded one
f = f.buffer
# Read stdin as a Unicode string
htmlparser.feed(f.read().decode('utf-8'))
finally:
if filename is not None:
f.close()
htmlparser.close()
if __name__ == '__main__':
sys.exit(main()) |
wireshark/tools/idl2deb | #!/usr/bin/env python3
# idl2deb - quick hack by W. Martin Borgert <[email protected]> to create
# Debian GNU/Linux packages from idl2wrs modules for Wireshark.
# Copyright 2003, 2008, W. Martin Borgert
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
# SPDX-License-Identifier: GPL-2.0-or-later
import optparse
import os
import sys
import time
scriptinfo = """idl2deb version 2008-03-10
Copyright 2003, 2008, W. Martin Borgert
Free software, released under the terms of the GPL."""
def create_file(preserve, filename, content, mode = None):
"""Create a file with given content."""
if preserve and os.path.isfile(filename):
return
f = open(filename, 'w')
f.write(content)
f.close()
if mode:
os.chmod(filename, mode)
def create_files(version, deb, email, idl, name, preserve, iso, rfc):
"""Create all files for the .deb build process."""
base = os.path.basename(idl.lower().split(".idl")[0])
if not os.path.isdir("packaging/debian"):
os.mkdir("packaging/debian")
create_file(preserve, "packaging/debian/rules", """#!/usr/bin/make -f
include /usr/share/cdbs/1/rules/debhelper.mk
include /usr/share/cdbs/1/class/autotools.mk
PREFIX=`pwd`/packaging/debian/wireshark-giop-%s
binary-post-install/wireshark-giop-%s::
rm -f $(PREFIX)/usr/lib/wireshark/plugins/%s/*.a
""" % (base, base, version), 0o755)
create_file(preserve, "packaging/debian/control", """Source: wireshark-giop-%s
Section: net
Priority: optional
Maintainer: %s <%s>
Standards-Version: 3.6.1.0
Build-Depends: wireshark-dev, autotools-dev, debhelper, cdbs
Package: wireshark-giop-%s
Architecture: any
Depends: wireshark (= %s), ${shlibs:Depends}
Description: GIOP dissector for CORBA interface %s
This package provides a dissector for GIOP (General Inter-ORB
Protocol) for the Wireshark protocol analyser. It decodes the CORBA
(Common Object Request Broker Architecture) interfaces described
in the IDL (Interface Definition Language) file '%s.idl'.
""" % (base, name, email, base, deb, base, base))
create_file(preserve, "packaging/debian/changelog",
"""wireshark-giop-%s (0.0.1-1) unstable; urgency=low
* Automatically created package.
-- %s <%s> %s
""" % (base, name, email, rfc))
create_file(preserve, "packaging/debian/copyright",
"""This package has been created automatically by idl2deb on
%s for Debian GNU/Linux.
Wireshark: https://www.wireshark.org/
Copyright:
GPL, as evidenced by existence of GPL license file \"COPYING\".
(the GNU GPL may be viewed on Debian systems in
/usr/share/common-licenses/GPL)
""" % (iso))
def get_wrs_version():
"""Detect version of wireshark-dev package."""
deb = os.popen(
"dpkg-query -W --showformat='${Version}' wireshark-dev").read()
    debv = deb.find("-")
    if debv == -1:
        debv = len(deb)
    version = deb[deb.find(":")+1:debv]
return version, deb
def get_time():
"""Detect current time and return ISO and RFC time string."""
currenttime = time.gmtime()
return time.strftime("%Y-%m-%d %H:%M:%S +0000", currenttime), \
time.strftime("%a, %d %b %Y %H:%M:%S +0000", currenttime)
def main():
opts = process_opts(sys.argv)
iso, rfc = get_time()
version, deb = get_wrs_version()
create_files(version, deb,
opts.email, opts.idl, opts.name, opts.preserve,
iso, rfc)
os.system("dpkg-buildpackage " + opts.dbopts)
def process_opts(argv):
"""Process command line options."""
parser = optparse.OptionParser(
version=scriptinfo,
description="""Example:
%prog -e [email protected] -i bar.idl -n \"My Name\" -d \"-rfakeroot -uc -us\"""")
parser.add_option("-d", "--dbopts",
default="", metavar="opts",
help="options for dpkg-buildpackage")
parser.add_option("-e", "--email", metavar="address",
default="[email protected]",
help="use e-mail address")
parser.add_option("-i", "--idl", metavar="idlfile",
help="IDL file to use (mandatory)")
parser.add_option("-n", "--name", default="No Name",
help="use user name", metavar="name")
parser.add_option("-p", "--preserve", action="store_true",
help="do not overwrite files")
opts, args = parser.parse_args()
if not opts.idl:
print("mandatory IDL file parameter missing")
sys.exit(1)
if not os.access(opts.idl, os.R_OK):
print("IDL file not accessible")
sys.exit(1)
return opts
if __name__ == '__main__':
main() |
|
wireshark/tools/idl2wrs | #!/bin/sh
#
# File : idl2wrs
#
# Author : Frank Singleton ([email protected])
#
# Copyright (C) 2001 Frank Singleton, Ericsson Inc.
#
# This file is a simple shell script wrapper for the IDL to
# Wireshark dissector code.
#
# ie: wireshark_be.py and wireshark_gen.py
#
# This file is used to generate "Wireshark" dissectors from IDL descriptions.
# The output language generated is "C". It will generate code to use the
# GIOP/IIOP get_CDR_XXX API.
#
# Please see packet-giop.h in Wireshark distro for API description.
# Wireshark is available at https://www.wireshark.org/
#
# Omniidl is part of the OmniOrb distribution, and is available at
# http://omniorb.sourceforge.net/
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
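# Typical use (the generated C dissector is written to stdout):
#   idl2wrs your_file.idl > packet-test-idl.c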
# Must at least supply an IDL file
if [ $# -lt 1 ]; then
echo "idl2wrs Error: no IDL file specified."
echo "Usage: idl2wrs idl_file_name"
exit 1;
fi
# Check the file name for valid characters.
# Implementation based on Dave Taylor's validalnum shell script from his book,
# "Wicked Cool Shell Scripts", as well as Mark Rushakoff's answer he provided
# to the question posted at stackoverflow.com entitled, "How can I use the
# UNIX shell to count the number of times a letter appears in a text file?"
file=$(basename "$1")
compressed="$(echo "$file" | sed 's/[^[:alnum:]._]//g')"
if [ "$compressed" != "$file" ]; then
echo "idl2wrs Error: Invalid file name: $file"
exit 1;
fi
# Only allow one '.' at most.
count=$(echo "$compressed" | awk -F. '{c += NF - 1} END {print c}')
if [ $count -gt 1 ] ; then
echo "idl2wrs Error: Invalid file name: $file"
exit 1;
fi
#
# Run wireshark backend, looking for wireshark_be.py and wireshark_gen.py
# in pythons's "site-packages" directory. If cannot find that, then
# try looking in current directory. If still cannot, then exit with
# error.
if [ -f "$PYTHONPATH/site-packages/wireshark_be.py" ] && [ -f "$PYTHONPATH/site-packages/wireshark_gen.py" ]; then
    exec omniidl -p "$PYTHONPATH/site-packages" -b wireshark_be "$@"
    # not reached
fi
# Try current directory.
if [ -f ./wireshark_be.py ] && [ -f ./wireshark_gen.py ]; then
    exec omniidl -p ./ -b wireshark_be "$@"
    # not reached
fi
# Could not find both wireshark_be.py AND wireshark_gen.py
# So let's just try to run it without -p, hoping that the installation
# set up a valid path.
exec omniidl -b wireshark_be "$@"
# not reached
echo "idl2wrs Error: Could not find both wireshark_be.py AND wireshark_gen.py."
echo "Please ensure you have the PYTHONPATH variable set, or that wireshark_be.py "
echo "and wireshark_gen.py exist in the current directory. "
echo
echo "On this system, PYTHONPATH is : $PYTHONPATH"
echo
exit 2
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
# |
|
Python | wireshark/tools/indexcap.py | #!/usr/bin/env python3
#
# Tool to index protocols that appear in the given capture files
#
# The script list_protos_in_cap.sh does the same thing.
#
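# Example invocations (hypothetical paths):
#   indexcap.py -b /path/to/tshark/dir protos.idx captures/
#   indexcap.py --list-all-proto protos.idx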
# Copyright 2009, Kovarththanan Rajaratnam <[email protected]>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
from optparse import OptionParser
import multiprocessing
import sys
import os
import subprocess
import re
import pickle
import tempfile
import filecmp
import random
def extract_protos_from_file_process(tshark, file):
try:
cmd = [tshark, "-Tfields", "-e", "frame.protocols", "-r", file]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
stdout = stdout.decode('utf-8')
if p.returncode != 0:
return (file, {})
proto_hash = {}
for line in stdout.splitlines():
if not re.match(r'^[\w:-]+$', line):
continue
for proto in line.split(':'):
proto_hash[proto] = 1 + proto_hash.setdefault(proto, 0)
return (file, proto_hash)
except KeyboardInterrupt:
return None
def extract_protos_from_file(tshark, num_procs, max_files, cap_files, cap_hash, index_file_name):
pool = multiprocessing.Pool(num_procs)
    results = [pool.apply_async(extract_protos_from_file_process, [tshark, file]) for file in cap_files]
try:
for (cur_item_idx,result_async) in enumerate(results):
file_result = result_async.get()
action = "SKIPPED" if file_result[1] is {} else "PROCESSED"
print("%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result[0], os.path.getsize(file_result[0])))
cap_hash.update(dict([file_result]))
except KeyboardInterrupt:
print("%s was interrupted by user" % (sys.argv[0]))
pool.terminate()
exit(1)
index_file = open(index_file_name, "wb")
pickle.dump(cap_hash, index_file)
index_file.close()
exit(0)
def dissect_file_process(tshark, tmpdir, file):
try:
(handle_o, tmpfile_o) = tempfile.mkstemp(suffix='_stdout', dir=tmpdir)
(handle_e, tmpfile_e) = tempfile.mkstemp(suffix='_stderr', dir=tmpdir)
cmd = [tshark, "-nxVr", file]
p = subprocess.Popen(cmd, stdout=handle_o, stderr=handle_e)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
return (file, True, tmpfile_o, tmpfile_e)
else:
return (file, False, tmpfile_o, tmpfile_e)
except KeyboardInterrupt:
return False
finally:
os.close(handle_o)
os.close(handle_e)
def dissect_files(tshark, tmpdir, num_procs, max_files, cap_files):
pool = multiprocessing.Pool(num_procs)
results = [pool.apply_async(dissect_file_process, [tshark, tmpdir, file]) for file in cap_files]
try:
for (cur_item_idx,result_async) in enumerate(results):
file_result = result_async.get()
action = "FAILED" if file_result[1] is False else "PASSED"
print("%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result[0], os.path.getsize(file_result[0])))
except KeyboardInterrupt:
print("%s was interrupted by user" % (sys.argv[0]))
pool.terminate()
exit(1)
def compare_files(tshark_bin, tmpdir, tshark_cmp, num_procs, max_files, cap_files):
pool = multiprocessing.Pool(num_procs)
results_bin = [pool.apply_async(dissect_file_process, [tshark_bin, tmpdir, file]) for file in cap_files]
results_cmp = [pool.apply_async(dissect_file_process, [tshark_cmp, tmpdir, file]) for file in cap_files]
try:
for (cur_item_idx,(result_async_bin, result_async_cmp)) in enumerate(zip(results_bin, results_cmp)):
file_result_bin = result_async_bin.get()
file_result_cmp = result_async_cmp.get()
            if file_result_cmp[1] is False or file_result_bin[1] is False:
                action = "FAILED (exitcode)"
            elif not filecmp.cmp(file_result_bin[2], file_result_cmp[2]):
                action = "FAILED (stdout)"
            elif not filecmp.cmp(file_result_bin[3], file_result_cmp[3]):
                action = "FAILED (stderr)"
            else:
                action = "PASSED"
os.remove(file_result_bin[2])
os.remove(file_result_cmp[2])
os.remove(file_result_bin[3])
os.remove(file_result_cmp[3])
print("%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result_bin[0], os.path.getsize(file_result_bin[0])))
print("%s [%u/%u] %s %u bytes" % (action, cur_item_idx+1, max_files, file_result_cmp[0], os.path.getsize(file_result_cmp[0])))
except KeyboardInterrupt:
print("%s was interrupted by user" % (sys.argv[0]))
pool.terminate()
exit(1)
def list_all_proto(cap_hash):
proto_hash = {}
for files_hash in cap_hash.values():
for proto,count in files_hash.items():
proto_hash[proto] = count + proto_hash.setdefault(proto, 0)
return proto_hash
def list_all_files(cap_hash):
files = list(cap_hash.keys())
files.sort()
return files
def list_all_proto_files(cap_hash, proto_comma_delit):
protos = [ x.strip() for x in proto_comma_delit.split(',') ]
files = []
for (file, files_hash) in cap_hash.items():
for proto in files_hash.keys():
if proto in protos:
files.append(file)
break
return files
def index_file_action(options):
return options.list_all_proto or \
options.list_all_files or \
options.list_all_proto_files or \
options.dissect_files
def find_capture_files(paths, cap_hash):
cap_files = []
for path in paths:
if os.path.isdir(path):
path = os.path.normpath(path)
for root, dirs, files in os.walk(path):
cap_files += [os.path.join(root, name) for name in files if os.path.join(root, name) not in cap_hash]
elif path not in cap_hash:
cap_files.append(path)
return cap_files
def find_tshark_executable(bin_dir):
for file in ["tshark.exe", "tshark"]:
tshark = os.path.join(bin_dir, file)
if os.access(tshark, os.X_OK):
return tshark
return None
def main():
parser = OptionParser(usage="usage: %prog [options] index_file [file_1|dir_1 [.. file_n|dir_n]]")
parser.add_option("-d", "--dissect-files", dest="dissect_files", default=False, action="store_true",
help="Dissect all matching files")
parser.add_option("-m", "--max-files", dest="max_files", default=sys.maxsize, type="int",
help="Max number of files to process")
parser.add_option("-b", "--binary-dir", dest="bin_dir", default=os.getcwd(),
help="Directory containing tshark executable")
parser.add_option("-c", "--compare-dir", dest="compare_dir", default=None,
help="Directory containing tshark executable which is used for comparison")
parser.add_option("-j", dest="num_procs", default=multiprocessing.cpu_count(), type=int,
help="Max number of processes to spawn")
parser.add_option("-r", "--randomize", default=False, action="store_true",
help="Randomize the file list order")
parser.add_option("", "--list-all-proto", dest="list_all_proto", default=False, action="store_true",
help="List all protocols in index file")
parser.add_option("", "--list-all-files", dest="list_all_files", default=False, action="store_true",
help="List all files in index file")
parser.add_option("", "--list-all-proto-files", dest="list_all_proto_files", default=False,
metavar="PROTO_1[, .. PROTO_N]",
help="List all files in index file containing the given protocol")
(options, args) = parser.parse_args()
if len(args) == 0:
parser.error("index_file is a required argument")
if len(args) == 1 and not index_file_action(options):
parser.error("one capture file/directory must be specified")
if options.dissect_files and not options.list_all_files and not options.list_all_proto_files:
parser.error("--list-all-files or --list-all-proto-files must be specified")
    if options.dissect_files and options.compare_dir is not None:
parser.error("--dissect-files and --compare-dir cannot be specified at the same time")
index_file_name = args.pop(0)
paths = args
cap_hash = {}
try:
index_file = open(index_file_name, "rb")
print("index file: %s [OPENED]" % index_file.name)
cap_hash = pickle.load(index_file)
index_file.close()
print("%d files" % len(cap_hash))
except IOError:
print("index file: %s [NEW]" % index_file_name)
if options.list_all_proto:
print(list_all_proto(cap_hash))
exit(0)
indexed_files = []
if options.list_all_files:
indexed_files = list_all_files(cap_hash)
print(indexed_files)
if options.list_all_proto_files:
indexed_files = list_all_proto_files(cap_hash, options.list_all_proto_files)
print(indexed_files)
tshark_bin = find_tshark_executable(options.bin_dir)
    if tshark_bin is not None:
print("tshark: %s [FOUND]" % tshark_bin)
else:
print("tshark: %s [MISSING]" % tshark_bin)
exit(1)
    if options.compare_dir is not None:
        tshark_cmp = find_tshark_executable(options.compare_dir)
        if tshark_cmp is not None:
print("tshark: %s [FOUND]" % tshark_cmp)
else:
print("tshark: %s [MISSING]" % tshark_cmp)
exit(1)
if options.dissect_files or options.compare_dir:
cap_files = indexed_files
elif options.list_all_proto_files or options.list_all_files:
exit(0)
else:
cap_files = find_capture_files(paths, cap_hash)
if options.randomize:
random.shuffle(cap_files)
else:
cap_files.sort()
options.max_files = min(options.max_files, len(cap_files))
print("%u total files, %u working files" % (len(cap_files), options.max_files))
cap_files = cap_files[:options.max_files]
if options.compare_dir or options.dissect_files:
tmpdir = tempfile.mkdtemp()
print("Temporary working dir: %s" % tmpdir)
try:
if options.compare_dir:
compare_files(tshark_bin, tmpdir, tshark_cmp, options.num_procs, options.max_files, cap_files)
elif options.dissect_files:
dissect_files(tshark_bin, tmpdir, options.num_procs, options.max_files, cap_files)
else:
extract_protos_from_file(tshark_bin, options.num_procs, options.max_files, cap_files, cap_hash, index_file_name)
finally:
# Dissection may result in a non-empty directory.
if options.compare_dir:
os.rmdir(tmpdir)
if __name__ == "__main__":
main() |
Python | wireshark/tools/lex.py | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2015,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# -----------------------------------------------------------------------------
__version__ = '3.8'
__tabversion__ = '3.8'
import re
import sys
import types
import copy
import os
import inspect
# This tuple contains known string types
try:
# Python 2.6
StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
# Python 3.0
StringTypes = (str, bytes)
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self, message, s):
self.args = (message,)
self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
def __str__(self):
return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)
def __repr__(self):
return str(self)
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def critical(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
info = critical
debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
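# A typical round trip, assuming a lexer built by the lex() entry point
# defined later in this module:
#
#   lexer.input('some input text')
#   while True:
#       tok = lexer.token()     # None is returned at end of input
#       if not tok:
#           break
#       print(tok.type, tok.value, tok.lineno, tok.lexpos)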
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re, findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
self.lexstate = 'INITIAL' # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexstateeoff = {} # Dictionary of eof functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lexeoff = None # EOF rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = '' # Ignored characters
self.lexliterals = '' # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexoptimize = False # Optimized mode
def clone(self, object=None):
c = copy.copy(self)
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = {}
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object, f[0].__name__), f[1]))
newre.append((cre, newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = {}
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object, ef.__name__)
c.lexmodule = object
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self, lextab, outputdir=''):
if isinstance(lextab, types.ModuleType):
raise IOError("Won't overwrite existing lextab module")
basetabmodule = lextab.split('.')[-1]
filename = os.path.join(outputdir, basetabmodule) + '.py'
with open(filename, 'w') as tf:
tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
tf.write('_tabversion = %s\n' % repr(__tabversion__))
tf.write('_lextokens = %s\n' % repr(self.lextokens))
tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
# Rewrite the lexstatere table, replacing function objects with function names
tabre = {}
for statename, lre in self.lexstatere.items():
titem = []
for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
titem.append((retext, _funcs_to_names(func, renames)))
tabre[statename] = titem
tf.write('_lexstatere = %s\n' % repr(tabre))
tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
taberr = {}
for statename, ef in self.lexstateerrorf.items():
taberr[statename] = ef.__name__ if ef else None
tf.write('_lexstateerrorf = %s\n' % repr(taberr))
tabeof = {}
for statename, ef in self.lexstateeoff.items():
tabeof[statename] = ef.__name__ if ef else None
tf.write('_lexstateeoff = %s\n' % repr(tabeof))
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self, tabfile, fdict):
if isinstance(tabfile, types.ModuleType):
lextab = tabfile
else:
exec('import %s' % tabfile)
lextab = sys.modules[tabfile]
if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
raise ImportError('Inconsistent PLY version')
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lextokens_all = self.lextokens | set(self.lexliterals)
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = {}
self.lexstateretext = {}
for statename, lre in lextab._lexstatere.items():
titem = []
txtitem = []
for pat, func_name in lre:
titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict)))
self.lexstatere[statename] = titem
self.lexstateretext[statename] = txtitem
self.lexstateerrorf = {}
for statename, ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[statename] = fdict[ef]
self.lexstateeoff = {}
for statename, ef in lextab._lexstateeoff.items():
self.lexstateeoff[statename] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self, s):
# Pull off the first character to see if s looks like a string
c = s[:1]
if not isinstance(c, StringTypes):
raise ValueError('Expected a string')
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self, state):
if state not in self.lexstatere:
raise ValueError('Undefined state')
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state, '')
self.lexerrorf = self.lexstateerrorf.get(state, None)
self.lexeoff = self.lexstateeoff.get(state, None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self, state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self, n):
self.lexpos += n
# ------------------------------------------------------------
    # token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre, lexindexfunc in self.lexre:
m = lexre.match(lexdata, lexpos)
if not m:
continue
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
i = m.lastindex
func, tok.type = lexindexfunc[i]
if not func:
# If no token type was set, it's an ignored token
if tok.type:
self.lexpos = m.end()
return tok
else:
lexpos = m.end()
break
lexpos = m.end()
# If token is processed by a function, call it
tok.lexer = self # Set additional attributes useful in token rules
self.lexmatch = m
self.lexpos = lexpos
newtok = func(tok)
                # Every function must return a token; if it returns nothing, we just move to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
lexignore = self.lexignore # This is here in case there was a state change
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if newtok.type not in self.lextokens_all:
raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func.__code__.co_filename, func.__code__.co_firstlineno,
func.__name__, newtok.type), lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = 'error'
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok:
continue
return newtok
self.lexpos = lexpos
raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])
if self.lexeoff:
tok = LexToken()
tok.type = 'eof'
tok.value = ''
tok.lineno = self.lineno
tok.lexpos = lexpos
tok.lexer = self
self.lexpos = lexpos
newtok = self.lexeoff(tok)
return newtok
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError('No input string given with input()')
return None
# Iterator interface
def __iter__(self):
return self
def next(self):
t = self.token()
if t is None:
raise StopIteration
return t
__next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
result = []
for f, name in zip(funclist, namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]], n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
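# (Older versions of Python's re module limit a pattern to 100 named groups,
# which is why the rule list is split in half and retried when compilation
# of the combined expression fails.)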
def _form_master_re(relist, reflags, ldict, toknames):
if not relist:
return []
regex = '|'.join(relist)
try:
lexre = re.compile(regex, re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
lexindexnames = lexindexfunc[:]
for f, i in lexre.groupindex.items():
handle = ldict.get(f, None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle, toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find('ignore_') > 0:
lexindexfunc[i] = (None, None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre, lexindexfunc)], [regex], [lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0:
m = 1
llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
return (llist+rlist), (lre+rre), (lnames+rnames)
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
nonstate = 1
parts = s.split('_')
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = '_'.join(parts[i:])
return (states, tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
def __init__(self, ldict, log=None, reflags=0):
self.ldict = ldict
self.error_func = None
self.tokens = []
self.reflags = reflags
self.stateinfo = {'INITIAL': 'inclusive'}
self.modules = set()
self.error = False
self.log = PlyLogger(sys.stderr) if log is None else log
# Get all of the basic information
def get_all(self):
self.get_tokens()
self.get_literals()
self.get_states()
self.get_rules()
# Validate all of the information
def validate_all(self):
self.validate_tokens()
self.validate_literals()
self.validate_rules()
return self.error
# Get the tokens map
def get_tokens(self):
tokens = self.ldict.get('tokens', None)
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
terminals = {}
for n in self.tokens:
if not _is_identifier.match(n):
self.log.error("Bad token name '%s'", n)
self.error = True
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the literals specifier
def get_literals(self):
self.literals = self.ldict.get('literals', '')
if not self.literals:
self.literals = ''
# Validate literals
def validate_literals(self):
try:
for c in self.literals:
if not isinstance(c, StringTypes) or len(c) > 1:
self.log.error('Invalid literal %s. Must be a single character', repr(c))
self.error = True
except TypeError:
self.log.error('Invalid literals specification. literals must be a sequence of characters')
self.error = True
def get_states(self):
self.states = self.ldict.get('states', None)
# Build statemap
if self.states:
if not isinstance(self.states, (tuple, list)):
self.log.error('states must be defined as a tuple or list')
self.error = True
else:
for s in self.states:
if not isinstance(s, tuple) or len(s) != 2:
self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
self.error = True
continue
name, statetype = s
if not isinstance(name, StringTypes):
self.log.error('State name %s must be a string', repr(name))
self.error = True
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
self.error = True
continue
if name in self.stateinfo:
self.log.error("State '%s' already defined", name)
self.error = True
continue
self.stateinfo[name] = statetype
# Get all of the symbols with a t_ prefix and sort them into various
# categories (functions, strings, error functions, and ignore characters)
def get_rules(self):
tsymbols = [f for f in self.ldict if f[:2] == 't_']
# Now build up a list of functions and a list of strings
self.toknames = {} # Mapping of symbols to token names
self.funcsym = {} # Symbols defined as functions
self.strsym = {} # Symbols defined as strings
self.ignore = {} # Ignore strings by state
self.errorf = {} # Error functions by state
self.eoff = {} # EOF functions by state
for s in self.stateinfo:
self.funcsym[s] = []
self.strsym[s] = []
if len(tsymbols) == 0:
self.log.error('No rules of the form t_rulename are defined')
self.error = True
return
for f in tsymbols:
t = self.ldict[f]
states, tokname = _statetoken(f, self.stateinfo)
self.toknames[f] = tokname
if hasattr(t, '__call__'):
if tokname == 'error':
for s in states:
self.errorf[s] = t
elif tokname == 'eof':
for s in states:
self.eoff[s] = t
elif tokname == 'ignore':
line = t.__code__.co_firstlineno
file = t.__code__.co_filename
self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
self.error = True
else:
for s in states:
self.funcsym[s].append((f, t))
elif isinstance(t, StringTypes):
if tokname == 'ignore':
for s in states:
self.ignore[s] = t
if '\\' in t:
self.log.warning("%s contains a literal backslash '\\'", f)
elif tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", f)
self.error = True
else:
for s in states:
self.strsym[s].append((f, t))
else:
self.log.error('%s not defined as a function or string', f)
self.error = True
# Sort the functions by line number
for f in self.funcsym.values():
f.sort(key=lambda x: x[1].__code__.co_firstlineno)
# Sort the strings by regular expression length
for s in self.strsym.values():
s.sort(key=lambda x: len(x[1]), reverse=True)
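# To illustrate the categorization (all names hypothetical): t_PLUS = r'\+'
# is filed under strsym['INITIAL'], a function t_NUMBER(t) under
# funcsym['INITIAL'], t_ignore = ' \t' fills ignore['INITIAL'], and a
# function t_error(t) becomes errorf['INITIAL'].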
# Validate all of the t_rules collected
def validate_rules(self):
for state in self.stateinfo:
# Validate all rules defined by functions
for fname, f in self.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
tokname = self.toknames[fname]
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
continue
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
continue
if not _get_regex(f):
self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | self.reflags)
if c.match(''):
self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
self.error = True
except re.error as e:
self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
if '#' in _get_regex(f):
self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
self.error = True
# Validate all rules defined by strings
for name, r in self.strsym[state]:
tokname = self.toknames[name]
if tokname == 'error':
self.log.error("Rule '%s' must be defined as a function", name)
self.error = True
continue
if tokname not in self.tokens and tokname.find('ignore_') < 0:
self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
self.error = True
continue
try:
c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags)
if (c.match('')):
self.log.error("Regular expression for rule '%s' matches empty string", name)
self.error = True
except re.error as e:
self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
if '#' in r:
self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
self.error = True
if not self.funcsym[state] and not self.strsym[state]:
self.log.error("No rules defined for state '%s'", state)
self.error = True
# Validate the error function
efunc = self.errorf.get(state, None)
if efunc:
f = efunc
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
module = inspect.getmodule(f)
self.modules.add(module)
if isinstance(f, types.MethodType):
reqargs = 2
else:
reqargs = 1
nargs = f.__code__.co_argcount
if nargs > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
self.error = True
if nargs < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
self.error = True
for module in self.modules:
self.validate_module(module)
# -----------------------------------------------------------------------------
# validate_module()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the source code of the given module.
# -----------------------------------------------------------------------------
def validate_module(self, module):
lines, linen = inspect.getsourcelines(module)
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = {}
linen += 1
for line in lines:
m = fre.match(line)
if not m:
m = sre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
self.error = True
linen += 1
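# To illustrate (hypothetical rule name): a module containing both
#   def t_NUMBER(t): ...
# and, further down, t_NUMBER = r'\d+'
# is reported as "Rule t_NUMBER redefined", citing the line of the first definition.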
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None):
if lextab is None:
lextab = 'lextab'
global lexer
ldict = None
stateinfo = {'INITIAL': 'inclusive'}
lexobj = Lexer()
lexobj.lexoptimize = optimize
global token, input
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
if debug:
if debuglog is None:
debuglog = PlyLogger(sys.stderr)
# Get the module dictionary used for the lexer
if object:
module = object
# Get the module dictionary used for the lexer
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
ldict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in ldict:
ldict['__file__'] = sys.modules[ldict['__module__']].__file__
else:
ldict = get_caller_module_dict(2)
# Determine if the module is part of a package.
# If so, fix the lextab setting so that tables load correctly
pkg = ldict.get('__package__')
if pkg and isinstance(lextab, str):
if '.' not in lextab:
lextab = pkg + '.' + lextab
# Collect lexer information from the dictionary
linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
linfo.get_all()
if not optimize:
if linfo.validate_all():
raise SyntaxError("Can't build lexer")
if optimize and lextab:
try:
lexobj.readtab(lextab, ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Dump some basic debugging information
if debug:
debuglog.info('lex: tokens = %r', linfo.tokens)
debuglog.info('lex: literals = %r', linfo.literals)
debuglog.info('lex: states = %r', linfo.stateinfo)
# Build a dictionary of valid token names
lexobj.lextokens = set()
for n in linfo.tokens:
lexobj.lextokens.add(n)
# Get literals specification
if isinstance(linfo.literals, (list, tuple)):
lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
else:
lexobj.lexliterals = linfo.literals
lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
# Get the stateinfo dictionary
stateinfo = linfo.stateinfo
regexs = {}
# Build the master regular expressions
for state in stateinfo:
regex_list = []
# Add rules defined by functions first
for fname, f in linfo.funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
# Now add all of the simple rules
for name, r in linfo.strsym[state]:
regex_list.append('(?P<%s>%s)' % (name, r))
if debug:
debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
regexs[state] = regex_list
# Build the master regular expressions
if debug:
debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
for state in regexs:
lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
lexobj.lexstaterenames[state] = re_names
if debug:
for i, text in enumerate(re_text):
debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
# For inclusive states, we need to add the regular expressions from the INITIAL state
for state, stype in stateinfo.items():
if state != 'INITIAL' and stype == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere['INITIAL']
lexobj.lexretext = lexobj.lexstateretext['INITIAL']
lexobj.lexreflags = reflags
# Set up ignore variables
lexobj.lexstateignore = linfo.ignore
lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
# Set up error functions
lexobj.lexstateerrorf = linfo.errorf
lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
if not lexobj.lexerrorf:
errorlog.warning('No t_error rule is defined')
# Set up eof functions
lexobj.lexstateeoff = linfo.eoff
lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
# Check state information for ignore and error rules
for s, stype in stateinfo.items():
if stype == 'exclusive':
if s not in linfo.errorf:
errorlog.warning("No error rule is defined for exclusive state '%s'", s)
if s not in linfo.ignore and lexobj.lexignore:
errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
elif stype == 'inclusive':
if s not in linfo.errorf:
linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
if s not in linfo.ignore:
linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If lextab specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(lextab, types.ModuleType):
srcfile = lextab.__file__
else:
if '.' not in lextab:
srcfile = ldict['__file__']
else:
parts = lextab.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
try:
lexobj.writetab(lextab, outputdir)
except IOError as e:
errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
return lexobj
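# A minimal, illustrative lexer specification consumed by lex(); every name
# below is hypothetical and would normally live in the calling module:
#
#   tokens = ('NUMBER', 'PLUS')
#   t_PLUS = r'\+'
#   t_ignore = ' \t'
#   def t_NUMBER(t):
#       r'\d+'
#       t.value = int(t.value)
#       return t
#   def t_error(t):
#       t.lexer.skip(1)
#
#   lexer = lex()
#   lexer.input('1 + 2')
#   for tok in iter(lexer.token, None):
#       print(tok)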
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
sys.stdout.write('Reading from standard input (type EOF to end):\n')
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while True:
tok = _token()
if not tok:
break
sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex on a function
# when the pattern cannot conveniently be placed in its docstring
# (for example, when the regex is computed at run time)
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_regex(f):
if hasattr(r, '__call__'):
f.regex = _get_regex(r)
else:
f.regex = r
return f
return set_regex
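# Illustrative use of @TOKEN to attach a regex built at run time (the rule
# name and pattern are hypothetical):
#   identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
#   @TOKEN(identifier)
#   def t_ID(t):
#       return t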
# Alternative spelling of the TOKEN decorator
Token = TOKEN |
Perl | wireshark/tools/licensecheck.pl | #!/usr/bin/perl
# -*- tab-width: 8; indent-tabs-mode: t; cperl-indent-level: 4 -*-
# This script was originally based on the script of the same name from
# the KDE SDK (by [email protected])
#
# This version is
# Copyright (C) 2007, 2008 Adam D. Barratt
# Copyright (C) 2012 Francesco Poli
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <https://www.gnu.org/licenses/>.
# Originally copied from Debian's devscripts. A more modern version of
# this can be found at
# https://anonscm.debian.org/git/pkg-perl/packages/licensecheck.git/
=head1 NAME
licensecheck - simple license checker for source files
=head1 SYNOPSIS
B<licensecheck> B<--help>|B<--version>
B<licensecheck> [B<--no-conf>] [B<--verbose>] [B<--copyright>]
[B<-l>|B<--lines=>I<N>] [B<-i>|B<--ignore=>I<regex>] [B<-c>|B<--check=>I<regex>]
[B<-m>|B<--machine>] [B<-r>|B<--recursive>] [B<-e>|B<--encoding=>I<...>]
I<list of files and directories to check>
=head1 DESCRIPTION
B<licensecheck> attempts to determine the license that applies to each file
passed to it, by searching the start of the file for text belonging to
various licenses.
If any of the arguments passed are directories, B<licensecheck> will add
the files contained within to the list of files to process.
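For example, an illustrative invocation (the directory is hypothetical):
 licensecheck --copyright -r epan/dissectors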
=head1 OPTIONS
=over 4
=item B<--verbose>, B<--no-verbose>
Specify whether to output the text being processed from each file before
the corresponding license information.
Default is to be quiet.
=item B<-l=>I<N>, B<--lines=>I<N>
Specify the number of lines of each file's header which should be parsed
for license information. (Default is 60).
=item B<--tail=>I<N>
By default, the last 5k bytes of each file are parsed to get license
information. You may use this option to set the size of this parsed chunk.
You may set this value to 0 to avoid parsing the end of the file.
=item B<-i=>I<regex>, B<--ignore=>I<regex>
When processing the list of files and directories, the regular
expression specified by this option will be used to indicate those which
should not be considered (e.g. backup files, VCS metadata).
=item B<-r>, B<--recursive>
Specify that the contents of directories should be added
recursively.
=item B<-c=>I<regex>, B<--check=>I<regex>
Specify a pattern against which filenames will be matched in order to
decide which files to check the license of.
The default includes common source files.
=item B<-s>, B<--skipped>
Specify whether to show skipped files, i.e. files found which do not
match the check regexp (see C<--check> option). Default is to not show
skipped files.
Note that ignored files (like C<.git> or C<.svn>) are not shown even when
this option is used.
=item B<--copyright>
Also display copyright text found within the file
=item B<-e>, B<--encoding=>I<...>
Specifies input encoding of source files. By default, input files are
not decoded. When encoding is specified, license and copyright
information are printed on STDOUT as utf8, or garbage if you got the
encoding wrong.
=item B<-m>, B<--machine>
Display the information in a machine readable way, i.e. in the form
<file><tab><license>[<tab><copyright>] so that it can be easily sorted
and/or filtered, e.g. with the B<awk> and B<sort> commands.
Note that using the B<--verbose> option will kill the readability.
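An illustrative output line (the file name and copyright holder are hypothetical):
 packet-foo.c<tab>GPL (v2 or later)<tab>2011 J. Random Hacker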
=item B<--no-conf>, B<--noconf>
Do not read any configuration files. This can only be used as the first
option given on the command line.
=back
=head1 CONFIGURATION VARIABLES
The two configuration files F</etc/devscripts.conf> and
F<~/.devscripts> are sourced by a shell in that order to set
configuration variables. Command line options can be used to override
configuration file settings. Environment variable settings are
ignored for this purpose. The currently recognised variables are:
=over 4
=item B<LICENSECHECK_VERBOSE>
If this is set to I<yes>, then it is the same as the B<--verbose> command
line parameter being used. The default is I<no>.
=item B<LICENSECHECK_PARSELINES>
If this is set to a positive number then the specified number of lines
at the start of each file will be read whilst attempting to determine
the license(s) in use. This is equivalent to the B<--lines> command line
option.
=back
=head1 LICENSE
This code is copyright by Adam D. Barratt <I<[email protected]>>,
all rights reserved; based on a script of the same name from the KDE
SDK, which is copyright by <I<[email protected]>>.
This program comes with ABSOLUTELY NO WARRANTY.
You are free to redistribute this code under the terms of the GNU
General Public License, version 2 or later.
=head1 AUTHOR
Adam D. Barratt <[email protected]>
=cut
# see https://stackoverflow.com/questions/6162484/why-does-modern-perl-avoid-utf-8-by-default/6163129#6163129
use v5.14;
use utf8;
use strict;
use autodie;
use warnings;
use warnings qw< FATAL utf8 >;
use Getopt::Long qw(:config gnu_getopt);
use File::Basename;
use File::stat;
use IO::File;
use Fcntl qw/:seek/;
binmode STDOUT, ':utf8';
my $progname = basename($0);
# From dpkg-source
my $default_ignore_regex = qr!
# Ignore general backup files
~$|
# Ignore emacs recovery files
(?:^|/)\.#|
# Ignore vi swap files
(?:^|/)\..*\.swp$|
# Ignore baz-style junk files or directories
(?:^|/),,.*(?:$|/.*$)|
# File-names that should be ignored (never directories)
(?:^|/)(?:DEADJOE|\.cvsignore|\.arch-inventory|\.bzrignore|\.gitignore)$|
# File or directory names that should be ignored
(?:^|/)(?:CVS|RCS|\.pc|\.deps|\{arch\}|\.arch-ids|\.svn|\.hg|_darcs|\.git|
\.shelf|_MTN|\.bzr(?:\.backup|tags)?)(?:$|/.*$)
!x;
# The original Debian version checks Markdown (.md and .markdown) files.
# If we add those extensions back, we should add Asciidoctor (.adoc) as
# well, and add SPDX IDs to all of those files.
my $default_check_regex =
qr!
\.( # search for file suffix
c(c|pp|xx)? # c and c++
|h(h|pp|xx)? # header files for c and c++
|S
|css|less # HTML css and similar
|f(77|90)?
|go
|groovy
|lisp
|scala
|clj
|p(l|m)?6?|t|xs|pod6? # perl5 or perl6
|sh
|php
|py(|x)
|rb
|java
|js
|vala
|el
|sc(i|e)
|cs
|pas
|inc
|dtd|xsl
|mod
|m
|tex
|mli?
|(c|l)?hs
)
$
!x;
# also used to cleanup
my $copyright_indicator_regex
= qr!
(?:copyright # The full word
|copr\. # Legally-valid abbreviation
|\xc2\xa9 # Unicode copyright sign encoded in iso8859
|\x{00a9} # Unicode character COPYRIGHT SIGN
#|© # Unicode character COPYRIGHT SIGN
|\(c\) # Legally-null representation of sign
)
!lix;
my $copyright_indicator_regex_with_capture = qr!$copyright_indicator_regex(?::\s*|\s+)(\S.*)$!lix;
# avoid ditching things like <[email protected]>
my $copyright_disindicator_regex
= qr{
\b(?:info(?:rmation)?(?!@) # Discussing copyright information
|(notice|statement|claim|string)s? # Discussing the notice
|is|in|to # Part of a sentence
|(holder|owner)s? # Part of a sentence
|ownership # Part of a sentence
)\b
}ix;
my $copyright_predisindicator_regex
= qr!(
^[#]define\s+.*\(c\) # #define foo(c) -- not copyright
)!ix;
my $modified_conf_msg;
my %OPT=(
verbose => '',
lines => '',
noconf => '',
ignore => '',
check => '',
recursive => 0,
copyright => 0,
machine => 0,
text => 0,
skipped => 0,
);
my $def_lines = 60;
my $def_tail = 5000; # roughly 60 lines of 80 chars
# Read configuration files and then command line
# This is boilerplate
if (@ARGV and $ARGV[0] =~ /^--no-?conf$/) {
$modified_conf_msg = " (no configuration files read)";
shift;
} else {
my @config_files = ('/etc/devscripts.conf', '~/.devscripts');
my %config_vars = (
'LICENSECHECK_VERBOSE' => 'no',
'LICENSECHECK_PARSELINES' => $def_lines,
);
my %config_default = %config_vars;
my $shell_cmd;
# Set defaults
foreach my $var (keys %config_vars) {
$shell_cmd .= qq[$var="$config_vars{$var}";\n];
}
$shell_cmd .= 'for file in ' . join(" ", @config_files) . "; do\n";
$shell_cmd .= '[ -f $file ] && . $file; done;' . "\n";
# Read back values
foreach my $var (keys %config_vars) { $shell_cmd .= "echo \$$var;\n" }
my $shell_out = `/bin/bash -c '$shell_cmd'`;
@config_vars{keys %config_vars} = split /\n/, $shell_out, -1;
# Check validity
$config_vars{'LICENSECHECK_VERBOSE'} =~ /^(yes|no)$/
or $config_vars{'LICENSECHECK_VERBOSE'} = 'no';
$config_vars{'LICENSECHECK_PARSELINES'} =~ /^[1-9][0-9]*$/
or $config_vars{'LICENSECHECK_PARSELINES'} = $def_lines;
foreach my $var (sort keys %config_vars) {
if ($config_vars{$var} ne $config_default{$var}) {
$modified_conf_msg .= " $var=$config_vars{$var}\n";
}
}
$modified_conf_msg ||= " (none)\n";
chomp $modified_conf_msg;
$OPT{'verbose'} = $config_vars{'LICENSECHECK_VERBOSE'} eq 'yes' ? 1 : 0;
$OPT{'lines'} = $config_vars{'LICENSECHECK_PARSELINES'};
}
GetOptions(\%OPT,
"help|h",
"check|c=s",
"copyright",
"encoding|e=s",
"ignore|i=s",
"lines|l=i",
"machine|m",
"noconf|no-conf",
"recursive|r",
"skipped|s",
"tail",
"text|t",
"verbose!",
"version|v",
) or die "Usage: $progname [options] filelist\nRun $progname --help for more details\n";
$OPT{'lines'} = $def_lines if $OPT{'lines'} !~ /^[1-9][0-9]*$/;
my $ignore_regex = length($OPT{ignore}) ? qr/$OPT{ignore}/ : $default_ignore_regex;
my $check_regex = $default_check_regex;
$check_regex = qr/$OPT{check}/ if length $OPT{check};
if ($OPT{'noconf'}) {
fatal("--no-conf is only acceptable as the first command-line option!");
}
if ($OPT{'help'}) { help(); exit 0; }
if ($OPT{'version'}) { version(); exit 0; }
if ($OPT{text}) {
warn "$0 warning: option -text is deprecated\n"; # remove -text end 2015
}
die "Usage: $progname [options] filelist\nRun $progname --help for more details\n" unless @ARGV;
$OPT{'lines'} = $def_lines if not defined $OPT{'lines'};
my @files = ();
my @find_args = ();
my $files_count = @ARGV;
push @find_args, qw(-maxdepth 1) unless $OPT{'recursive'};
push @find_args, qw(-follow -type f -print);
while (@ARGV) {
my $file = shift @ARGV;
if (-d $file) {
open my $FIND, '-|', 'find', $file, @find_args
or die "$progname: couldn't exec find: $!\n";
while (my $found = <$FIND>) {
chomp ($found);
# Silently skip empty files or ignored files
next if -z $found or $found =~ $ignore_regex;
if ( not $check_regex or $found =~ $check_regex ) {
# Silently skip empty files or ignored files
push @files, $found ;
}
else {
warn "skipped file $found\n" if $OPT{skipped};
}
}
close $FIND;
}
elsif ($file =~ $ignore_regex) {
# Silently skip ignored files
next;
}
elsif ( $files_count == 1 or not $check_regex or $file =~ $check_regex ) {
push @files, $file;
}
else {
warn "skipped file $file\n" if $OPT{skipped};
}
}
while (@files) {
my $file = shift @files;
my $content = '';
my $copyright_match;
my $copyright = '';
my $st = stat $file;
my $enc = $OPT{encoding} ;
my $mode = $enc ? "<:encoding($enc)" : '<';
# need to use "<" when encoding is unknown otherwise we break compatibility
my $fh = IO::File->new ($file ,$mode) or die "Unable to access $file\n";
while ( my $line = $fh->getline ) {
last if ($fh->input_line_number > $OPT{'lines'});
$content .= $line;
}
my %copyrights = extract_copyright($content);
print qq(----- $file header -----\n$content----- end header -----\n\n)
if $OPT{'verbose'};
my $license = parselicense(clean_cruft_and_spaces(clean_comments($content)));
$copyright = join(" / ", reverse sort values %copyrights);
if ( not $copyright and $license eq 'UNKNOWN') {
my $position = $fh->tell; # See IO::Seekable
my $tail_size = $OPT{tail} // $def_tail;
my $jump = $st->size - $tail_size;
$jump = $position if $jump < $position;
my $tail = '';
if ( $tail_size and $jump < $st->size) {
$fh->seek($jump, SEEK_SET) ; # also IO::Seekable
$tail .= join('',$fh->getlines);
}
print qq(----- $file tail -----\n$tail----- end tail -----\n\n)
if $OPT{'verbose'};
%copyrights = extract_copyright($tail);
$license = parselicense(clean_cruft_and_spaces(clean_comments($tail)));
$copyright = join(" / ", reverse sort values %copyrights);
}
$fh->close;
if ($OPT{'machine'}) {
print "$file\t$license";
print "\t" . ($copyright or "*No copyright*") if $OPT{'copyright'};
print "\n";
} else {
print "$file: ";
print "*No copyright* " unless $copyright;
print $license . "\n";
print " [Copyright: " . $copyright . "]\n"
if $copyright and $OPT{'copyright'};
print "\n" if $OPT{'copyright'};
}
}
sub extract_copyright {
my $content = shift;
my @c = split /\n/, clean_comments($content);
my %copyrights;
my $lines_after_copyright_block = 0;
my $in_copyright_block = 0;
while (@c) {
my $line = shift @c ;
my $copyright_match = parse_copyright($line, \$in_copyright_block) ;
if ($copyright_match) {
while (@c and $copyright_match =~ /\d[,.]?\s*$/) {
# looks like the copyright ends with a year; assume the owner is on the next line(s)
$copyright_match .= ' '. shift @c;
}
$copyright_match =~ s/\s+/ /g;
$copyright_match =~ s/\s*$//;
$copyrights{lc("$copyright_match")} = "$copyright_match";
}
elsif (scalar keys %copyrights) {
# Skip the remaining lines if a copyright block was found more than 5
# lines ago; a block may thus contain up to 5 intervening non-copyright
# lines, but no more.
last if $lines_after_copyright_block++ > 5;
}
}
return %copyrights;
}
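# To illustrate the year-continuation handling above (the holder is
# hypothetical): a notice split across lines as "Copyright (C) 2001, 2002"
# followed by "J. Random Hacker" is joined into a single copyright entry.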
sub parse_copyright {
my $data = shift ;
my $in_copyright_block_ref = shift;
my $copyright = '';
my $match;
if ( $data !~ $copyright_predisindicator_regex) {
#print "match against ->$data<-\n";
if ($data =~ $copyright_indicator_regex_with_capture) {
$match = $1;
$$in_copyright_block_ref = 1;
# Ignore lines matching "see foo for copyright information" etc.
if ($match !~ $copyright_disindicator_regex) {
# De-cruft
$match =~ s/$copyright_indicator_regex//igx;
$match =~ s/^\s+//;
$match =~ s/\s*\bby\b\s*/ /;
$match =~ s/([,.])?\s*$//;
$match =~ s/\s{2,}/ /g;
$match =~ s/\\//g; # de-cruft nroff files
$match =~ s/\s*[*#]\s*$//;
$copyright = $match;
}
}
elsif ($$in_copyright_block_ref and $data =~ /^\d{2,}[,\s]+/) {
# following lines beginning with a year are supposed to be
# continued copyright blocks
$copyright = $data;
}
else {
$$in_copyright_block_ref = 0;
}
}
return $copyright;
}
sub clean_comments {
local $_ = shift or return q{};
# Remove generic comments: look for 4 or more lines beginning with
# regular comment pattern and trim it. Fall back to old algorithm
# if no such pattern found.
my @matches = m/^\s*((?:[^a-zA-Z0-9\s]{1,3}|\bREM\b))\s\w/mg;
if (@matches >= 4) {
my $comment_re = qr/\s*[\Q$matches[0]\E]{1,3}\s*/;
s/^$comment_re//mg;
}
# Remove Fortran comments
s/^[cC] //gm;
# Remove C / C++ comments
s#(\*/|/[/*])##g;
return $_;
}
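# To illustrate: four or more lines starting with the same short leader,
# e.g. "# ..." or "* ...", cause that leader to be stripped from every
# line before the license text is matched.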
sub clean_cruft_and_spaces {
local $_ = shift or return q{};
tr/\t\r\n/ /;
# this also removes quotes
tr% A-Za-z.+,@:;0-9\(\)/-%%cd;
tr/ //s;
return $_;
}
sub help {
print <<"EOF";
Usage: $progname [options] filename [filename ...]
Valid options are:
--help, -h Display this message
--version, -v Display version and copyright info
--no-conf, --noconf Don't read devscripts config files; must be
the first option given
--verbose Display the header of each file before its
license information
--skipped, -s Show skipped files
--lines, -l Specify how many lines of the file header
should be parsed for license information
(Default: $def_lines)
--tail Specify how many bytes to parse at end of file
(Default: $def_tail)
--check, -c Specify a pattern indicating which files should
be checked
(Default: '$default_check_regex')
--machine, -m Display in a machine readable way (good for awk)
--recursive, -r Add the contents of directories recursively
--copyright Also display the file's copyright
--ignore, -i Specify that files / directories matching the
regular expression should be ignored when
checking files
(Default: '$default_ignore_regex')
Default settings modified by devscripts configuration files:
$modified_conf_msg
EOF
}
sub version {
print <<"EOF";
This is $progname, from the Debian devscripts package, version 2.16.2
Copyright (C) 2007, 2008 by Adam D. Barratt <adam\@adam-barratt.org.uk>; based
on a script of the same name from the KDE SDK by <dfaure\@kde.org>.
This program comes with ABSOLUTELY NO WARRANTY.
You are free to redistribute this code under the terms of the
GNU General Public License, version 2, or (at your option) any
later version.
EOF
}
sub parselicense {
my ($licensetext) = @_;
my $gplver = "";
my $extrainfo = "";
my $license = "";
if ($licensetext =~ /version ([^ ]+)(?: of the License)?,? or(?: \(at your option\))? version (\d(?:[.-]\d+)*)/) {
$gplver = " (v$1 or v$2)";
} elsif ($licensetext =~ /version ([^, ]+?)[.,]? (?:\(?only\)?.? )?(?:of the GNU (Affero )?(Lesser |Library )?General Public License )?(as )?published by the Free Software Foundation/i or
$licensetext =~ /GNU (?:Affero )?(?:Lesser |Library )?General Public License (?:as )?published by the Free Software Foundation[;,] version ([^, ]+?)[.,]? /i) {
$gplver = " (v$1)";
} elsif ($licensetext =~ /GNU (?:Affero )?(?:Lesser |Library )?General Public License\s*(?:[(),GPL]+)\s*version (\d+(?:\.\d+)?)[ \.]/i) {
$gplver = " (v$1)";
} elsif ($licensetext =~ /either version ([^ ]+)(?: of the License)?, or (?:\(at your option\) )?any later version/) {
$gplver = " (v$1 or later)";
} elsif ($licensetext =~ /GPL\sas\spublished\sby\sthe\sFree\sSoftware\sFoundation,\sversion\s([\d.]+)/i ) {
$gplver = " (v$1)";
} elsif ($licensetext =~ /SPDX-License-Identifier:\s+GPL-([1-9])\.0-or-later/i ){
$gplver = " (v$1 or later)";
} elsif ($licensetext =~ /SPDX-License-Identifier:\s+GPL-([1-9])\.0[^+]/i ) {
$gplver = " (v$1)";
} elsif ($licensetext =~ /SPDX-License-Identifier:\s+GPL-([1-9])\.0\+/i ) {
$gplver = " (v$1 or later)";
} elsif ($licensetext =~ /SPDX-License-Identifier:\s+LGPL-([1-9])\.[0-1]\-or-later/i ) {
$gplver = " (v$1 or later)";
}
if ($licensetext =~ /(?:675 Mass Ave|59 Temple Place|51 Franklin Steet|02139|02111-1307)/i) {
$extrainfo = " (with incorrect FSF address)$extrainfo";
}
if ($licensetext =~ /permission (?:is (also granted|given))? to link (the code of )?this program with (any edition of )?(Qt|the Qt library)/i) {
$extrainfo = " (with Qt exception)$extrainfo"
}
if ($licensetext =~ /As a special exception, you may create a larger work that contains part or all of the Bison parser skeleton and distribute that work under terms of your choice/) {
$extrainfo = " (with Bison parser exception)$extrainfo";
}
# exclude blurb found in boost license text
if ($licensetext =~ /(All changes made in this file will be lost|DO NOT (EDIT|delete this file)|Generated (automatically|by|from)|generated.*file)/i
and $licensetext !~ /unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor/) {
$license = "GENERATED FILE";
}
if ($licensetext =~ /(are made available|(is free software.? )?you can redistribute (it|them) and(?:\/|\s+)or modify (it|them)|is licensed) under the terms of (version [^ ]+ of )?the (GNU (Library |Lesser )General Public License|LGPL)/i) {
$license = "LGPL$gplver$extrainfo $license";
}
# For Perl modules handled by Dist::Zilla
elsif ($licensetext =~ /this is free software,? licensed under:? (?:the )?(?:GNU (?:Library |Lesser )General Public License|LGPL),? version ([\d\.]+)/i) {
$license = "LGPL (v$1) $license";
}
if ($licensetext =~ /is free software.? you can redistribute (it|them) and(?:\/|\s+)or modify (it|them) under the terms of the (GNU Affero General Public License|AGPL)/i) {
$license = "AGPL$gplver$extrainfo $license";
}
if ($licensetext =~ /(is free software.? )?you (can|may) redistribute (it|them) and(?:\/|\s+)or modify (it|them) under the terms of (?:version [^ ]+ (?:\(?only\)? )?of )?the GNU General Public License/i) {
$license = "GPL$gplver$extrainfo $license";
}
if ($licensetext =~ /is distributed under the terms of the GNU General Public License,/
and length $gplver) {
$license = "GPL$gplver$extrainfo $license";
}
if ($licensetext =~ /SPDX-License-Identifier:\s+GPL/i and length $gplver) {
$license = "GPL$gplver$extrainfo $license";
}
if ($licensetext =~ /SPDX-License-Identifier:\s+GPL-2.0-or-later/i and length $gplver) {
$license = "GPL$gplver$extrainfo";
}
if ($licensetext =~ /SPDX-License-Identifier:\s+LGPL/i and length $gplver) {
$license = "LGPL$gplver$extrainfo $license";
}
if ($licensetext =~ /SPDX-License-Identifier:\s+Zlib/i) {
$license = "zlib/libpng $license";
}
if ($licensetext =~ /SPDX-License-Identifier:\s+BSD-3-Clause/i) {
$license = 'BSD (3 clause)';
}
if ($licensetext =~ /SPDX-License-Identifier:\s+BSD-2-Clause/i) {
$license = 'BSD (2 clause)';
}
if ($licensetext =~ /SPDX-License-Identifier:\s+BSD-1-Clause/i) {
$license = 'BSD (1 clause)';
}
if ($licensetext =~ /SPDX-License-Identifier:\s+MIT/i) {
$license = 'MIT/X11 (BSD like)';
}
if ($licensetext =~ /SPDX-License-Identifier:\s+ISC/i) {
$license = 'ISC';
}
if ($licensetext =~ /(?:is|may be)\s(?:(?:distributed|used).*?terms|being\s+released).*?\b(L?GPL)\b/) {
my $v = $gplver || ' (unversioned/unknown version)';
$license = "$1$v $license";
}
if ($licensetext =~ /the rights to distribute and use this software as governed by the terms of the Lisp Lesser General Public License|\bLLGPL\b/ ) {
$license = "LLGPL $license";
}
if ($licensetext =~ /This file is part of the .*Qt GUI Toolkit. This file may be distributed under the terms of the Q Public License as defined/) {
$license = "QPL (part of Qt) $license";
} elsif ($licensetext =~ /may (be distributed|redistribute it) under the terms of the Q Public License/) {
$license = "QPL $license";
}
if ($licensetext =~ /opensource\.org\/licenses\/mit-license\.php/) {
$license = "MIT/X11 (BSD like) $license";
} elsif ($licensetext =~ /Permission is hereby granted, free of charge, to any person obtaining a copy of this software and(\/or)? associated documentation files \(the (Software|Materials)\), to deal in the (Software|Materials)/) {
$license = "MIT/X11 (BSD like) $license";
} elsif ($licensetext =~ /Permission is hereby granted, without written agreement and without license or royalty fees, to use, copy, modify, and distribute this software and its documentation for any purpose/) {
$license = "MIT/X11 (BSD like) $license";
}
if ($licensetext =~ /Permission to use, copy, modify, and(\/or)? distribute this software for any purpose with or without fee is hereby granted, provided.*copyright notice.*permission notice.*all copies/) {
$license = "ISC $license";
}
if ($licensetext =~ /THIS SOFTWARE IS PROVIDED .*AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY/) {
if ($licensetext =~ /All advertising materials mentioning features or use of this software must display the following acknowledge?ment.*This product includes software developed by/i) {
$license = "BSD (4 clause) $license";
} elsif ($licensetext =~ /(The name(?:\(s\))? .*? may not|Neither the (names? .*?|authors?) nor the names of( (its|their|other|any))? contributors may) be used to endorse or promote products derived from this software/i) {
$license = "BSD (3 clause) $license";
} elsif ($licensetext =~ /Redistributions in binary form must reproduce the above copyright notice/i) {
$license = "BSD (2 clause) $license";
} else {
$license = "BSD $license";
}
}
if ($licensetext =~ /Mozilla Public License,? (?:(?:Version|v\.)\s+)?(\d+(?:\.\d+)?)/) {
$license = "MPL (v$1) $license";
}
elsif ($licensetext =~ /Mozilla Public License,? \((?:Version|v\.) (\d+(?:\.\d+)?)\)/) {
$license = "MPL (v$1) $license";
}
# match when either:
# - the text *begins* with "The Artistic license v2.0" which is (hopefully) the actual artistic license v2.0 text.
# - a license grant is found. i.e something like "this is free software, licensed under the artistic license v2.0"
if ($licensetext =~ /(?:^\s*|(?:This is free software, licensed|Released|be used|use and modify this (?:module|software)) under (?:the terms of )?)[Tt]he Artistic License ([v\d.]*\d)/) {
$license = "Artistic (v$1) $license";
}
if ($licensetext =~ /is free software under the Artistic [Ll]icense/) {
$license = "Artistic $license";
}
if ($licensetext =~ /This program is free software; you can redistribute it and\/or modify it under the same terms as Perl itself/) {
$license = "Perl $license";
}
if ($licensetext =~ /under the Apache License, Version ([^ ]+)/) {
$license = "Apache (v$1) $license";
}
if ($licensetext =~ /(THE BEER-WARE LICENSE)/i) {
$license = "Beerware $license";
}
if ($licensetext =~ /distributed under the terms of the FreeType project/i) {
$license = "FreeType $license"; # aka FTL see https://www.freetype.org/license.html
}
if ($licensetext =~ /This source file is subject to version ([^ ]+) of the PHP license/) {
$license = "PHP (v$1) $license";
}
if ($licensetext =~ /under the terms of the CeCILL /) {
$license = "CeCILL $license";
}
if ($licensetext =~ /under the terms of the CeCILL-([^ ]+) /) {
$license = "CeCILL-$1 $license";
}
if ($licensetext =~ /under the SGI Free Software License B/) {
$license = "SGI Free Software License B $license";
}
if ($licensetext =~ /is in the public domain/i) {
$license = "Public domain $license";
}
if ($licensetext =~ /terms of the Common Development and Distribution License(, Version ([^(]+))? \(the License\)/) {
$license = "CDDL " . ($1 ? "(v$2) " : '') . $license;
}
if ($licensetext =~ /Microsoft Permissive License \(Ms-PL\)/) {
$license = "Ms-PL $license";
}
if ($licensetext =~ /Licensed under the Academic Free License version ([\d.]+)/) {
$license = $1 ? "AFL-$1" : "AFL";
}
if ($licensetext =~ /This program and the accompanying materials are made available under the terms of the Eclipse Public License v?([\d.]+)/) {
$license = $1 ? "EPL-$1" : "EPL";
}
# quotes were removed by clean_comments function
if ($licensetext =~ /Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license \(the Software\)/ or
$licensetext =~ /Boost Software License([ ,-]+Version ([^ ]+)?(\.))/i) {
$license = "BSL " . ($1 ? "(v$2) " : '') . $license;
}
if ($licensetext =~ /PYTHON SOFTWARE FOUNDATION LICENSE (VERSION ([^ ]+))/i) {
$license = "PSF " . ($1 ? "(v$2) " : '') . $license;
}
if ($licensetext =~ /The origin of this software must not be misrepresented.*Altered source versions must be plainly marked as such.*This notice may not be removed or altered from any source distribution/ or
$licensetext =~ /see copyright notice in zlib\.h/) {
$license = "zlib/libpng $license";
} elsif ($licensetext =~ /This code is released under the libpng license/) {
$license = "libpng $license";
}
if ($licensetext =~ /Do What The Fuck You Want To Public License, Version ([^, ]+)/i) {
$license = "WTFPL (v$1) $license";
}
if ($licensetext =~ /Do what The Fuck You Want To Public License/i) {
$license = "WTFPL $license";
}
if ($licensetext =~ /(License WTFPL|Under (the|a) WTFPL)/i) {
$license = "WTFPL $license";
}
if ($licensetext =~ /SPDX-License-Identifier:\s+\(([a-zA-Z0-9-\.]+)\s+OR\s+([a-zA-Z0-9-\.]+)\)/i) {
my $license1 = $1;
my $license2 = $2;
$license = parselicense("SPDX-License-Identifier: $license1") . ";" . parselicense("SPDX-License-Identifier: $license2");
}
$license = "UNKNOWN" if (!length($license));
# Remove trailing spaces.
$license =~ s/\s+$//;
return $license;
}
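# To illustrate: a header containing "SPDX-License-Identifier: GPL-2.0-or-later"
# is reported as "GPL (v2 or later)".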
sub fatal {
my ($pack,$file,$line);
($pack,$file,$line) = caller();
(my $msg = "$progname: fatal error at line $line:\n@_\n") =~ tr/\0//d;
$msg =~ s/\n\n$/\n/;
die $msg;
} |
Shell Script | wireshark/tools/list_protos_in_cap.sh | #!/bin/bash
# List the protocols (dissectors) used in capture file(s)
#
# The Python script indexcap.py does the same thing.
#
# This script extracts the protocol names contained in a given capture file.
# This is useful for generating a "database" (flat file :-)) of in what file
# a given protocol can be found.
#
# Output consists of the file name followed by the protocols, for example:
# /path/to/the/file.pcap eth ip sctp
#
# Copyright 2012 Jeff Morriss <jeff.morriss.ws [AT] gmail.com>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
# Directory containing binaries. Default current directory.
WS_BIN_PATH=${WS_BIN_PATH:-.}
# Tweak the following to your liking.
TSHARK="$WS_BIN_PATH/tshark"
CAPINFOS="$WS_BIN_PATH/capinfos"
if [ "$WS_BIN_PATH" = "." ]; then
export WIRESHARK_RUN_FROM_BUILD_DIRECTORY=
fi
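# An illustrative invocation against an out-of-tree build (the path is hypothetical):
#   WS_BIN_PATH=/opt/wireshark/bin ./list_protos_in_cap.sh capture1.pcap capture2.pcapng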
NOTFOUND=0
for i in "$TSHARK" "$CAPINFOS"
do
if [ ! -x "$i" ]
then
echo "Couldn't find $i" 1>&2
NOTFOUND=1
fi
done
if [ $NOTFOUND -eq 1 ]
then
exit 1
fi
# Make sure we have at least one file
FOUND=0
for CF in "$@"
do
if [ "$OSTYPE" == "cygwin" ]
then
CF=`cygpath --windows "$CF"`
fi
"$CAPINFOS" "$CF" > /dev/null 2>&1 && FOUND=1
if [ $FOUND -eq 1 ]
then
break
fi
done
if [ $FOUND -eq 0 ] ; then
cat <<FIN
Error: No valid capture files found.
Usage: `basename $0` capture file 1 [capture file 2]...
FIN
exit 1
fi
for CF in "$@" ; do
if [ "$OSTYPE" == "cygwin" ] ; then
CF=`cygpath --windows "$CF"`
fi
if [ ! -f "$CF" ] ; then
echo "Doesn't exist or not a file: $CF" 1>&2
continue
fi
"$CAPINFOS" "$CF" > /dev/null
RETVAL=$?
if [ $RETVAL -ne 0 ] ; then
echo "Not a valid capture file (or some other problem)" 1>&2
continue
fi
printf "%s: " "$CF"
# Extract the protocol names.
$TSHARK -T fields -eframe.protocols -nr "$CF" 2>/dev/null | \
tr ':\r' '\n' | sort -u | tr '\n\r' ' '
printf "\n"
done |
Shell Script | wireshark/tools/macos-setup-brew.sh | #!/bin/bash
# Copyright 2014, Evan Huus (See AUTHORS file)
#
# Enhance (2016) by Alexis La Goutte (For use with Travis CI)
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
set -e -u -o pipefail
eval "$(brew shellenv)"
HOMEBREW_NO_AUTO_UPDATE=${HOMEBREW_NO_AUTO_UPDATE:-}
# Update to last brew release
if [ -z "$HOMEBREW_NO_AUTO_UPDATE" ] ; then
brew update
fi
function print_usage() {
printf "\\nUtility to setup a macOS system for Wireshark Development using Homebrew.\\n"
printf "The basic usage installs the needed software\\n\\n"
printf "Usage: %s [--install-optional] [--install-dmg-deps] [...other options...]\\n" "$0"
printf "\\t--install-optional: install optional software as well\\n"
printf "\\t--install-dmg-deps: install packages required to build the .dmg file\\n"
printf "\\t--install-sparkle-deps: install the Sparkle automatic updater\\n"
printf "\\t--install-all: install everything\\n"
printf "\\t[other]: other options are passed as-is to apt\\n"
}
INSTALLED_FORMULAE=$( brew list --formulae )
function install_formulae() {
INSTALL_LIST=()
for FORMULA in "$@" ; do
if ! grep --word-regexp "$FORMULA" > /dev/null 2>&1 <<<"$INSTALLED_FORMULAE" ; then
INSTALL_LIST+=( "$FORMULA" )
fi
done
if (( ${#INSTALL_LIST[@]} != 0 )); then
brew install "${INSTALL_LIST[@]}"
else
printf "Nothing to install.\n"
fi
}
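# To illustrate: install_formulae ccache cmake ninja installs only the
# formulae that are missing from "brew list --formulae".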
INSTALL_OPTIONAL=0
INSTALL_DMG_DEPS=0
INSTALL_SPARKLE_DEPS=0
OPTIONS=()
for arg; do
case $arg in
--help)
print_usage
exit 0
;;
--install-optional)
INSTALL_OPTIONAL=1
;;
--install-dmg-deps)
INSTALL_DMG_DEPS=1
;;
--install-sparkle-deps)
INSTALL_SPARKLE_DEPS=1
;;
--install-all)
INSTALL_OPTIONAL=1
INSTALL_DMG_DEPS=1
INSTALL_SPARKLE_DEPS=1
;;
*)
OPTIONS+=("$arg")
;;
esac
done
BUILD_LIST=(
ccache
cmake
ninja
)
# Qt isn't technically required, but...
REQUIRED_LIST=(
c-ares
glib
libgcrypt
pcre2
qt6
speexdsp
)
ADDITIONAL_LIST=(
brotli
gettext
gnutls
libilbc
libmaxminddb
libsmi
libssh
libxml2
[email protected]   # Homebrew's versioned Lua formula (5.1 assumed here; adjust as needed)
lz4
minizip
nghttp2
opus
snappy
spandsp
zstd
)
ACTUAL_LIST=( "${BUILD_LIST[@]}" "${REQUIRED_LIST[@]}" )
# Now arrange for optional support libraries
if [ $INSTALL_OPTIONAL -ne 0 ] ; then
ACTUAL_LIST+=( "${ADDITIONAL_LIST[@]}" )
fi
if (( ${#OPTIONS[@]} != 0 )); then
ACTUAL_LIST+=( "${OPTIONS[@]}" )
fi
install_formulae "${ACTUAL_LIST[@]}"
# Install python modules
pip3 install pytest pytest-xdist
if [ $INSTALL_DMG_DEPS -ne 0 ] ; then
pip3 install dmgbuild
pip3 install biplist
fi
if [ $INSTALL_SPARKLE_DEPS -ne 0 ] ; then
brew install --cask sparkle
fi
# Uncomment to add PNG compression utilities used by compress-pngs:
# brew install advancecomp optipng oxipng pngcrush
# Uncomment to enable generation of documentation
# brew install asciidoctor
if [ -z "$HOMEBREW_NO_AUTO_UPDATE" ] ; then
brew doctor
fi
exit 0
#
# Editor modelines
#
# Local Variables:
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
#
# ex: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
# |
Shell Script | wireshark/tools/macos-setup.sh | #!/bin/bash
# Setup development environment on macOS (tested with 10.6.8 and Xcode
# 3.2.6 and with 10.12.4 and Xcode 8.3).
#
# Copyright 2011 Michael Tuexen, Joerg Mayer, Guy Harris (see AUTHORS file)
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
shopt -s extglob
#
# Get the major version of Darwin, so we can check the major macOS
# version.
#
DARWIN_MAJOR_VERSION=`uname -r | sed 's/\([0-9]*\).*/\1/'`
#
# The minimum supported version of Qt is 5.9, so the minimum supported version
# of macOS is OS X 10.10 (Yosemite), aka Darwin 14.0
if [[ $DARWIN_MAJOR_VERSION -lt 14 ]]; then
echo "This script does not support any versions of macOS before Yosemite" 1>&2
exit 1
fi
#
# Get the processor architecture of Darwin. Currently supported: arm, i386
#
DARWIN_PROCESSOR_ARCH=`uname -p`
if [ "$DARWIN_PROCESSOR_ARCH" != "arm" -a "$DARWIN_PROCESSOR_ARCH" != "i386" ]; then
echo "This script does not support this processor architecture" 1>&2
exit 1
fi
#
# Versions of packages to download and install.
#
#
# We use curl, but older versions of curl in older macOS releases can't
# handle some sites - including the xz site.
#
# If the version of curl in the system is older than 7.54.0, download
# curl and install it.
#
current_curl_version=`curl --version | sed -n 's/curl \([0-9.]*\) .*/\1/p'`
current_curl_major_version="`expr $current_curl_version : '\([0-9][0-9]*\).*'`"
current_curl_minor_version="`expr $current_curl_version : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
if [[ $current_curl_major_version -lt 7 ||
($current_curl_major_version -eq 7 &&
$current_curl_minor_version -lt 54) ]]; then
CURL_VERSION=${CURL_VERSION-7.60.0}
fi
#
# Some packages need xz to unpack their current source.
# XXX: tar, since macOS 10.9, can uncompress xz'ed tarballs,
# so perhaps we could get rid of this now?
#
XZ_VERSION=5.2.5
#
# Some packages need lzip to unpack their current source.
#
LZIP_VERSION=1.21
#
# The version of libPCRE on Catalina is insufficient to build glib due to
# missing UTF-8 support.
#
PCRE_VERSION=8.45
#
# CMake is required to do the build - and to build some of the
# dependencies.
#
CMAKE_VERSION=${CMAKE_VERSION-3.21.4}
#
# Ninja isn't required, as make is provided with Xcode, but it is
# claimed to build faster than make.
# Comment it out if you don't want it.
#
NINJA_VERSION=${NINJA_VERSION-1.10.2}
#
# The following libraries and tools are required even to build only TShark.
#
GETTEXT_VERSION=0.21
GLIB_VERSION=2.68.4
if [ "$GLIB_VERSION" ]; then
GLIB_MAJOR_VERSION="`expr $GLIB_VERSION : '\([0-9][0-9]*\).*'`"
GLIB_MINOR_VERSION="`expr $GLIB_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
GLIB_DOTDOT_VERSION="`expr $GLIB_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
GLIB_MAJOR_MINOR_VERSION=$GLIB_MAJOR_VERSION.$GLIB_MINOR_VERSION
GLIB_MAJOR_MINOR_DOTDOT_VERSION=$GLIB_MAJOR_VERSION.$GLIB_MINOR_VERSION.$GLIB_DOTDOT_VERSION
fi
PKG_CONFIG_VERSION=0.29.2
#
# libgpg-error is required for libgcrypt.
#
LIBGPG_ERROR_VERSION=1.39
#
# libgcrypt is required.
#
LIBGCRYPT_VERSION=1.8.7
#
# libpcre2 is required.
#
PCRE2_VERSION=10.39
#
# One or more of the following libraries are required to build Wireshark.
#
# To override the version of Qt call the script with some of the variables
# set to the new values. Setting the variable to empty will disable building
# the toolkit and will uninstall any version previously installed by the
# script, e.g.
# "QT_VERSION=5.10.1 ./macos-setup.sh"
# will build and install with QT 5.10.1.
#
QT_VERSION=${QT_VERSION-6.2.4}
if [ "$QT_VERSION" ]; then
QT_MAJOR_VERSION="`expr $QT_VERSION : '\([0-9][0-9]*\).*'`"
QT_MINOR_VERSION="`expr $QT_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
QT_DOTDOT_VERSION="`expr $QT_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
QT_MAJOR_MINOR_VERSION=$QT_MAJOR_VERSION.$QT_MINOR_VERSION
QT_MAJOR_MINOR_DOTDOT_VERSION=$QT_MAJOR_VERSION.$QT_MINOR_VERSION.$QT_DOTDOT_VERSION
fi
#
# The following libraries are optional.
# Comment them out if you don't want them, but note that some of
# the optional libraries are required by other optional libraries.
#
LIBSMI_VERSION=0.4.8
GNUTLS_VERSION=3.6.15
if [ "$GNUTLS_VERSION" ]; then
#
# We'll be building GnuTLS, so we may need some additional libraries.
# We assume GnuTLS can work with Nettle; newer versions *only* use
# Nettle, not libgcrypt.
#
GNUTLS_MAJOR_VERSION="`expr $GNUTLS_VERSION : '\([0-9][0-9]*\).*'`"
GNUTLS_MINOR_VERSION="`expr $GNUTLS_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
NETTLE_VERSION=3.6
#
# And, in turn, Nettle requires GMP.
#
GMP_VERSION=6.2.1
#
# And p11-kit
P11KIT_VERSION=0.23.21
# Which requires libtasn1
LIBTASN1_VERSION=4.16.0
fi
# Use 5.2.4, not 5.3, for now; lua_bitop.c hasn't been ported to 5.3
# yet, and we need to check for compatibility issues (we'd want Lua
# scripts to work with 5.1, 5.2, and 5.3, as long as they only use Lua
# features present in all three versions)
LUA_VERSION=5.2.4
SNAPPY_VERSION=1.1.8
ZSTD_VERSION=1.4.2
LIBXML2_VERSION=2.9.9
LZ4_VERSION=1.9.2
SBC_VERSION=1.3
CARES_VERSION=1.15.0
LIBSSH_VERSION=0.9.6
# mmdbresolve
MAXMINDDB_VERSION=1.4.3
NGHTTP2_VERSION=1.46.0
SPANDSP_VERSION=0.0.6
SPEEXDSP_VERSION=1.2.0
if [ "$SPANDSP_VERSION" ]; then
#
# SpanDSP depends on libtiff.
#
LIBTIFF_VERSION=3.8.1
fi
BCG729_VERSION=1.0.2
ILBC_VERSION=2.0.2
OPUS_VERSION=1.3.1
#
# Is /usr/bin/python3 a working version of Python? It may be, as it
# might be a wrapper that runs the Python 3 that's part of Xcode.
#
if /usr/bin/python3 --version >/dev/null 2>&1
then
#
# Yes - don't bother installing Python 3 from elsewhere
#
:
else
#
# No - install a Python package.
#
PYTHON3_VERSION=3.9.5
fi
BROTLI_VERSION=1.0.9
# minizip
ZLIB_VERSION=1.2.11
# Uncomment to enable automatic updates using Sparkle
#SPARKLE_VERSION=2.1.0
#
# Asciidoctor is required to build the documentation.
#
ASCIIDOCTOR_VERSION=${ASCIIDOCTOR_VERSION-2.0.16}
ASCIIDOCTORPDF_VERSION=${ASCIIDOCTORPDF_VERSION-1.6.1}
#
# GNU autotools. They're not supplied with the macOS versions we
# support, and we currently use them for minizip.
#
AUTOCONF_VERSION=2.71
AUTOMAKE_VERSION=1.16.5
LIBTOOL_VERSION=2.4.6
install_curl() {
if [ "$CURL_VERSION" -a ! -f curl-$CURL_VERSION-done ] ; then
echo "Downloading, building, and installing curl:"
[ -f curl-$CURL_VERSION.tar.bz2 ] || curl -L -O https://curl.haxx.se/download/curl-$CURL_VERSION.tar.bz2 || exit 1
$no_build && echo "Skipping installation" && return
bzcat curl-$CURL_VERSION.tar.bz2 | tar xf - || exit 1
cd curl-$CURL_VERSION
./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch curl-$CURL_VERSION-done
fi
}
uninstall_curl() {
if [ ! -z "$installed_curl_version" ] ; then
echo "Uninstalling curl:"
cd curl-$installed_curl_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm curl-$installed_curl_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf curl-$installed_curl_version
rm -rf curl-$installed_curl_version.tar.bz2
fi
installed_curl_version=""
fi
}
install_xz() {
if [ "$XZ_VERSION" -a ! -f xz-$XZ_VERSION-done ] ; then
echo "Downloading, building, and installing xz:"
[ -f xz-$XZ_VERSION.tar.bz2 ] || curl -L -O https://tukaani.org/xz/xz-$XZ_VERSION.tar.bz2 || exit 1
$no_build && echo "Skipping installation" && return
bzcat xz-$XZ_VERSION.tar.bz2 | tar xf - || exit 1
cd xz-$XZ_VERSION
#
# This builds and installs liblzma, which libxml2 uses, and
# Wireshark uses liblzma, so we need to build this with
# all the minimum-deployment-version and SDK stuff.
#
CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch xz-$XZ_VERSION-done
fi
}
uninstall_xz() {
if [ ! -z "$installed_xz_version" ] ; then
echo "Uninstalling xz:"
cd xz-$installed_xz_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm xz-$installed_xz_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf xz-$installed_xz_version
rm -rf xz-$installed_xz_version.tar.bz2
fi
installed_xz_version=""
fi
}
install_lzip() {
if [ "$LZIP_VERSION" -a ! -f lzip-$LZIP_VERSION-done ] ; then
echo "Downloading, building, and installing lzip:"
[ -f lzip-$LZIP_VERSION.tar.gz ] || curl -L -O https://download.savannah.gnu.org/releases/lzip/lzip-$LZIP_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat lzip-$LZIP_VERSION.tar.gz | tar xf - || exit 1
cd lzip-$LZIP_VERSION
./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch lzip-$LZIP_VERSION-done
fi
}
uninstall_lzip() {
if [ ! -z "$installed_lzip_version" ] ; then
echo "Uninstalling lzip:"
cd lzip-$installed_lzip_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm lzip-$installed_lzip_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf lzip-$installed_lzip_version
rm -rf lzip-$installed_lzip_version.tar.gz
fi
installed_lzip_version=""
fi
}
install_pcre() {
if [ "$PCRE_VERSION" -a ! -f pcre-$PCRE_VERSION-done ] ; then
echo "Downloading, building, and installing pcre:"
[ -f pcre-$PCRE_VERSION.tar.bz2 ] || curl -L -O https://sourceforge.net/projects/pcre/files/pcre/$PCRE_VERSION/pcre-$PCRE_VERSION.tar.bz2 || exit 1
$no_build && echo "Skipping installation" && return
bzcat pcre-$PCRE_VERSION.tar.bz2 | tar xf - || exit 1
cd pcre-$PCRE_VERSION
./configure --enable-unicode-properties || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch pcre-$PCRE_VERSION-done
fi
}
uninstall_pcre() {
if [ ! -z "$installed_pcre_version" ] ; then
echo "Uninstalling pcre:"
cd pcre-$installed_pcre_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm pcre-$installed_pcre_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf pcre-$installed_pcre_version
rm -rf pcre-$installed_pcre_version.tar.bz2
fi
installed_pcre_version=""
fi
}
install_pcre2() {
if [ "$PCRE2_VERSION" -a ! -f "pcre2-$PCRE2_VERSION-done" ] ; then
echo "Downloading, building, and installing pcre2:"
[ -f "pcre2-$PCRE2_VERSION.tar.bz2" ] || curl -L -O "https://github.com/PhilipHazel/pcre2/releases/download/pcre2-$PCRE2_VERSION/pcre2-10.39.tar.bz2" || exit 1
$no_build && echo "Skipping installation" && return
bzcat "pcre2-$PCRE2_VERSION.tar.bz2" | tar xf - || exit 1
cd "pcre2-$PCRE2_VERSION"
mkdir build_dir
cd build_dir
# https://github.com/Homebrew/homebrew-core/blob/master/Formula/pcre2.rb
# https://github.com/microsoft/vcpkg/blob/master/ports/pcre2/portfile.cmake
MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" \
cmake -DBUILD_STATIC_LIBS=OFF -DBUILD_SHARED_LIBS=ON -DPCRE2_SUPPORT_JIT=ON -DPCRE2_SUPPORT_UNICODE=ON .. || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ../..
touch "pcre2-$PCRE2_VERSION-done"
fi
}
uninstall_pcre2() {
if [ -n "$installed_pcre2_version" ] && [ -s "pcre2-$installed_pcre2_version/build_dir/install_manifest.txt" ] ; then
echo "Uninstalling pcre2:"
# PCRE2 10.39 installs pcre2unicode.3 twice, so this will return an error.
while read -r ; do $DO_RM -v "$REPLY" ; done < <(cat "pcre2-$installed_pcre2_version/build_dir/install_manifest.txt"; echo)
rm "pcre2-$installed_pcre2_version-done"
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf "pcre2-$installed_pcre2_version"
rm -rf "pcre2-$installed_pcre2_version.tar.bz2"
fi
installed_pcre2_version=""
fi
}
install_autoconf() {
if [ "$AUTOCONF_VERSION" -a ! -f autoconf-$AUTOCONF_VERSION-done ] ; then
echo "Downloading, building and installing GNU autoconf..."
[ -f autoconf-$AUTOCONF_VERSION.tar.xz ] || curl -L -O ftp://ftp.gnu.org/gnu/autoconf/autoconf-$AUTOCONF_VERSION.tar.xz || exit 1
$no_build && echo "Skipping installation" && return
xzcat autoconf-$AUTOCONF_VERSION.tar.xz | tar xf - || exit 1
cd autoconf-$AUTOCONF_VERSION
./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch autoconf-$AUTOCONF_VERSION-done
fi
}
uninstall_autoconf() {
if [ ! -z "$installed_autoconf_version" ] ; then
#
# automake and libtool depend on this, so uninstall them.
#
uninstall_libtool "$@"
uninstall_automake "$@"
echo "Uninstalling GNU autoconf:"
cd autoconf-$installed_autoconf_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm autoconf-$installed_autoconf_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf autoconf-$installed_autoconf_version
rm -rf autoconf-$installed_autoconf_version.tar.xz
fi
installed_autoconf_version=""
fi
}
install_automake() {
if [ "$AUTOMAKE_VERSION" -a ! -f automake-$AUTOMAKE_VERSION-done ] ; then
echo "Downloading, building and installing GNU automake..."
[ -f automake-$AUTOMAKE_VERSION.tar.xz ] || curl -L -O ftp://ftp.gnu.org/gnu/automake/automake-$AUTOMAKE_VERSION.tar.xz || exit 1
$no_build && echo "Skipping installation" && return
xzcat automake-$AUTOMAKE_VERSION.tar.xz | tar xf - || exit 1
cd automake-$AUTOMAKE_VERSION
./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch automake-$AUTOMAKE_VERSION-done
fi
}
uninstall_automake() {
if [ ! -z "$installed_automake_version" ] ; then
#
# libtool depends on this(?), so uninstall it.
#
uninstall_libtool "$@"
echo "Uninstalling GNU automake:"
cd automake-$installed_automake_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm automake-$installed_automake_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf automake-$installed_automake_version
rm -rf automake-$installed_automake_version.tar.xz
fi
installed_automake_version=""
fi
}
install_libtool() {
if [ "$LIBTOOL_VERSION" -a ! -f libtool-$LIBTOOL_VERSION-done ] ; then
echo "Downloading, building and installing GNU libtool..."
[ -f libtool-$LIBTOOL_VERSION.tar.xz ] || curl -L -O ftp://ftp.gnu.org/gnu/libtool/libtool-$LIBTOOL_VERSION.tar.xz || exit 1
$no_build && echo "Skipping installation" && return
xzcat libtool-$LIBTOOL_VERSION.tar.xz | tar xf - || exit 1
cd libtool-$LIBTOOL_VERSION
./configure --program-prefix=g || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch libtool-$LIBTOOL_VERSION-done
fi
}
uninstall_libtool() {
if [ ! -z "$installed_libtool_version" ] ; then
echo "Uninstalling GNU libtool:"
cd libtool-$installed_libtool_version
$DO_MV /usr/local/bin/glibtool /usr/local/bin/libtool
$DO_MV /usr/local/bin/glibtoolize /usr/local/bin/libtoolize
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm libtool-$installed_libtool_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf libtool-$installed_libtool_version
rm -rf libtool-$installed_libtool_version.tar.xz
fi
installed_libtool_version=""
fi
}
install_ninja() {
if [ "$NINJA_VERSION" -a ! -f ninja-$NINJA_VERSION-done ] ; then
echo "Downloading and installing Ninja:"
#
# Download the zipball, unpack it, and move the binary to
# /usr/local/bin.
#
[ -f ninja-mac-v$NINJA_VERSION.zip ] || curl -L -o ninja-mac-v$NINJA_VERSION.zip https://github.com/ninja-build/ninja/releases/download/v$NINJA_VERSION/ninja-mac.zip || exit 1
$no_build && echo "Skipping installation" && return
unzip ninja-mac-v$NINJA_VERSION.zip
sudo mv ninja /usr/local/bin
touch ninja-$NINJA_VERSION-done
fi
}
uninstall_ninja() {
if [ ! -z "$installed_ninja_version" ]; then
echo "Uninstalling Ninja:"
sudo rm /usr/local/bin/ninja
rm ninja-$installed_ninja_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
rm -f ninja-mac-v$installed_ninja_version.zip
fi
installed_ninja_version=""
fi
}
install_asciidoctor() {
if [ ! -f asciidoctor-${ASCIIDOCTOR_VERSION}-done ]; then
echo "Downloading and installing Asciidoctor:"
sudo gem install -V asciidoctor --version "=${ASCIIDOCTOR_VERSION}"
touch asciidoctor-${ASCIIDOCTOR_VERSION}-done
fi
}
uninstall_asciidoctor() {
if [ ! -z "$installed_asciidoctor_version" ]; then
echo "Uninstalling Asciidoctor:"
sudo gem uninstall -V asciidoctor --version "=${installed_asciidoctor_version}"
rm asciidoctor-$installed_asciidoctor_version-done
##if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version,
# whatever it might happen to be called.
#
## rm -f asciidoctor-$installed_asciidoctor_version
##fi
installed_asciidoctor_version=""
fi
}
install_asciidoctorpdf() {
if [ ! -f asciidoctorpdf-${ASCIIDOCTORPDF_VERSION}-done ]; then
## XXX gem does not track dependencies that are installed for asciidoctor-pdf
## record them for uninstallation
## ttfunk, pdf-core, prawn, prawn-table, Ascii85, ruby-rc4, hashery, afm, pdf-reader, prawn-templates, public_suffix, addressable, css_parser, prawn-svg, prawn-icon, safe_yaml, thread_safe, polyglot, treetop, asciidoctor-pdf
echo "Downloading and installing Asciidoctor-pdf:"
sudo gem install -V asciidoctor-pdf --version "=${ASCIIDOCTORPDF_VERSION}"
touch asciidoctorpdf-${ASCIIDOCTORPDF_VERSION}-done
fi
}
uninstall_asciidoctorpdf() {
if [ ! -z "$installed_asciidoctorpdf_version" ]; then
echo "Uninstalling Asciidoctor:"
sudo gem uninstall -V asciidoctor-pdf --version "=${installed_asciidoctorpdf_version}"
## XXX uninstall dependencies
rm asciidoctorpdf-$installed_asciidoctorpdf_version-done
##if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version,
# whatever it might happen to be called.
#
## rm -f asciidoctorpdf-$installed_asciidoctorpdf_version
##fi
installed_asciidoctorpdf_version=""
fi
}
install_cmake() {
if [ ! -f cmake-$CMAKE_VERSION-done ]; then
echo "Downloading and installing CMake:"
CMAKE_MAJOR_VERSION="`expr $CMAKE_VERSION : '\([0-9][0-9]*\).*'`"
CMAKE_MINOR_VERSION="`expr $CMAKE_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
CMAKE_MAJOR_MINOR_VERSION=$CMAKE_MAJOR_VERSION.$CMAKE_MINOR_VERSION
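#
# For example, with CMAKE_VERSION=3.21.4 the expr commands above
# extract "3" and "21", so the download URL below ends up under
# https://cmake.org/files/v3.21/.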
#
# NOTE: the "64" in "Darwin64" doesn't mean "64-bit-only"; the
# package in question supports both 32-bit and 64-bit x86.
#
case "$CMAKE_MAJOR_VERSION" in
0|1|2)
echo "CMake $CMAKE_VERSION" is too old 1>&2
;;
3)
#
# Download the DMG and do a drag install, where "drag" means
# "mv".
#
# 3.1.1 to 3.19.1 have a Darwin-x86_64 DMG.
# 3.19.2 has a macos-universal DMG for 10.10 and later
# 3.19.3 and later have a macos-universal DMG for 10.13 and later,
# and a macos10.10-universal DMG for 10.10 and later.
#
if [ "$CMAKE_MINOR_VERSION" -lt 5 ]; then
echo "CMake $CMAKE_VERSION" is too old 1>&2
elif [ "$CMAKE_MINOR_VERSION" -lt 19 -o \
"$CMAKE_VERSION" = 3.19.0 -o \
"$CMAKE_VERSION" = 3.19.1 ]; then
type="Darwin-x86_64"
elif [ "$CMAKE_VERSION" = 3.19.2 -o \
"$DARWIN_MAJOR_VERSION" -ge 17 ]; then
type="macos-universal"
else
type="macos10.0-universal"
fi
[ -f cmake-$CMAKE_VERSION-$type.dmg ] || curl -L -O https://cmake.org/files/v$CMAKE_MAJOR_MINOR_VERSION/cmake-$CMAKE_VERSION-$type.dmg || exit 1
$no_build && echo "Skipping installation" && return
sudo hdiutil attach cmake-$CMAKE_VERSION-$type.dmg || exit 1
sudo ditto /Volumes/cmake-$CMAKE_VERSION-$type/CMake.app /Applications/CMake.app || exit 1
#
# Plant the appropriate symbolic links in /usr/local/bin.
# It's a drag-install, so there's no installer to make them,
# and the CMake code to put them in place is lame, as
#
# 1) it defaults to /usr/bin, not /usr/local/bin;
# 2) it doesn't request the necessary root privileges;
# 3) it can't be run from the command line;
#
# so we do it ourselves.
#
for i in ccmake cmake cmake-gui cmakexbuild cpack ctest
do
sudo ln -s /Applications/CMake.app/Contents/bin/$i /usr/local/bin/$i
done
sudo hdiutil detach /Volumes/cmake-$CMAKE_VERSION-$type
;;
*)
;;
esac
touch cmake-$CMAKE_VERSION-done
fi
}
uninstall_cmake() {
if [ ! -z "$installed_cmake_version" ]; then
echo "Uninstalling CMake:"
installed_cmake_major_version="`expr $installed_cmake_version : '\([0-9][0-9]*\).*'`"
case "$installed_cmake_major_version" in
0|1|2)
echo "CMake $installed_cmake_version" is too old 1>&2
;;
3)
sudo rm -rf /Applications/CMake.app
for i in ccmake cmake cmake-gui cmakexbuild cpack ctest
do
sudo rm -f /usr/local/bin/$i
done
rm cmake-$installed_cmake_version-done
;;
esac
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version,
# whatever it might happen to be called.
#
rm -f cmake-$installed_cmake_version-Darwin-x86_64.dmg
rm -f cmake-$installed_cmake_version-macos-universal.dmg
rm -f cmake-$installed_cmake_version-macos10.10-universal.dmg
fi
installed_cmake_version=""
fi
}
install_meson() {
#
# Install Meson with pip3 if we don't have it already.
#
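# Note that the meson-done marker is created only when we do the
# pip3 install ourselves, so uninstall_meson leaves a Meson that
# was already present on the system alone.
#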
if $MESON --version >/dev/null 2>&1
then
# We have it.
:
else
sudo pip3 install meson
touch meson-done
fi
}
uninstall_meson() {
#
# If we installed Meson, uninstall it with pip3.
#
if [ -f meson-done ] ; then
sudo pip3 uninstall meson
rm -f meson-done
fi
}
install_pytest() {
#
# Install pytest with pip3 if we don't have it already.
#
if python3 -m pytest --version >/dev/null 2>&1
then
# We have it.
:
else
sudo pip3 install pytest pytest-xdist
touch pytest-done
fi
}
uninstall_pytest() {
#
# If we installed pytest, uninstall it with pip3.
#
if [ -f pytest-done ] ; then
sudo pip3 uninstall pytest pytest-xdist
rm -f pytest-done
fi
}
install_gettext() {
if [ ! -f gettext-$GETTEXT_VERSION-done ] ; then
echo "Downloading, building, and installing GNU gettext:"
[ -f gettext-$GETTEXT_VERSION.tar.gz ] || curl -L -O https://ftp.gnu.org/pub/gnu/gettext/gettext-$GETTEXT_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat gettext-$GETTEXT_VERSION.tar.gz | tar xf - || exit 1
cd gettext-$GETTEXT_VERSION
CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch gettext-$GETTEXT_VERSION-done
fi
}
uninstall_gettext() {
if [ ! -z "$installed_gettext_version" ] ; then
#
# GLib depends on this, so uninstall it.
#
uninstall_glib "$@"
echo "Uninstalling GNU gettext:"
cd gettext-$installed_gettext_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm gettext-$installed_gettext_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf gettext-$installed_gettext_version
rm -rf gettext-$installed_gettext_version.tar.gz
fi
installed_gettext_version=""
fi
}
install_pkg_config() {
if [ ! -f pkg-config-$PKG_CONFIG_VERSION-done ] ; then
echo "Downloading, building, and installing pkg-config:"
[ -f pkg-config-$PKG_CONFIG_VERSION.tar.gz ] || curl -L -O https://pkgconfig.freedesktop.org/releases/pkg-config-$PKG_CONFIG_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat pkg-config-$PKG_CONFIG_VERSION.tar.gz | tar xf - || exit 1
cd pkg-config-$PKG_CONFIG_VERSION
./configure --with-internal-glib || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch pkg-config-$PKG_CONFIG_VERSION-done
fi
}
uninstall_pkg_config() {
if [ ! -z "$installed_pkg_config_version" ] ; then
echo "Uninstalling pkg-config:"
cd pkg-config-$installed_pkg_config_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm pkg-config-$installed_pkg_config_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf pkg-config-$installed_pkg_config_version
rm -rf pkg-config-$installed_pkg_config_version.tar.gz
fi
installed_pkg_config_version=""
fi
}
install_glib() {
if [ ! -f glib-$GLIB_VERSION-done ] ; then
echo "Downloading, building, and installing GLib:"
glib_dir=`expr $GLIB_VERSION : '\([0-9][0-9]*\.[0-9][0-9]*\).*'`
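# For example, GLIB_VERSION=2.68.4 gives glib_dir=2.68, the
# subdirectory used on download.gnome.org.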
#
# Starting with GLib 2.28.8, xz-compressed tarballs are available.
#
[ -f glib-$GLIB_VERSION.tar.xz ] || curl -L -O https://download.gnome.org/sources/glib/$glib_dir/glib-$GLIB_VERSION.tar.xz || exit 1
$no_build && echo "Skipping installation" && return
xzcat glib-$GLIB_VERSION.tar.xz | tar xf - || exit 1
cd glib-$GLIB_VERSION
#
# First, determine where the system include files are.
# (It's not necessarily /usr/include.) There's a bit of a
# greasy hack here; pre-5.x versions of the developer tools
# don't support the --show-sdk-path option, and will produce
# no output, so includedir will be set to /usr/include
# (in those older versions of the developer tools, there is
# a /usr/include directory).
#
# We need this for several things we do later.
#
includedir=`SDKROOT="$SDKPATH" xcrun --show-sdk-path 2>/dev/null`/usr/include
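#
# With a current Xcode, that typically expands to something like
# /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include
# (the exact path depends on the installed SDK).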
#
# GLib's configuration procedure, whether autotools-based or
# Meson-based, really likes to use pkg-config to find libraries,
# including libffi.
#
# At least some versions of macOS provide libffi, but, as macOS
# doesn't provide pkg-config, they don't provide a .pc file for
# it, so the autotools-based configuration needs some trickery
# to get it to find the OS-supplied libffi, and the Meson-based
# configuration simply won't find it at all.
#
# So, if we have a system-provided libffi, but pkg-config
# doesn't find libffi, we construct a .pc file for that libffi,
# and install it in /usr/local/lib/pkgconfig.
#
if pkg-config libffi ; then
# It found libffi; no need to install a .pc file, and we
# don't want to overwrite what's there already.
:
elif [ ! -e $includedir/ffi/ffi.h ] ; then
# We don't appear to have libffi as part of the system, so
# let the configuration process figure out what to do.
#
# We test for the header file, not the library, because, in
# Big Sur and later, there's no guarantee that, for a system
# shared library, there's a corresponding dylib file in
# /usr/lib.
:
else
#
# We have libffi, but pkg-config didn't find it; generate
# and install the .pc file.
#
#
# Now generate the .pc file.
#
# We generate the contents of the .pc file by using cat with
# a here document containing a template for the file and
# piping that to a sed command that replaces @INCLUDEDIR@ in
# the template with the include directory we discovered
# above, so that the .pc file gives the compiler flags
# necessary to find the libffi headers (which are *not*
# necessarily in /usr/include, as per the above).
#
# The EOF marker for the here document is in quotes, to tell
# the shell not to do shell expansion, as .pc files use a
# syntax to refer to .pc file variables that looks like the
# syntax to refer to shell variables.
#
# The writing of the libffi.pc file is a greasy hack - the
# process of generating the contents of the .pc file writes
# to the standard output, but running the last process in
# the pipeline as root won't allow the shell that's
# *running* it to open the .pc file if we don't have write
# permission on /usr/local/lib/pkgconfig, so we need a
# program that creates a file and then reads from the
# standard input and writes to that file. UN*Xes have a
# program that does that; it's called "tee". :-)
#
# However, it *also* writes the file to the standard output,
# so we redirect that to /dev/null when we run it.
#
cat <<"EOF" | sed "s;@INCLUDEDIR@;$includedir;" | $DO_TEE_TO_PC_FILE /usr/local/lib/pkgconfig/libffi.pc >/dev/null
prefix=/usr
libdir=${prefix}/lib
includedir=@INCLUDEDIR@
Name: ffi
Description: Library supporting Foreign Function Interfaces
Version: 3.2.9999
Libs: -L${libdir} -lffi
Cflags: -I${includedir}/ffi
EOF
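#
# Once that's installed, "pkg-config --cflags libffi" should
# print "-I$includedir/ffi", confirming that the configuration
# step can now find the headers.
#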
fi
#
# GLib 2.59.1 and later use Meson+Ninja as the build system.
#
case $GLIB_MAJOR_VERSION in
1)
echo "GLib $GLIB_VERSION" is too old 1>&2
;;
*)
case $GLIB_MINOR_VERSION in
[0-9]|1[0-9]|2[0-9]|3[0-7])
echo "GLib $GLIB_VERSION" is too old 1>&2
;;
3[8-9]|4[0-9]|5[0-8])
if [ ! -f ./configure ]; then
LIBTOOLIZE=glibtoolize ./autogen.sh
fi
#
# At least with the version of Xcode that comes with
# Leopard, /usr/include/ffi/fficonfig.h doesn't define
# MACOSX, which causes the build of GLib to fail for at
# least some versions of GLib. If we don't find
# "#define.*MACOSX" in /usr/include/ffi/fficonfig.h,
# explicitly define it.
#
# While we're at it, suppress -Wformat-nonliteral to
# avoid a case where clang's stricter rules on when not
# to complain about non-literal format arguments cause
# it to complain about code that's safe but it wasn't
# told that. See my comment #25 in GNOME bug 691608:
#
# https://bugzilla.gnome.org/show_bug.cgi?id=691608#c25
#
if grep -qs '#define.*MACOSX' $includedir/ffi/fficonfig.h
then
# It's defined, nothing to do
CFLAGS="$CFLAGS -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
else
CFLAGS="$CFLAGS -DMACOSX -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -DMACOSX -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
fi
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
;;
59|[6-9][0-9]|[1-9][0-9][0-9])
#
# 2.59.0 doesn't require Meson and Ninja, but it
# supports it, and I'm too lazy to add a dot-dot
# version check.
#
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" $MESON _build || exit 1
ninja $MAKE_BUILD_OPTS -C _build || exit 1
$DO_NINJA_INSTALL || exit 1
;;
*)
echo "Glib's put out 1000 2.x releases?" 1>&2
;;
esac
esac
cd ..
touch glib-$GLIB_VERSION-done
fi
}
uninstall_glib() {
if [ ! -z "$installed_glib_version" ] ; then
echo "Uninstalling GLib:"
cd glib-$installed_glib_version
installed_glib_major_version="`expr $installed_glib_version : '\([0-9][0-9]*\).*'`"
installed_glib_minor_version="`expr $installed_glib_version : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
installed_glib_dotdot_version="`expr $installed_glib_version : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
installed_glib_major_minor_version=$installed_glib_major_version.$installed_glib_minor_version
installed_glib_major_minor_dotdot_version=$installed_glib_major_version.$installed_glib_minor_version.$installed_glib_dotdot_version
#
# GLib 2.59.1 and later use Meson+Ninja as the build system.
#
case $installed_glib_major_version in
1)
$DO_MAKE_UNINSTALL || exit 1
#
# This appears to delete dependencies out from under other
# Makefiles in the tree, causing it to fail. At least until
# that gets fixed, if it ever gets fixed, we just ignore the
# exit status of "make distclean"
#
# make distclean || exit 1
make distclean || echo "Ignoring make distclean failure" 1>&2
;;
*)
case $installed_glib_minor_version in
[0-9]|1[0-9]|2[0-9]|3[0-9]|4[0-9]|5[0-8])
$DO_MAKE_UNINSTALL || exit 1
#
# This appears to delete dependencies out from under other
# Makefiles in the tree, causing it to fail. At least until
# that gets fixed, if it ever gets fixed, we just ignore the
# exit status of "make distclean"
#
# make distclean || exit 1
make distclean || echo "Ignoring make distclean failure" 1>&2
;;
59|[6-9][0-9]|[1-9][0-9][0-9])
#
# 2.59.0 doesn't require Meson and Ninja, but it
# supports it, and I'm too lazy to add a dot-dot
# version check.
#
$DO_NINJA_UNINSTALL || exit 1
#
# For Meson+Ninja, we do the build in an _build
# subdirectory, so the equivalent of "make distclean"
# is just to remove the directory tree.
#
rm -rf _build
;;
*)
echo "Glib's put out 1000 2.x releases?" 1>&2
;;
esac
esac
cd ..
rm glib-$installed_glib_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf glib-$installed_glib_version
rm -rf glib-$installed_glib_version.tar.xz
fi
installed_glib_version=""
fi
}
install_qt() {
if [ "$QT_VERSION" -a ! -f qt-$QT_VERSION-done ]; then
echo "Downloading and installing Qt:"
#
# What you get for this URL might just be a 302 Found reply, so use
# -L so we get redirected.
#
# 5.0 - 5.1: qt-mac-opensource-{version}-clang-offline.dmg
# 5.2.0: qt-mac-opensource-{version}.dmg
# 5.2.1: qt-opensource-mac-x64-clang-{version}.dmg
# 5.3 - 5.8: qt-opensource-mac-x64-clang-{version}.dmg
# 5.9 - 5.14: qt-opensource-mac-x64-{version}.dmg
# 5.15 - 6.0: Offline installers no longer provided.
# ( https://download.qt.io/archive/qt/5.15/5.15.0/OFFLINE_README.txt )
# XXX: We need a different approach for QT >= 5.15
#
case $QT_MAJOR_VERSION in
1|2|3|4)
echo "Qt $QT_VERSION" is too old 1>&2
;;
5)
case $QT_MINOR_VERSION in
0|1|2|3|4|5|6|7|8)
echo "Qt $QT_VERSION" is too old 1>&2
;;
9|10|11|12|13|14)
QT_VOLUME=qt-opensource-mac-x64-$QT_VERSION
;;
*)
echo "The Qt Company no longer provides open source offline installers for Qt $QT_VERSION" 1>&2
;;
esac
[ -f $QT_VOLUME.dmg ] || curl -L -O https://download.qt.io/archive/qt/$QT_MAJOR_MINOR_VERSION/$QT_MAJOR_MINOR_DOTDOT_VERSION/$QT_VOLUME.dmg || exit 1
$no_build && echo "Skipping installation" && return
sudo hdiutil attach $QT_VOLUME.dmg || exit 1
#
# Run the installer executable directly, so that we wait for
# it to finish. Then unmount the volume.
#
/Volumes/$QT_VOLUME/$QT_VOLUME.app/Contents/MacOS/$QT_VOLUME
sudo hdiutil detach /Volumes/$QT_VOLUME
touch qt-$QT_VERSION-done
;;
*)
echo "The Qt Company no longer provides open source offline installers for Qt $QT_VERSION" 1>&2
;;
esac
fi
}
uninstall_qt() {
if [ ! -z "$installed_qt_version" ] ; then
echo "Uninstalling Qt:"
rm -rf $HOME/Qt$installed_qt_version
rm qt-$installed_qt_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded version.
#
# 5.0 - 5.1: qt-mac-opensource-{version}-clang-offline.dmg
# 5.2.0: qt-mac-opensource-{version}.dmg
# 5.2.1: qt-opensource-mac-x64-clang-{version}.dmg
# 5.3 - 5.8: qt-opensource-mac-x64-clang-{version}.dmg
# 5.9 - 5.14: qt-opensource-mac-x64-{version}.dmg
#
installed_qt_major_version="`expr $installed_qt_version : '\([0-9][0-9]*\).*'`"
installed_qt_minor_version="`expr $installed_qt_version : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
installed_qt_dotdot_version="`expr $installed_qt_version : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
case $installed_qt_major_version in
1|2|3|4)
echo "Qt $installed_qt_version" is too old 1>&2
;;
5*)
case $installed_qt_minor_version in
0|1|2|3|4|5)
echo "Qt $installed_qt_version" is too old 1>&2
;;
6|7|8)
installed_qt_volume=qt-opensource-mac-x64-clang-$installed_qt_version.dmg
;;
9|10|11|12|13|14)
installed_qt_volume=qt-opensource-mac-x64-$installed_qt_version.dmg
;;
esac
esac
rm -f $installed_qt_volume
fi
installed_qt_version=""
fi
}
install_libsmi() {
if [ "$LIBSMI_VERSION" -a ! -f libsmi-$LIBSMI_VERSION-done ] ; then
echo "Downloading, building, and installing libsmi:"
[ -f libsmi-$LIBSMI_VERSION.tar.gz ] || curl -L -O https://www.ibr.cs.tu-bs.de/projects/libsmi/download/libsmi-$LIBSMI_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat libsmi-$LIBSMI_VERSION.tar.gz | tar xf - || exit 1
cd libsmi-$LIBSMI_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch libsmi-$LIBSMI_VERSION-done
fi
}
uninstall_libsmi() {
if [ ! -z "$installed_libsmi_version" ] ; then
echo "Uninstalling libsmi:"
cd libsmi-$installed_libsmi_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm libsmi-$installed_libsmi_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf libsmi-$installed_libsmi_version
rm -rf libsmi-$installed_libsmi_version.tar.gz
fi
installed_libsmi_version=""
fi
}
install_libgpg_error() {
if [ "$LIBGPG_ERROR_VERSION" -a ! -f libgpg-error-$LIBGPG_ERROR_VERSION-done ] ; then
echo "Downloading, building, and installing libgpg-error:"
[ -f libgpg-error-$LIBGPG_ERROR_VERSION.tar.bz2 ] || curl -L -O https://www.gnupg.org/ftp/gcrypt/libgpg-error/libgpg-error-$LIBGPG_ERROR_VERSION.tar.bz2 || exit 1
$no_build && echo "Skipping installation" && return
bzcat libgpg-error-$LIBGPG_ERROR_VERSION.tar.bz2 | tar xf - || exit 1
cd libgpg-error-$LIBGPG_ERROR_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch libgpg-error-$LIBGPG_ERROR_VERSION-done
fi
}
uninstall_libgpg_error() {
if [ ! -z "$installed_libgpg_error_version" ] ; then
#
# libgcrypt depends on this, so uninstall it.
#
uninstall_libgcrypt "$@"
echo "Uninstalling libgpg-error:"
cd libgpg-error-$installed_libgpg_error_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm libgpg-error-$installed_libgpg_error_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf libgpg-error-$installed_libgpg_error_version
rm -rf libgpg-error-$installed_libgpg_error_version.tar.bz2
fi
installed_libgpg_error_version=""
fi
}
install_libgcrypt() {
if [ "$LIBGCRYPT_VERSION" -a ! -f libgcrypt-$LIBGCRYPT_VERSION-done ] ; then
#
# libgpg-error is required for libgcrypt.
#
if [ -z "$LIBGPG_ERROR_VERSION" ]
then
echo "libgcrypt requires libgpg-error, but you didn't install libgpg-error." 1>&2
exit 1
fi
echo "Downloading, building, and installing libgcrypt:"
[ -f libgcrypt-$LIBGCRYPT_VERSION.tar.gz ] || curl -L -O https://www.gnupg.org/ftp/gcrypt/libgcrypt/libgcrypt-$LIBGCRYPT_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat libgcrypt-$LIBGCRYPT_VERSION.tar.gz | tar xf - || exit 1
cd libgcrypt-$LIBGCRYPT_VERSION
#
# The assembler language code is not compatible with the macOS
# x86 assembler (or is it an x86-64 vs. x86-32 issue?).
#
# libgcrypt expects gnu89, not c99/gnu99, semantics for
# "inline". See, for example:
#
# https://lists.freebsd.org/pipermail/freebsd-ports-bugs/2010-October/198809.html
#
CFLAGS="$CFLAGS -std=gnu89 $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-asm || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch libgcrypt-$LIBGCRYPT_VERSION-done
fi
}
uninstall_libgcrypt() {
if [ ! -z "$installed_libgcrypt_version" ] ; then
echo "Uninstalling libgcrypt:"
cd libgcrypt-$installed_libgcrypt_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm libgcrypt-$installed_libgcrypt_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf libgcrypt-$installed_libgcrypt_version
rm -rf libgcrypt-$installed_libgcrypt_version.tar.gz
fi
installed_libgcrypt_version=""
fi
}
install_gmp() {
if [ "$GMP_VERSION" -a ! -f gmp-$GMP_VERSION-done ] ; then
echo "Downloading, building, and installing GMP:"
[ -f gmp-$GMP_VERSION.tar.lz ] || curl -L -O https://gmplib.org/download/gmp/gmp-$GMP_VERSION.tar.lz || exit 1
$no_build && echo "Skipping installation" && return
lzip -c -d gmp-$GMP_VERSION.tar.lz | tar xf - || exit 1
cd gmp-$GMP_VERSION
# Create a fat binary: https://gmplib.org/manual/Notes-for-Package-Builds.html
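# ("--enable-fat" builds a library that selects among optimized
# code paths at run time, so a single binary works across CPU
# variants.)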
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --enable-fat || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch gmp-$GMP_VERSION-done
fi
}
uninstall_gmp() {
if [ ! -z "$installed_gmp_version" ] ; then
#
# Nettle depends on this, so uninstall it.
#
uninstall_nettle "$@"
echo "Uninstalling GMP:"
cd gmp-$installed_gmp_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm gmp-$installed_gmp_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf gmp-$installed_gmp_version
rm -rf gmp-$installed_gmp_version.tar.lz
fi
installed_gmp_version=""
fi
}
install_libtasn1() {
if [ "$LIBTASN1_VERSION" -a ! -f libtasn1-$LIBTASN1_VERSION-done ] ; then
echo "Downloading, building, and installing libtasn1:"
[ -f libtasn1-$LIBTASN1_VERSION.tar.gz ] || curl -L -O https://ftpmirror.gnu.org/libtasn1/libtasn1-$LIBTASN1_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat libtasn1-$LIBTASN1_VERSION.tar.gz | tar xf - || exit 1
cd libtasn1-$LIBTASN1_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch libtasn1-$LIBTASN1_VERSION-done
fi
}
uninstall_libtasn1() {
if [ ! -z "$installed_libtasn1_version" ] ; then
#
# p11-kit depends on this, so uninstall it.
#
uninstall_p11_kit "$@"
echo "Uninstalling libtasn1:"
cd libtasn1-$installed_libtasn1_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm libtasn1-$installed_libtasn1_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf libtasn1-$installed_libtasn1_version
rm -rf libtasn1-$installed_libtasn1_version.tar.gz
fi
installed_libtasn1_version=""
fi
}
install_p11_kit() {
if [ "$P11KIT_VERSION" -a ! -f p11-kit-$P11KIT_VERSION-done ] ; then
echo "Downloading, building, and installing p11-kit:"
[ -f p11-kit-$P11KIT_VERSION.tar.xz ] || curl -L -O https://github.com/p11-glue/p11-kit/releases/download/$P11KIT_VERSION/p11-kit-$P11KIT_VERSION.tar.xz || exit 1
$no_build && echo "Skipping installation" && return
xzcat p11-kit-$P11KIT_VERSION.tar.xz | tar xf - || exit 1
cd p11-kit-$P11KIT_VERSION
#
# Prior to Catalina, the libffi that's supplied with macOS
# doesn't support ffi_closure_alloc() or ffi_prep_closure_loc(),
# both of which are required by p11-kit if built with libffi.
#
# According to
#
# https://p11-glue.github.io/p11-glue/p11-kit/manual/devel-building.html
#
# libffi is used "for sharing of PKCS#11 modules between
# multiple callers in the same process. It is highly recommended
# that this dependency be treated as a required dependency.",
# but it's not clear that this matters to us, so we just
# configure p11-kit not to use libffi.
#
CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --without-libffi --without-trust-paths || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch p11-kit-$P11KIT_VERSION-done
fi
}
uninstall_p11_kit() {
if [ ! -z "$installed_p11_kit_version" ] ; then
#
# Nettle depends on this, so uninstall it.
#
uninstall_nettle "$@"
echo "Uninstalling p11-kit:"
cd p11-kit-$installed_p11_kit_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm p11-kit-$installed_p11_kit_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf p11-kit-$installed_p11_kit_version
rm -rf p11-kit-$installed_p11_kit_version.tar.xz
fi
installed_p11_kit_version=""
fi
}
install_nettle() {
if [ "$NETTLE_VERSION" -a ! -f nettle-$NETTLE_VERSION-done ] ; then
echo "Downloading, building, and installing Nettle:"
[ -f nettle-$NETTLE_VERSION.tar.gz ] || curl -L -O https://ftp.gnu.org/gnu/nettle/nettle-$NETTLE_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat nettle-$NETTLE_VERSION.tar.gz | tar xf - || exit 1
cd nettle-$NETTLE_VERSION
if [ "$DARWIN_PROCESSOR_ARCH" = "arm" ] ; then
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-assembler || exit 1
else
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
fi
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch nettle-$NETTLE_VERSION-done
fi
}
uninstall_nettle() {
if [ ! -z "$installed_nettle_version" ] ; then
#
# GnuTLS depends on this, so uninstall it.
#
uninstall_gnutls "$@"
echo "Uninstalling Nettle:"
cd nettle-$installed_nettle_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm nettle-$installed_nettle_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf nettle-$installed_nettle_version
rm -rf nettle-$installed_nettle_version.tar.gz
fi
installed_nettle_version=""
fi
}
install_gnutls() {
if [ "$GNUTLS_VERSION" -a ! -f gnutls-$GNUTLS_VERSION-done ] ; then
#
# GnuTLS requires Nettle.
#
if [ -z "$NETTLE_VERSION" ]
then
echo "GnuTLS requires Nettle, but you didn't install Nettle" 1>&2
exit 1
fi
echo "Downloading, building, and installing GnuTLS:"
if [[ $GNUTLS_MAJOR_VERSION -ge 3 ]]
then
#
# Starting with GnuTLS 3.x, the tarballs are compressed with
# xz rather than bzip2.
#
[ -f gnutls-$GNUTLS_VERSION.tar.xz ] || curl -L -O https://www.gnupg.org/ftp/gcrypt/gnutls/v$GNUTLS_MAJOR_VERSION.$GNUTLS_MINOR_VERSION/gnutls-$GNUTLS_VERSION.tar.xz || exit 1
$no_build && echo "Skipping installation" && return
xzcat gnutls-$GNUTLS_VERSION.tar.xz | tar xf - || exit 1
else
[ -f gnutls-$GNUTLS_VERSION.tar.bz2 ] || curl -L -O https://www.gnupg.org/ftp/gcrypt/gnutls/v$GNUTLS_MAJOR_VERSION.$GNUTLS_MINOR_VERSION/gnutls-$GNUTLS_VERSION.tar.bz2 || exit 1
$no_build && echo "Skipping installation" && return
bzcat gnutls-$GNUTLS_VERSION.tar.bz2 | tar xf - || exit 1
fi
cd gnutls-$GNUTLS_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --with-included-unistring --disable-guile || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch gnutls-$GNUTLS_VERSION-done
fi
}
uninstall_gnutls() {
if [ ! -z "$installed_gnutls_version" ] ; then
echo "Uninstalling GnuTLS:"
cd gnutls-$installed_gnutls_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm gnutls-$installed_gnutls_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf gnutls-$installed_gnutls_version
rm -rf gnutls-$installed_gnutls_version.tar.bz2
fi
installed_gnutls_version=""
fi
}
install_lua() {
if [ "$LUA_VERSION" -a ! -f lua-$LUA_VERSION-done ] ; then
echo "Downloading, building, and installing Lua:"
[ -f lua-$LUA_VERSION.tar.gz ] || curl -L -O https://www.lua.org/ftp/lua-$LUA_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat lua-$LUA_VERSION.tar.gz | tar xf - || exit 1
cd lua-$LUA_VERSION
make MYCFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" MYLDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" $MAKE_BUILD_OPTS macosx || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch lua-$LUA_VERSION-done
fi
}
uninstall_lua() {
if [ ! -z "$installed_lua_version" ] ; then
echo "Uninstalling Lua:"
#
# Lua has no "make uninstall", so just remove stuff manually.
# There's no configure script, so there's no need for
# "make distclean", either; just do "make clean".
#
(cd /usr/local/bin; $DO_RM -f lua luac)
(cd /usr/local/include; $DO_RM -f lua.h luaconf.h lualib.h lauxlib.h lua.hpp)
(cd /usr/local/lib; $DO_RM -f liblua.a)
(cd /usr/local/man/man1; $DO_RM -f lua.1 luac.1)
cd lua-$installed_lua_version
make clean || exit 1
cd ..
rm lua-$installed_lua_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf lua-$installed_lua_version
rm -rf lua-$installed_lua_version.tar.gz
fi
installed_lua_version=""
fi
}
install_snappy() {
if [ "$SNAPPY_VERSION" -a ! -f snappy-$SNAPPY_VERSION-done ] ; then
echo "Downloading, building, and installing snappy:"
[ -f snappy-$SNAPPY_VERSION.tar.gz ] || curl -L -o snappy-$SNAPPY_VERSION.tar.gz https://github.com/google/snappy/archive/$SNAPPY_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat snappy-$SNAPPY_VERSION.tar.gz | tar xf - || exit 1
cd snappy-$SNAPPY_VERSION
mkdir build_dir
cd build_dir
#
# Build a shared library, because we'll be linking libwireshark,
# which is a C library, with libsnappy, and libsnappy is a C++
# library and requires the C++ run time; the shared library
# will carry that dependency with it, so linking with it should
# Just Work.
#
MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" cmake -DBUILD_SHARED_LIBS=YES ../ || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ../..
touch snappy-$SNAPPY_VERSION-done
fi
}
uninstall_snappy() {
if [ ! -z "$installed_snappy_version" ] ; then
echo "Uninstalling snappy:"
cd snappy-$installed_snappy_version
#
# snappy uses cmake and doesn't support "make uninstall";
# just remove what we know it installs.
#
# $DO_MAKE_UNINSTALL || exit 1
if [ -s build_dir/install_manifest.txt ] ; then
while read -r ; do $DO_RM -v "$REPLY" ; done < <(cat build_dir/install_manifest.txt; echo)
else
$DO_RM -f /usr/local/lib/libsnappy.1.1.8.dylib \
/usr/local/lib/libsnappy.1.dylib \
/usr/local/lib/libsnappy.dylib \
/usr/local/include/snappy-c.h \
/usr/local/include/snappy-sinksource.h \
/usr/local/include/snappy-stubs-public.h \
/usr/local/include/snappy.h \
/usr/local/lib/cmake/Snappy/SnappyConfig.cmake \
/usr/local/lib/cmake/Snappy/SnappyConfigVersion.cmake \
/usr/local/lib/cmake/Snappy/SnappyTargets-noconfig.cmake \
/usr/local/lib/cmake/Snappy/SnappyTargets.cmake || exit 1
fi
#
# snappy uses cmake and doesn't support "make distclean";
# just remove the entire build directory.
#
# make distclean || exit 1
rm -rf build_dir || exit 1
cd ..
rm snappy-$installed_snappy_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf snappy-$installed_snappy_version
rm -rf snappy-$installed_snappy_version.tar.gz
fi
installed_snappy_version=""
fi
}
install_zstd() {
if [ "$ZSTD_VERSION" -a ! -f zstd-$ZSTD_VERSION-done ] ; then
echo "Downloading, building, and installing zstd:"
[ -f zstd-$ZSTD_VERSION.tar.gz ] || curl -L -O https://github.com/facebook/zstd/releases/download/v$ZSTD_VERSION/zstd-$ZSTD_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat zstd-$ZSTD_VERSION.tar.gz | tar xf - || exit 1
cd zstd-$ZSTD_VERSION
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch zstd-$ZSTD_VERSION-done
fi
}
uninstall_zstd() {
if [ ! -z "$installed_zstd_version" ] ; then
echo "Uninstalling zstd:"
cd zstd-$installed_zstd_version
$DO_MAKE_UNINSTALL || exit 1
#
# zstd has no configure script, so there's no need for
# "make distclean", and the Makefile supplied with it
# has no "make distclean" rule; just do "make clean".
#
make clean || exit 1
cd ..
rm zstd-$installed_zstd_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf zstd-$installed_zstd_version
rm -rf zstd-$installed_zstd_version.tar.gz
fi
installed_zstd_version=""
fi
}
install_libxml2() {
if [ "$LIBXML2_VERSION" -a ! -f libxml2-$LIBXML2_VERSION-done ] ; then
echo "Downloading, building, and installing libxml2:"
[ -f libxml2-$LIBXML2_VERSION.tar.gz ] || curl -L -O ftp://xmlsoft.org/libxml2/libxml2-$LIBXML2_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat libxml2-$LIBXML2_VERSION.tar.gz | tar xf - || exit 1
cd libxml2-$LIBXML2_VERSION
#
# At least on macOS 12.0.1 with Xcode 13.1, when we build
# libxml2, the linker complains that we don't have the right
# to link with the Python framework, so don't build with
# Python.
#
CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --without-python || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch libxml2-$LIBXML2_VERSION-done
fi
}
uninstall_libxml2() {
if [ ! -z "$installed_libxml2_version" ] ; then
echo "Uninstalling libxml2:"
cd libxml2-$installed_libxml2_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm libxml2-$installed_libxml2_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf libxml2-$installed_libxml2_version
rm -rf libxml2-$installed_libxml2_version.tar.gz
fi
installed_libxml2_version=""
fi
}
install_lz4() {
if [ "$LZ4_VERSION" -a ! -f lz4-$LZ4_VERSION-done ] ; then
echo "Downloading, building, and installing lz4:"
#
# lz4 switched from sequentially numbered releases, named rN,
# to vX.Y.Z-numbered releases.
#
# The old sequentially-numbered releases were in tarballs
# at https://github.com/lz4/lz4/archive/rN.tar.gz, which
# extract into an lz4-rN directory.
#
# The new vX.Y.Z-numbered releases are in tarballs at
# https://github.com/lz4/lz4/archive/vX.Y.Z.tar.gz, which
# extract into an lz4-X.Y.Z directory - no, not lz4-vX.Y.Z,
# just lz4-X.Y.Z.
#
# We expect LZ4_VERSION to be set to rN for the sequentially-
# numbered releases and X.Y.Z - not vX.Y.Z - for the vX.Y.Z-
# numbered releases. We also tell Curl to download the tarball
# with a name that corresponds to the name of the target
# directory, so that it begins with "lz4-" and ends with either
# "rN" or "X.Y.Z", to match what almost all of the other
# support libraries do.
#
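# For example, LZ4_VERSION=r131 fetches .../archive/r131.tar.gz,
# while LZ4_VERSION=1.9.3 fetches .../archive/v1.9.3.tar.gz; in
# both cases the local copy is named lz4-$LZ4_VERSION.tar.gz.
#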
if [[ "$LZ4_VERSION" == r* ]]
then
[ -f lz4-$LZ4_VERSION.tar.gz ] || curl -L -o lz4-$LZ4_VERSION.tar.gz https://github.com/lz4/lz4/archive/$LZ4_VERSION.tar.gz || exit 1
else
[ -f lz4-$LZ4_VERSION.tar.gz ] || curl -L -o lz4-$LZ4_VERSION.tar.gz https://github.com/lz4/lz4/archive/v$LZ4_VERSION.tar.gz || exit 1
fi
$no_build && echo "Skipping installation" && return
gzcat lz4-$LZ4_VERSION.tar.gz | tar xf - || exit 1
cd lz4-$LZ4_VERSION
#
# No configure script here, but it appears that if MOREFLAGS is
# set, that's added to CFLAGS, and those are combined with LDFLAGS
# and CXXFLAGS into FLAGS, which is used when building source
# files and libraries.
#
MOREFLAGS="-D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch lz4-$LZ4_VERSION-done
fi
}
uninstall_lz4() {
if [ ! -z "$installed_lz4_version" ] ; then
echo "Uninstalling lz4:"
cd lz4-$installed_lz4_version
$DO_MAKE_UNINSTALL || exit 1
#
# lz4's Makefile doesn't support "make distclean"; just do
# "make clean". Perhaps not using autotools means that
# there's no need for "make distclean".
#
# make distclean || exit 1
make clean || exit 1
cd ..
rm lz4-$installed_lz4_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
# "make install" apparently causes some stuff to be
# modified in the build tree, so, as it's done as
# root, that leaves stuff owned by root in the build
# tree. Therefore, we have to remove the build tree
# as root.
#
sudo rm -rf lz4-$installed_lz4_version
rm -rf lz4-$installed_lz4_version.tar.gz
fi
installed_lz4_version=""
fi
}
install_sbc() {
if [ "$SBC_VERSION" -a ! -f sbc-$SBC_VERSION-done ] ; then
echo "Downloading, building, and installing sbc:"
[ -f sbc-$SBC_VERSION.tar.gz ] || curl -L -O https://www.kernel.org/pub/linux/bluetooth/sbc-$SBC_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat sbc-$SBC_VERSION.tar.gz | tar xf - || exit 1
cd sbc-$SBC_VERSION
if [ "$DARWIN_PROCESSOR_ARCH" = "arm" ] ; then
CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS -U__ARM_NEON__" CXXFLAGS="$CXXFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-tools --disable-tester --disable-shared || exit 1
else
CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-tools --disable-tester --disable-shared || exit 1
fi
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch sbc-$SBC_VERSION-done
fi
}
uninstall_sbc() {
if [ ! -z "$installed_sbc_version" ] ; then
echo "Uninstalling sbc:"
cd sbc-$installed_sbc_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm sbc-$installed_sbc_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf sbc-$installed_sbc_version
rm -rf sbc-$installed_sbc_version.tar.gz
fi
installed_sbc_version=""
fi
}
install_maxminddb() {
if [ "$MAXMINDDB_VERSION" -a ! -f maxminddb-$MAXMINDDB_VERSION-done ] ; then
echo "Downloading, building, and installing MaxMindDB API:"
[ -f libmaxminddb-$MAXMINDDB_VERSION.tar.gz ] || curl -L -O https://github.com/maxmind/libmaxminddb/releases/download/$MAXMINDDB_VERSION/libmaxminddb-$MAXMINDDB_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat libmaxminddb-$MAXMINDDB_VERSION.tar.gz | tar xf - || exit 1
cd libmaxminddb-$MAXMINDDB_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch maxminddb-$MAXMINDDB_VERSION-done
fi
}
uninstall_maxminddb() {
if [ ! -z "$installed_maxminddb_version" ] ; then
echo "Uninstalling MaxMindDB API:"
cd libmaxminddb-$installed_maxminddb_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm maxminddb-$installed_maxminddb_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf libmaxminddb-$installed_maxminddb_version
rm -rf libmaxminddb-$installed_maxminddb_version.tar.gz
fi
installed_maxminddb_version=""
fi
}
install_c_ares() {
if [ "$CARES_VERSION" -a ! -f c-ares-$CARES_VERSION-done ] ; then
echo "Downloading, building, and installing C-Ares API:"
[ -f c-ares-$CARES_VERSION.tar.gz ] || curl -L -O https://c-ares.org/download/c-ares-$CARES_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat c-ares-$CARES_VERSION.tar.gz | tar xf - || exit 1
cd c-ares-$CARES_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch c-ares-$CARES_VERSION-done
fi
}
uninstall_c_ares() {
if [ ! -z "$installed_cares_version" ] ; then
echo "Uninstalling C-Ares API:"
cd c-ares-$installed_cares_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm c-ares-$installed_cares_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf c-ares-$installed_cares_version
rm -rf c-ares-$installed_cares_version.tar.gz
fi
installed_cares_version=""
fi
}
install_libssh() {
if [ "$LIBSSH_VERSION" -a ! -f libssh-$LIBSSH_VERSION-done ] ; then
echo "Downloading, building, and installing libssh:"
LIBSSH_MAJOR_VERSION="`expr $LIBSSH_VERSION : '\([0-9][0-9]*\).*'`"
LIBSSH_MINOR_VERSION="`expr $LIBSSH_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
LIBSSH_MAJOR_MINOR_VERSION=$LIBSSH_MAJOR_VERSION.$LIBSSH_MINOR_VERSION
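# For example, LIBSSH_VERSION=0.9.6 gives 0.9, matching the
# directory layout under https://www.libssh.org/files/.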
[ -f libssh-$LIBSSH_VERSION.tar.xz ] || curl -L -O https://www.libssh.org/files/$LIBSSH_MAJOR_MINOR_VERSION/libssh-$LIBSSH_VERSION.tar.xz || exit 1
$no_build && echo "Skipping installation" && return
xzcat libssh-$LIBSSH_VERSION.tar.xz | tar xf - || exit 1
cd libssh-$LIBSSH_VERSION
patch -p0 <${topdir}/macosx-support-lib-patches/libssh-void-arglists.patch || exit 1
mkdir build
cd build
MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" cmake -DWITH_GCRYPT=1 ../ || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ../..
touch libssh-$LIBSSH_VERSION-done
fi
}
uninstall_libssh() {
if [ ! -z "$installed_libssh_version" ] ; then
echo "Uninstalling libssh:"
cd libssh-$installed_libssh_version
#
# libssh uses cmake and doesn't support "make uninstall";
# just remove what we know it installs.
#
# $DO_MAKE_UNINSTALL || exit 1
$DO_RM -rf /usr/local/lib/libssh* \
/usr/local/include/libssh \
/usr/local/lib/pkgconfig/libssh* \
/usr/local/lib/cmake/libssh || exit 1
#
# libssh uses cmake and doesn't support "make distclean";
# just remove the entire build directory.
#
# make distclean || exit 1
rm -rf build || exit 1
cd ..
rm libssh-$installed_libssh_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf libssh-$installed_libssh_version
rm -rf libssh-$installed_libssh_version.tar.xz
fi
installed_libssh_version=""
fi
}
install_nghttp2() {
if [ "$NGHTTP2_VERSION" -a ! -f nghttp2-$NGHTTP2_VERSION-done ] ; then
echo "Downloading, building, and installing nghttp2:"
[ -f nghttp2-$NGHTTP2_VERSION.tar.xz ] || curl -L -O https://github.com/nghttp2/nghttp2/releases/download/v$NGHTTP2_VERSION/nghttp2-$NGHTTP2_VERSION.tar.xz || exit 1
$no_build && echo "Skipping installation" && return
xzcat nghttp2-$NGHTTP2_VERSION.tar.xz | tar xf - || exit 1
cd nghttp2-$NGHTTP2_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --enable-lib-only || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch nghttp2-$NGHTTP2_VERSION-done
fi
}
uninstall_nghttp2() {
if [ ! -z "$installed_nghttp2_version" ] ; then
echo "Uninstalling nghttp2:"
cd nghttp2-$installed_nghttp2_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm nghttp2-$installed_nghttp2_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf nghttp2-$installed_nghttp2_version
rm -rf nghttp2-$installed_nghttp2_version.tar.xz
fi
installed_nghttp2_version=""
fi
}
install_libtiff() {
if [ "$LIBTIFF_VERSION" -a ! -f tiff-$LIBTIFF_VERSION-done ] ; then
echo "Downloading, building, and installing libtiff:"
[ -f tiff-$LIBTIFF_VERSION.tar.gz ] ||
curl --fail -L -O https://download.osgeo.org/libtiff/tiff-$LIBTIFF_VERSION.tar.gz ||
curl --fail -L -O https://download.osgeo.org/libtiff/old/tiff-$LIBTIFF_VERSION.tar.gz ||
exit 1
$no_build && echo "Skipping installation" && return
gzcat tiff-$LIBTIFF_VERSION.tar.gz | tar xf - || exit 1
cd tiff-$LIBTIFF_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch tiff-$LIBTIFF_VERSION-done
fi
}
uninstall_libtiff() {
if [ ! -z "$installed_libtiff_version" ] ; then
echo "Uninstalling libtiff:"
cd tiff-$installed_libtiff_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm tiff-$installed_libtiff_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf tiff-$installed_libtiff_version
rm -rf tiff-$installed_libtiff_version.tar.gz
fi
installed_libtiff_version=""
fi
}
install_spandsp() {
if [ "$SPANDSP_VERSION" -a ! -f spandsp-$SPANDSP_VERSION-done ] ; then
echo "Downloading, building, and installing SpanDSP:"
[ -f spandsp-$SPANDSP_VERSION.tar.gz ] || curl -L -O https://www.soft-switch.org/downloads/spandsp/spandsp-$SPANDSP_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat spandsp-$SPANDSP_VERSION.tar.gz | tar xf - || exit 1
cd spandsp-$SPANDSP_VERSION
#
# Don't use -Wunused-but-set-variable, as it's not supported
# by all the gcc versions in the versions of Xcode that we
# support.
#
patch -p0 <${topdir}/macosx-support-lib-patches/spandsp-configure-patch || exit 1
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch spandsp-$SPANDSP_VERSION-done
fi
}
uninstall_spandsp() {
if [ ! -z "$installed_spandsp_version" ] ; then
echo "Uninstalling SpanDSP:"
cd spandsp-$installed_spandsp_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm spandsp-$installed_spandsp_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf spandsp-$installed_spandsp_version
rm -rf spandsp-$installed_spandsp_version.tar.gz
fi
installed_spandsp_version=""
fi
}
install_speexdsp() {
if [ "$SPEEXDSP_VERSION" -a ! -f speexdsp-$SPEEXDSP_VERSION-done ] ; then
echo "Downloading, building, and installing SpeexDSP:"
[ -f speexdsp-$SPEEXDSP_VERSION.tar.gz ] || curl -L -O https://ftp.osuosl.org/pub/xiph/releases/speex/speexdsp-$SPEEXDSP_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat speexdsp-$SPEEXDSP_VERSION.tar.gz | tar xf - || exit 1
cd speexdsp-$SPEEXDSP_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch speexdsp-$SPEEXDSP_VERSION-done
fi
}
uninstall_speexdsp() {
if [ ! -z "$installed_speexdsp_version" ] ; then
echo "Uninstalling SpeexDSP:"
cd speexdsp-$installed_speexdsp_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm speexdsp-$installed_speexdsp_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf speexdsp-$installed_speexdsp_version
rm -rf speexdsp-$installed_speexdsp_version.tar.gz
fi
installed_speexdsp_version=""
fi
}
install_bcg729() {
if [ "$BCG729_VERSION" -a ! -f bcg729-$BCG729_VERSION-done ] ; then
echo "Downloading, building, and installing bcg729:"
[ -f bcg729-$BCG729_VERSION.tar.gz ] || curl -L -O https://download.savannah.gnu.org/releases/linphone/plugins/sources/bcg729-$BCG729_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat bcg729-$BCG729_VERSION.tar.gz | tar xf - || exit 1
cd bcg729-$BCG729_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch bcg729-$BCG729_VERSION-done
fi
}
uninstall_bcg729() {
if [ ! -z "$installed_bcg729_version" ] ; then
echo "Uninstalling bcg729:"
cd bcg729-$installed_bcg729_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm bcg729-$installed_bcg729_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf bcg729-$installed_bcg729_version
rm -rf bcg729-$installed_bcg729_version.tar.gz
fi
installed_bcg729_version=""
fi
}
install_ilbc() {
if [ -n "$ILBC_VERSION" ] && [ ! -f ilbc-$ILBC_VERSION-done ] ; then
echo "Downloading, building, and installing iLBC:"
[ -f libilbc-$ILBC_VERSION.tar.bz2 ] || curl --location --remote-name https://github.com/TimothyGu/libilbc/releases/download/v$ILBC_VERSION/libilbc-$ILBC_VERSION.tar.bz2 || exit 1
$no_build && echo "Skipping installation" && return
bzcat libilbc-$ILBC_VERSION.tar.bz2 | tar xf - || exit 1
cd libilbc-$ILBC_VERSION || exit 1
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch ilbc-$ILBC_VERSION-done
fi
}
uninstall_ilbc() {
if [ -n "$installed_ilbc_version" ] ; then
echo "Uninstalling iLBC:"
cd "libilbc-$installed_ilbc_version" || exit 1
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm "ilbc-$installed_ilbc_version-done"
if [ "$#" -eq 1 ] && [ "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf "libilbc-$installed_ilbc_version"
rm -rf "libilbc-$installed_ilbc_version.tar.bz2"
fi
installed_ilbc_version=""
fi
}
install_opus() {
if [ "$OPUS_VERSION" -a ! -f opus-$OPUS_VERSION-done ] ; then
echo "Downloading, building, and installing opus:"
[ -f opus-$OPUS_VERSION.tar.gz ] || curl -L -O https://archive.mozilla.org/pub/opus/opus-$OPUS_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat opus-$OPUS_VERSION.tar.gz | tar xf - || exit 1
cd opus-$OPUS_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch opus-$OPUS_VERSION-done
fi
}
uninstall_opus() {
if [ ! -z "$installed_opus_version" ] ; then
echo "Uninstalling opus:"
cd opus-$installed_opus_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm opus-$installed_opus_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf opus-$installed_opus_version
rm -rf opus-$installed_opus_version.tar.gz
fi
installed_opus_version=""
fi
}
install_python3() {
# The macos11 installer can be deployed to older versions, down to
# 10.9 (Mavericks), but is still considered experimental so continue
# to use the 64-bit installer (10.9) on earlier releases for now.
local macver=x10.9
if [[ $DARWIN_MAJOR_VERSION -gt 19 ]]; then
# The macos11 installer is required for Arm-based Macs, which require
# macOS 11 Big Sur. Note that the package name is "11.0" (no x) for
# 3.9.1 but simply "11" for 3.9.2 (and later)
if [[ $PYTHON3_VERSION = 3.9.1 ]]; then
macver=11.0
else
macver=11
fi
fi
if [ "$PYTHON3_VERSION" -a ! -f python3-$PYTHON3_VERSION-done ] ; then
echo "Downloading and installing python3:"
[ -f python-$PYTHON3_VERSION-macos$macver.pkg ] || curl -L -O https://www.python.org/ftp/python/$PYTHON3_VERSION/python-$PYTHON3_VERSION-macos$macver.pkg || exit 1
$no_build && echo "Skipping installation" && return
sudo installer -target / -pkg python-$PYTHON3_VERSION-macos$macver.pkg || exit 1
touch python3-$PYTHON3_VERSION-done
#
# On macOS, the pip3 installed from Python packages appears to
# install scripts /Library/Frameworks/Python.framework/Versions/M.N/bin,
# where M.N is the major and minor version of Python (the dot-dot
# release is irrelevant).
#
# Strip off any dot-dot component in $PYTHON3_VERSION.
#
python_version=`echo $PYTHON3_VERSION | sed 's/\([1-9][0-9]*\.[1-9][0-9]*\).*/\1/'`
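# For example, a PYTHON3_VERSION of "3.9.2" yields a python_version
# of "3.9".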
#
# Now treat Meson as being in the directory in question.
#
MESON="/Library/Frameworks/Python.framework/Versions/$python_version/bin/meson"
else
#
# We're using the Python 3 that's in /usr/bin, the pip3 for
# which installs scripts in /usr/local/bin, so, when we
# install Meson, look for it there.
#
MESON=/usr/local/bin/meson
fi
}
uninstall_python3() {
# Major version (e.g. "3.7")
local PYTHON_VERSION=${installed_python3_version%.*}
if [ ! -z "$installed_python3_version" ] ; then
echo "Uninstalling python3:"
frameworkdir="/Library/Frameworks/Python.framework/Versions/$PYTHON_VERSION"
sudo rm -rf "$frameworkdir"
sudo rm -rf "/Applications/Python $PYTHON_VERSION"
sudo find /usr/local/bin -maxdepth 1 -lname "*$frameworkdir/bin/*" -delete
# Remove three symlinks and empty directories. Removing directories
# might fail if for some reason multiple versions are installed.
sudo rm /Library/Frameworks/Python.framework/Headers
sudo rm /Library/Frameworks/Python.framework/Python
sudo rm /Library/Frameworks/Python.framework/Resources
sudo rmdir /Library/Frameworks/Python.framework/Versions
sudo rmdir /Library/Frameworks/Python.framework
sudo pkgutil --forget org.python.Python.PythonApplications-$PYTHON_VERSION
sudo pkgutil --forget org.python.Python.PythonDocumentation-$PYTHON_VERSION
sudo pkgutil --forget org.python.Python.PythonFramework-$PYTHON_VERSION
sudo pkgutil --forget org.python.Python.PythonUnixTools-$PYTHON_VERSION
rm python3-$installed_python3_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -f python-$installed_python3_version-macos11.pkg
rm -f python-$installed_python3_version-macos11.0.pkg
rm -f python-$installed_python3_version-macosx10.9.pkg
rm -f python-$installed_python3_version-macosx10.6.pkg
fi
installed_python3_version=""
fi
}
install_brotli() {
if [ "$BROTLI_VERSION" -a ! -f brotli-$BROTLI_VERSION-done ] ; then
echo "Downloading, building, and installing brotli:"
[ -f brotli-$BROTLI_VERSION.tar.gz ] || curl -L -o brotli-$BROTLI_VERSION.tar.gz https://github.com/google/brotli/archive/v$BROTLI_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat brotli-$BROTLI_VERSION.tar.gz | tar xf - || exit 1
cd brotli-$BROTLI_VERSION
mkdir build_dir
cd build_dir
MACOSX_DEPLOYMENT_TARGET=$min_osx_target SDKROOT="$SDKPATH" cmake ../ || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ../..
touch brotli-$BROTLI_VERSION-done
fi
}
uninstall_brotli() {
if [ ! -z "$installed_brotli_version" ] ; then
echo "Uninstalling brotli:"
cd brotli-$installed_brotli_version
#
# brotli uses cmake on macOS and doesn't support "make uninstall";
# just remove what we know it installs.
#
# $DO_MAKE_UNINSTALL || exit 1
$DO_RM -rf /usr/local/bin/brotli \
/usr/local/lib/libbrotli* \
/usr/local/include/brotli \
/usr/local/lib/pkgconfig/libbrotli* || exit 1
#
# brotli uses cmake on macOS and doesn't support "make distclean";
# just remove the entire build directory.
#
# make distclean || exit 1
rm -rf build_dir || exit 1
cd ..
rm brotli-$installed_brotli_version-done
if [ "$#" -eq 1 -a "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf brotli-$installed_brotli_version
rm -rf brotli-$installed_brotli_version.tar.gz
fi
installed_brotli_version=""
fi
}
install_minizip() {
if [ "$ZLIB_VERSION" ] && [ ! -f minizip-$ZLIB_VERSION-done ] ; then
echo "Downloading, building, and installing zlib for minizip:"
[ -f zlib-$ZLIB_VERSION.tar.gz ] || curl -L -o zlib-$ZLIB_VERSION.tar.gz https://zlib.net/zlib-$ZLIB_VERSION.tar.gz || exit 1
$no_build && echo "Skipping installation" && return
gzcat zlib-$ZLIB_VERSION.tar.gz | tar xf - || exit 1
#
# minizip ships both with a minimal Makefile that doesn't
# support "make install", "make uninstall", or "make distclean",
# and with a Makefile.am file that, if we do an autoreconf,
# gives us a configure script, and a Makefile.in that, if we run
# the configure script, gives us a Makefile that supports all of
# those targets, and that installs a pkg-config .pc file for
# minizip.
#
# So that's what we do.
#
cd zlib-$ZLIB_VERSION/contrib/minizip || exit 1
LIBTOOLIZE=glibtoolize autoreconf --force --install
CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" CXXFLAGS="$CXXFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ../../..
touch minizip-$ZLIB_VERSION-done
fi
}
uninstall_minizip() {
if [ -n "$installed_minizip_version" ] ; then
echo "Uninstalling minizip:"
cd zlib-$installed_minizip_version/contrib/minizip
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ../../..
rm minizip-$installed_minizip_version-done
if [ "$#" -eq 1 ] && [ "$1" = "-r" ] ; then
#
# Get rid of the previously downloaded and unpacked version.
#
rm -rf zlib-$installed_minizip_version
rm -rf zlib-$installed_minizip_version.tar.gz
fi
installed_minizip_version=""
fi
}
install_sparkle() {
if [ "$SPARKLE_VERSION" ] && [ ! -f sparkle-$SPARKLE_VERSION-done ] ; then
echo "Downloading and installing Sparkle:"
#
# Download the tarball and unpack it in /usr/local/Sparkle-x.y.z
#
[ -f Sparkle-$SPARKLE_VERSION.tar.xz ] || curl -L -o Sparkle-$SPARKLE_VERSION.tar.xz https://github.com/sparkle-project/Sparkle/releases/download/$SPARKLE_VERSION/Sparkle-$SPARKLE_VERSION.tar.xz || exit 1
$no_build && echo "Skipping installation" && return
test -d "/usr/local/Sparkle-$SPARKLE_VERSION" || sudo mkdir "/usr/local/Sparkle-$SPARKLE_VERSION"
sudo tar -C "/usr/local/Sparkle-$SPARKLE_VERSION" -xpof Sparkle-$SPARKLE_VERSION.tar.xz
touch sparkle-$SPARKLE_VERSION-done
fi
}
uninstall_sparkle() {
if [ -n "$installed_sparkle_version" ]; then
echo "Uninstalling Sparkle:"
sudo rm -rf "/usr/local/Sparkle-$installed_sparkle_version"
if [ "$#" -eq 1 ] && [ "$1" = "-r" ] ; then
rm -f "Sparkle-$installed_sparkle_version.tar.xz"
fi
installed_sparkle_version=""
fi
}
install_all() {
#
# Check whether the versions we have installed are the versions
# requested; if not, uninstall the installed versions.
#
if [ ! -z "$installed_brotli_version" -a \
"$installed_brotli_version" != "$BROTLI_VERSION" ] ; then
echo "Installed brotli version is $installed_brotli_version"
if [ -z "$BROTLI_VERSION" ] ; then
echo "brotli is not requested"
else
echo "Requested brotli version is $BROTLI_VERSION"
fi
uninstall_brotli -r
fi
if [ ! -z "$installed_python3_version" -a \
"$installed_python3_version" != "$PYTHON3_VERSION" ] ; then
echo "Installed python3 version is $installed_python3_version"
if [ -z "$PYTHON3_VERSION" ] ; then
echo "python3 is not requested"
else
echo "Requested python3 version is $PYTHON3_VERSION"
fi
uninstall_python3 -r
fi
if [ ! -z "$installed_bcg729_version" -a \
"$installed_bcg729_version" != "$BCG729_VERSION" ] ; then
echo "Installed bcg729 version is $installed_bcg729_version"
if [ -z "$BCG729_VERSION" ] ; then
echo "bcg729 is not requested"
else
echo "Requested bcg729 version is $BCG729_VERSION"
fi
uninstall_bcg729 -r
fi
if [ -n "$installed_ilbc_version" ] \
&& [ "$installed_ilbc_version" != "$ILBC_VERSION" ] ; then
echo "Installed iLBC version is $installed_ilbc_version"
if [ -z "$ILBC_VERSION" ] ; then
echo "iLBC is not requested"
else
echo "Requested iLBC version is $ILBC_VERSION"
fi
uninstall_ilbc -r
fi
if [ -n "$installed_opus_version" ] \
&& [ "$installed_opus_version" != "$OPUS_VERSION" ] ; then
echo "Installed opus version is $installed_opus_version"
if [ -z "$OPUS_VERSION" ] ; then
echo "opus is not requested"
else
echo "Requested opus version is $OPUS_VERSION"
fi
uninstall_opus -r
fi
if [ ! -z "$installed_spandsp_version" -a \
"$installed_spandsp_version" != "$SPANDSP_VERSION" ] ; then
echo "Installed SpanDSP version is $installed_spandsp_version"
if [ -z "$SPANDSP_VERSION" ] ; then
echo "spandsp is not requested"
else
echo "Requested SpanDSP version is $SPANDSP_VERSION"
fi
uninstall_spandsp -r
fi
if [ ! -z "$installed_speexdsp_version" -a \
"$installed_speexdsp_version" != "$SPEEXDSP_VERSION" ] ; then
echo "Installed SpeexDSP version is $installed_speexdsp_version"
if [ -z "$SPEEXDSP_VERSION" ] ; then
echo "speexdsp is not requested"
else
echo "Requested SpeexDSP version is $SPEEXDSP_VERSION"
fi
uninstall_speexdsp -r
fi
if [ ! -z "$installed_libtiff_version" -a \
"$installed_libtiff_version" != "$LIBTIFF_VERSION" ] ; then
echo "Installed libtiff version is $installed_libtiff_version"
if [ -z "$LIBTIFF_VERSION" ] ; then
echo "libtiff is not requested"
else
echo "Requested libtiff version is $LIBTIFF_VERSION"
fi
uninstall_libtiff -r
fi
if [ ! -z "$installed_nghttp2_version" -a \
"$installed_nghttp2_version" != "$NGHTTP2_VERSION" ] ; then
echo "Installed nghttp2 version is $installed_nghttp2_version"
if [ -z "$NGHTTP2_VERSION" ] ; then
echo "nghttp2 is not requested"
else
echo "Requested nghttp2 version is $NGHTTP2_VERSION"
fi
uninstall_nghttp2 -r
fi
if [ ! -z "$installed_libssh_version" -a \
"$installed_libssh_version" != "$LIBSSH_VERSION" ] ; then
echo "Installed libssh version is $installed_libssh_version"
if [ -z "$LIBSSH_VERSION" ] ; then
echo "libssh is not requested"
else
echo "Requested libssh version is $LIBSSH_VERSION"
fi
uninstall_libssh -r
fi
if [ ! -z "$installed_cares_version" -a \
"$installed_cares_version" != "$CARES_VERSION" ] ; then
echo "Installed C-Ares version is $installed_cares_version"
if [ -z "$CARES_VERSION" ] ; then
echo "C-Ares is not requested"
else
echo "Requested C-Ares version is $CARES_VERSION"
fi
uninstall_c_ares -r
fi
if [ ! -z "$installed_maxminddb_version" -a \
"$installed_maxminddb_version" != "$MAXMINDDB_VERSION" ] ; then
echo "Installed MaxMindDB API version is $installed_maxminddb_version"
if [ -z "$MAXMINDDB_VERSION" ] ; then
echo "MaxMindDB is not requested"
else
echo "Requested MaxMindDB version is $MAXMINDDB_VERSION"
fi
uninstall_maxminddb -r
fi
if [ ! -z "$installed_sbc_version" -a \
"$installed_sbc_version" != "$SBC_VERSION" ] ; then
echo "Installed SBC version is $installed_sbc_version"
if [ -z "$SBC_VERSION" ] ; then
echo "SBC is not requested"
else
echo "Requested SBC version is $SBC_VERSION"
fi
uninstall_sbc -r
fi
if [ ! -z "$installed_lz4_version" -a \
"$installed_lz4_version" != "$LZ4_VERSION" ] ; then
echo "Installed LZ4 version is $installed_lz4_version"
if [ -z "$LZ4_VERSION" ] ; then
echo "LZ4 is not requested"
else
echo "Requested LZ4 version is $LZ4_VERSION"
fi
uninstall_lz4 -r
fi
if [ ! -z "$installed_libxml2_version" -a \
"$installed_libxml2_version" != "$LIBXML2_VERSION" ] ; then
echo "Installed libxml2 version is $installed_libxml2_version"
if [ -z "$LIBXML2_VERSION" ] ; then
echo "libxml2 is not requested"
else
echo "Requested libxml2 version is $LIBXML2_VERSION"
fi
uninstall_libxml2 -r
fi
if [ ! -z "$installed_snappy_version" -a \
"$installed_snappy_version" != "$SNAPPY_VERSION" ] ; then
echo "Installed SNAPPY version is $installed_snappy_version"
if [ -z "$SNAPPY_VERSION" ] ; then
echo "SNAPPY is not requested"
else
echo "Requested SNAPPY version is $SNAPPY_VERSION"
fi
uninstall_snappy -r
fi
if [ ! -z "$installed_lua_version" -a \
"$installed_lua_version" != "$LUA_VERSION" ] ; then
echo "Installed Lua version is $installed_lua_version"
if [ -z "$LUA_VERSION" ] ; then
echo "Lua is not requested"
else
echo "Requested Lua version is $LUA_VERSION"
fi
uninstall_lua -r
fi
if [ ! -z "$installed_gnutls_version" -a \
"$installed_gnutls_version" != "$GNUTLS_VERSION" ] ; then
echo "Installed GnuTLS version is $installed_gnutls_version"
if [ -z "$GNUTLS_VERSION" ] ; then
echo "GnuTLS is not requested"
else
echo "Requested GnuTLS version is $GNUTLS_VERSION"
fi
uninstall_gnutls -r
fi
if [ ! -z "$installed_nettle_version" -a \
"$installed_nettle_version" != "$NETTLE_VERSION" ] ; then
echo "Installed Nettle version is $installed_nettle_version"
if [ -z "$NETTLE_VERSION" ] ; then
echo "Nettle is not requested"
else
echo "Requested Nettle version is $NETTLE_VERSION"
fi
uninstall_nettle -r
fi
if [ ! -z "$installed_gmp_version" -a \
"$installed_gmp_version" != "$GMP_VERSION" ] ; then
echo "Installed GMP version is $installed_gmp_version"
if [ -z "$GMP_VERSION" ] ; then
echo "GMP is not requested"
else
echo "Requested GMP version is $GMP_VERSION"
fi
uninstall_gmp -r
fi
if [ ! -z "$installed_p11_kit_version" -a \
"$installed_p11_kit_version" != "$P11KIT_VERSION" ] ; then
echo "Installed p11-kit version is $installed_p11_kit_version"
if [ -z "$P11KIT_VERSION" ] ; then
echo "p11-kit is not requested"
else
echo "Requested p11-kit version is $P11KIT_VERSION"
fi
uninstall_p11_kit -r
fi
if [ ! -z "$installed_libtasn1_version" -a \
"$installed_libtasn1_version" != "$LIBTASN1_VERSION" ] ; then
echo "Installed libtasn1 version is $installed_libtasn1_version"
if [ -z "$LIBTASN1_VERSION" ] ; then
echo "libtasn1 is not requested"
else
echo "Requested libtasn1 version is $LIBTASN1_VERSION"
fi
uninstall_libtasn1 -r
fi
if [ ! -z "$installed_libgcrypt_version" -a \
"$installed_libgcrypt_version" != "$LIBGCRYPT_VERSION" ] ; then
echo "Installed libgcrypt version is $installed_libgcrypt_version"
if [ -z "$LIBGCRYPT_VERSION" ] ; then
echo "libgcrypt is not requested"
else
echo "Requested libgcrypt version is $LIBGCRYPT_VERSION"
fi
uninstall_libgcrypt -r
fi
if [ ! -z "$installed_libgpg_error_version" -a \
"$installed_libgpg_error_version" != "$LIBGPG_ERROR_VERSION" ] ; then
echo "Installed libgpg-error version is $installed_libgpg_error_version"
if [ -z "$LIBGPG_ERROR_VERSION" ] ; then
echo "libgpg-error is not requested"
else
echo "Requested libgpg-error version is $LIBGPG_ERROR_VERSION"
fi
uninstall_libgpg_error -r
fi
if [ ! -z "$installed_libsmi_version" -a \
"$installed_libsmi_version" != "$LIBSMI_VERSION" ] ; then
echo "Installed libsmi version is $installed_libsmi_version"
if [ -z "$LIBSMI_VERSION" ] ; then
echo "libsmi is not requested"
else
echo "Requested libsmi version is $LIBSMI_VERSION"
fi
uninstall_libsmi -r
fi
if [ ! -z "$installed_qt_version" -a \
"$installed_qt_version" != "$QT_VERSION" ] ; then
echo "Installed Qt version is $installed_qt_version"
if [ -z "$QT_VERSION" ] ; then
echo "Qt is not requested"
else
echo "Requested Qt version is $QT_VERSION"
fi
uninstall_qt -r
fi
if [ ! -z "$installed_glib_version" -a \
"$installed_glib_version" != "$GLIB_VERSION" ] ; then
echo "Installed GLib version is $installed_glib_version"
if [ -z "$GLIB_VERSION" ] ; then
echo "GLib is not requested"
else
echo "Requested GLib version is $GLIB_VERSION"
fi
uninstall_glib -r
fi
if [ ! -z "$installed_pkg_config_version" -a \
"$installed_pkg_config_version" != "$PKG_CONFIG_VERSION" ] ; then
echo "Installed pkg-config version is $installed_pkg_config_version"
if [ -z "$PKG_CONFIG_VERSION" ] ; then
echo "pkg-config is not requested"
else
echo "Requested pkg-config version is $PKG_CONFIG_VERSION"
fi
uninstall_pkg_config -r
fi
if [ ! -z "$installed_gettext_version" -a \
"$installed_gettext_version" != "$GETTEXT_VERSION" ] ; then
echo "Installed GNU gettext version is $installed_gettext_version"
if [ -z "$GETTEXT_VERSION" ] ; then
echo "GNU gettext is not requested"
else
echo "Requested GNU gettext version is $GETTEXT_VERSION"
fi
uninstall_gettext -r
fi
if [ ! -z "$installed_ninja_version" -a \
"$installed_ninja_version" != "$NINJA_VERSION" ] ; then
echo "Installed Ninja version is $installed_ninja_version"
if [ -z "$NINJA_VERSION" ] ; then
echo "Ninja is not requested"
else
echo "Requested Ninja version is $NINJA_VERSION"
fi
uninstall_ninja -r
fi
if [ ! -z "$installed_asciidoctorpdf_version" -a \
"$installed_asciidoctorpdf_version" != "$ASCIIDOCTORPDF_VERSION" ] ; then
echo "Installed Asciidoctor-pdf version is $installed_asciidoctorpdf_version"
if [ -z "$ASCIIDOCTORPDF_VERSION" ] ; then
echo "Asciidoctor-pdf is not requested"
else
echo "Requested Asciidoctor-pdf version is $ASCIIDOCTORPDF_VERSION"
fi
# XXX - really remove this?
# Or should we remember it as installed only if this script
# installed it?
#
uninstall_asciidoctorpdf -r
fi
if [ ! -z "$installed_asciidoctor_version" -a \
"$installed_asciidoctor_version" != "$ASCIIDOCTOR_VERSION" ] ; then
echo "Installed Asciidoctor version is $installed_asciidoctor_version"
if [ -z "$ASCIIDOCTOR_VERSION" ] ; then
echo "Asciidoctor is not requested"
else
echo "Requested Asciidoctor version is $ASCIIDOCTOR_VERSION"
fi
# XXX - really remove this?
# Or should we remember it as installed only if this script
# installed it?
#
uninstall_asciidoctor -r
fi
if [ ! -z "$installed_cmake_version" -a \
"$installed_cmake_version" != "$CMAKE_VERSION" ] ; then
echo "Installed CMake version is $installed_cmake_version"
if [ -z "$CMAKE_VERSION" ] ; then
echo "CMake is not requested"
else
echo "Requested CMake version is $CMAKE_VERSION"
fi
uninstall_cmake -r
fi
if [ ! -z "$installed_libtool_version" -a \
"$installed_libtool_version" != "$LIBTOOL_VERSION" ] ; then
echo "Installed GNU libtool version is $installed_libtool_version"
if [ -z "$LIBTOOL_VERSION" ] ; then
echo "GNU libtool is not requested"
else
echo "Requested GNU libtool version is $LIBTOOL_VERSION"
fi
uninstall_libtool -r
fi
if [ ! -z "$installed_automake_version" -a \
"$installed_automake_version" != "$AUTOMAKE_VERSION" ] ; then
echo "Installed GNU automake version is $installed_automake_version"
if [ -z "$AUTOMAKE_VERSION" ] ; then
echo "GNU automake is not requested"
else
echo "Requested GNU automake version is $AUTOMAKE_VERSION"
fi
uninstall_automake -r
fi
if [ ! -z "$installed_autoconf_version" -a \
"$installed_autoconf_version" != "$AUTOCONF_VERSION" ] ; then
echo "Installed GNU autoconf version is $installed_autoconf_version"
if [ -z "$AUTOCONF_VERSION" ] ; then
echo "GNU autoconf is not requested"
else
echo "Requested GNU autoconf version is $AUTOCONF_VERSION"
fi
uninstall_autoconf -r
fi
if [ ! -z "$installed_pcre_version" -a \
"$installed_pcre_version" != "$PCRE_VERSION" ] ; then
echo "Installed pcre version is $installed_pcre_version"
if [ -z "$PCRE_VERSION" ] ; then
echo "pcre is not requested"
else
echo "Requested pcre version is $PCRE_VERSION"
fi
uninstall_pcre -r
fi
if [ -n "$installed_pcre2_version" -a \
"$installed_pcre2_version" != "$PCRE2_VERSION" ] ; then
echo "Installed pcre2 version is $installed_pcre2_version"
if [ -z "$PCRE2_VERSION" ] ; then
echo "pcre2 is not requested"
else
echo "Requested pcre2 version is $PCRE2_VERSION"
fi
uninstall_pcre2 -r
fi
if [ ! -z "$installed_lzip_version" -a \
"$installed_lzip_version" != "$LZIP_VERSION" ] ; then
echo "Installed lzip version is $installed_lzip_version"
if [ -z "$LZIP_VERSION" ] ; then
echo "lzip is not requested"
else
echo "Requested lzip version is $LZIP_VERSION"
fi
uninstall_lzip -r
fi
if [ ! -z "$installed_xz_version" -a \
"$installed_xz_version" != "$XZ_VERSION" ] ; then
echo "Installed xz version is $installed_xz_version"
if [ -z "$XZ_VERSION" ] ; then
echo "xz is not requested"
else
echo "Requested xz version is $XZ_VERSION"
fi
uninstall_xz -r
fi
if [ ! -z "$installed_curl_version" -a \
"$installed_curl_version" != "$CURL_VERSION" ] ; then
echo "Installed curl version is $installed_curl_version"
if [ -z "$CURL_VERSION" ] ; then
echo "curl is not requested"
else
echo "Requested curl version is $CURL_VERSION"
fi
uninstall_curl -r
fi
if [ ! -z "$installed_minizip_version" -a \
"$installed_minizip_version" != "$ZLIB_VERSION" ] ; then
echo "Installed minizip (zlib) version is $installed_minizip_version"
if [ -z "$ZLIB_VERSION" ] ; then
echo "minizip is not requested"
else
echo "Requested minizip (zlib) version is $ZLIB_VERSION"
fi
uninstall_minizip -r
fi
if [ ! -z "$installed_sparkle_version" -a \
"$installed_sparkle_version" != "$SPARKLE_VERSION" ] ; then
echo "Installed Sparkle version is $installed_sparkle_version"
if [ -z "$SPARKLE_VERSION" ] ; then
echo "Sparkle is not requested"
else
echo "Requested Sparkle version is $SPARKLE_VERSION"
fi
uninstall_sparkle -r
fi
#
# Start with curl: we may need it to download and install xz.
#
install_curl
#
# Now install xz: it is the sole download format for GLib releases later than 2.31.2.
#
install_xz
install_lzip
install_pcre
install_autoconf
install_automake
install_libtool
install_cmake
install_pcre2
#
# Install Python 3 now; not only is it needed for the Wireshark
# build process, it's also needed for the Meson build system,
# which newer versions of GLib use as their build system.
#
install_python3
#
# Now install Meson and pytest.
#
install_meson
install_pytest
install_ninja
install_asciidoctor
install_asciidoctorpdf
#
# Start with GNU gettext; GLib requires it, and macOS doesn't have it
# or a BSD-licensed replacement.
#
# At least on Lion with Xcode 4, _FORTIFY_SOURCE gets defined as 2
# by default, which causes, for example, stpncpy to be defined as
# a hairy macro that collides with the GNU gettext configure script's
# attempts to workaround AIX's lack of a declaration for stpncpy,
# with the result being a huge train wreck. Define _FORTIFY_SOURCE
# as 0 in an attempt to keep the trains on separate tracks.
#
install_gettext
#
# GLib depends on pkg-config.
# By default, pkg-config depends on GLib; we break the dependency cycle
# by configuring pkg-config to use its own internal version of GLib.
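# (pkg-config's configure script offers a --with-internal-glib option
# for exactly this purpose.)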
#
install_pkg_config
install_glib
#
# Now we have reached a point where we can build everything but
# the GUI (Wireshark).
#
install_qt
#
# Now we have reached a point where we can build everything including
# the GUI (Wireshark), but not with any optional features such as
# SNMP OID resolution, some forms of decryption, Lua scripting, playback
# of audio, or MaxMindDB mapping of IP addresses.
#
# We now conditionally download optional libraries to support them;
# the default is to download them all.
#
install_libsmi
install_libgpg_error
install_libgcrypt
install_gmp
install_libtasn1
install_p11_kit
install_nettle
install_gnutls
install_lua
install_snappy
install_zstd
install_libxml2
install_lz4
install_sbc
install_maxminddb
install_c_ares
install_libssh
install_nghttp2
install_libtiff
install_spandsp
install_speexdsp
install_bcg729
install_ilbc
install_opus
install_brotli
install_minizip
install_sparkle
}
uninstall_all() {
if [ -d "${MACOSX_SUPPORT_LIBS}" ]
then
cd "${MACOSX_SUPPORT_LIBS}"
#
# Uninstall items in the reverse order from the order in which they're
# installed. Only uninstall if the download/build/install process
# completed; uninstall the version that appears in the name of
# the -done file.
#
# We also do a "make distclean", so that we don't have leftovers from
# old configurations.
#
uninstall_sparkle
uninstall_minizip
uninstall_brotli
uninstall_opus
uninstall_ilbc
uninstall_bcg729
uninstall_speexdsp
uninstall_spandsp
uninstall_libtiff
uninstall_nghttp2
uninstall_libssh
uninstall_c_ares
uninstall_maxminddb
uninstall_snappy
uninstall_zstd
uninstall_libxml2
uninstall_lz4
uninstall_sbc
uninstall_lua
uninstall_gnutls
uninstall_nettle
uninstall_p11_kit
uninstall_libtasn1
uninstall_gmp
uninstall_libgcrypt
uninstall_libgpg_error
uninstall_libsmi
uninstall_qt
uninstall_glib
uninstall_pkg_config
uninstall_gettext
uninstall_ninja
#
# XXX - really remove this?
# Or should we remember it as installed only if this script
# installed it?
#
uninstall_asciidoctorpdf
uninstall_asciidoctor
uninstall_pytest
uninstall_meson
uninstall_python3
uninstall_cmake
uninstall_libtool
uninstall_automake
uninstall_autoconf
uninstall_pcre
uninstall_lzip
uninstall_xz
uninstall_curl
fi
}
#
# Do we have permission to write in /usr/local?
#
# If so, assume we have permission to write in its subdirectories.
# (If that's not the case, this test needs to check the subdirectories
# as well.)
#
# If not, do "make install", "make uninstall", "ninja install",
# "ninja uninstall", the removes for dependencies that don't support
# "make uninstall" or "ninja uninstall", the renames of [g]libtool*,
# and the writing of a libffi .pc file with sudo.
#
if [ -w /usr/local ]
then
DO_MAKE_INSTALL="make install"
DO_MAKE_UNINSTALL="make uninstall"
DO_NINJA_INSTALL="ninja -C _build install"
DO_NINJA_UNINSTALL="ninja -C _build uninstall"
DO_TEE_TO_PC_FILE="tee"
DO_RM="rm"
DO_MV="mv"
else
DO_MAKE_INSTALL="sudo make install"
DO_MAKE_UNINSTALL="sudo make uninstall"
DO_NINJA_INSTALL="sudo ninja -C _build install"
DO_NINJA_UNINSTALL="sudo ninja -C _build uninstall"
DO_TEE_TO_PC_FILE="sudo tee"
DO_RM="sudo rm"
DO_MV="sudo mv"
fi
# This script is meant to be run in the source root. The following
# code will attempt to get you there, but is not perfect (particularly
# if someone copies the script).
topdir=`pwd`/`dirname $0`/..
cd $topdir
# Preference of the support libraries directory:
# ${MACOSX_SUPPORT_LIBS}
# ../macosx-support-libs
# ./macosx-support-libs (default if none exists)
if [ ! -d "${MACOSX_SUPPORT_LIBS}" ]; then
unset MACOSX_SUPPORT_LIBS
fi
if [ -d ../macosx-support-libs ]; then
MACOSX_SUPPORT_LIBS=${MACOSX_SUPPORT_LIBS-../macosx-support-libs}
else
MACOSX_SUPPORT_LIBS=${MACOSX_SUPPORT_LIBS-./macosx-support-libs}
fi
#
# If we have SDKs available, the default target OS is the major version
# of the one we're running; get that and strip off the third component
# if present.
#
for i in /Developer/SDKs \
/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs \
/Library/Developer/CommandLineTools/SDKs
do
if [ -d "$i" ]
then
min_osx_target=`sw_vers -productVersion | sed 's/\([0-9]*\)\.\([0-9]*\)\.[0-9]*/\1.\2/'`
break
fi
done
#
# Parse command-line flags:
#
# -h - print help.
# -t <target> - build libraries so that they'll work on the specified
# version of macOS and later versions.
# -u - do an uninstall.
# -n - download all packages, but don't build or install.
#
no_build=false
while getopts ht:un name
do
case $name in
u)
do_uninstall=yes
;;
n)
no_build=true
;;
t)
min_osx_target="$OPTARG"
;;
h|?)
echo "Usage: macos-setup.sh [ -t <target> ] [ -u ] [ -n ]" 1>&1
exit 0
;;
esac
done
#
# Get the version numbers of installed packages, if any.
#
if [ -d "${MACOSX_SUPPORT_LIBS}" ]
then
cd "${MACOSX_SUPPORT_LIBS}"
installed_xz_version=`ls xz-*-done 2>/dev/null | sed 's/xz-\(.*\)-done/\1/'`
installed_lzip_version=`ls lzip-*-done 2>/dev/null | sed 's/lzip-\(.*\)-done/\1/'`
installed_pcre_version=`ls pcre-*-done 2>/dev/null | sed 's/pcre-\(.*\)-done/\1/'`
installed_pcre2_version=$(ls pcre2-*-done 2>/dev/null | sed 's/pcre2-\(.*\)-done/\1/')
installed_autoconf_version=`ls autoconf-*-done 2>/dev/null | sed 's/autoconf-\(.*\)-done/\1/'`
installed_automake_version=`ls automake-*-done 2>/dev/null | sed 's/automake-\(.*\)-done/\1/'`
installed_libtool_version=`ls libtool-*-done 2>/dev/null | sed 's/libtool-\(.*\)-done/\1/'`
installed_cmake_version=`ls cmake-*-done 2>/dev/null | sed 's/cmake-\(.*\)-done/\1/'`
installed_ninja_version=`ls ninja-*-done 2>/dev/null | sed 's/ninja-\(.*\)-done/\1/'`
installed_asciidoctor_version=`ls asciidoctor-*-done 2>/dev/null | sed 's/asciidoctor-\(.*\)-done/\1/'`
installed_asciidoctorpdf_version=`ls asciidoctorpdf-*-done 2>/dev/null | sed 's/asciidoctorpdf-\(.*\)-done/\1/'`
installed_gettext_version=`ls gettext-*-done 2>/dev/null | sed 's/gettext-\(.*\)-done/\1/'`
installed_pkg_config_version=`ls pkg-config-*-done 2>/dev/null | sed 's/pkg-config-\(.*\)-done/\1/'`
installed_glib_version=`ls glib-*-done 2>/dev/null | sed 's/glib-\(.*\)-done/\1/'`
installed_qt_version=`ls qt-*-done 2>/dev/null | sed 's/qt-\(.*\)-done/\1/'`
installed_libsmi_version=`ls libsmi-*-done 2>/dev/null | sed 's/libsmi-\(.*\)-done/\1/'`
installed_libgpg_error_version=`ls libgpg-error-*-done 2>/dev/null | sed 's/libgpg-error-\(.*\)-done/\1/'`
installed_libgcrypt_version=`ls libgcrypt-*-done 2>/dev/null | sed 's/libgcrypt-\(.*\)-done/\1/'`
installed_gmp_version=`ls gmp-*-done 2>/dev/null | sed 's/gmp-\(.*\)-done/\1/'`
installed_libtasn1_version=`ls libtasn1-*-done 2>/dev/null | sed 's/libtasn1-\(.*\)-done/\1/'`
installed_p11_kit_version=`ls p11-kit-*-done 2>/dev/null | sed 's/p11-kit-\(.*\)-done/\1/'`
installed_nettle_version=`ls nettle-*-done 2>/dev/null | sed 's/nettle-\(.*\)-done/\1/'`
installed_gnutls_version=`ls gnutls-*-done 2>/dev/null | sed 's/gnutls-\(.*\)-done/\1/'`
installed_lua_version=`ls lua-*-done 2>/dev/null | sed 's/lua-\(.*\)-done/\1/'`
installed_snappy_version=`ls snappy-*-done 2>/dev/null | sed 's/snappy-\(.*\)-done/\1/'`
installed_zstd_version=`ls zstd-*-done 2>/dev/null | sed 's/zstd-\(.*\)-done/\1/'`
installed_libxml2_version=`ls libxml2-*-done 2>/dev/null | sed 's/libxml2-\(.*\)-done/\1/'`
installed_lz4_version=`ls lz4-*-done 2>/dev/null | sed 's/lz4-\(.*\)-done/\1/'`
installed_sbc_version=`ls sbc-*-done 2>/dev/null | sed 's/sbc-\(.*\)-done/\1/'`
installed_maxminddb_version=`ls maxminddb-*-done 2>/dev/null | sed 's/maxminddb-\(.*\)-done/\1/'`
installed_cares_version=`ls c-ares-*-done 2>/dev/null | sed 's/c-ares-\(.*\)-done/\1/'`
installed_libssh_version=`ls libssh-*-done 2>/dev/null | sed 's/libssh-\(.*\)-done/\1/'`
installed_nghttp2_version=`ls nghttp2-*-done 2>/dev/null | sed 's/nghttp2-\(.*\)-done/\1/'`
installed_libtiff_version=`ls tiff-*-done 2>/dev/null | sed 's/tiff-\(.*\)-done/\1/'`
installed_spandsp_version=`ls spandsp-*-done 2>/dev/null | sed 's/spandsp-\(.*\)-done/\1/'`
installed_speexdsp_version=`ls speexdsp-*-done 2>/dev/null | sed 's/speexdsp-\(.*\)-done/\1/'`
installed_bcg729_version=`ls bcg729-*-done 2>/dev/null | sed 's/bcg729-\(.*\)-done/\1/'`
installed_ilbc_version=`ls ilbc-*-done 2>/dev/null | sed 's/ilbc-\(.*\)-done/\1/'`
installed_opus_version=`ls opus-*-done 2>/dev/null | sed 's/opus-\(.*\)-done/\1/'`
installed_python3_version=`ls python3-*-done 2>/dev/null | sed 's/python3-\(.*\)-done/\1/'`
installed_brotli_version=`ls brotli-*-done 2>/dev/null | sed 's/brotli-\(.*\)-done/\1/'`
installed_minizip_version=`ls minizip-*-done 2>/dev/null | sed 's/minizip-\(.*\)-done/\1/'`
installed_sparkle_version=`ls sparkle-*-done 2>/dev/null | sed 's/sparkle-\(.*\)-done/\1/'`
cd $topdir
fi
if [ "$do_uninstall" = "yes" ]
then
uninstall_all
exit 0
fi
#
# Configure scripts tend to set CFLAGS and CXXFLAGS to "-g -O2" if
# invoked without CFLAGS or CXXFLAGS being set in the environment.
#
# However, we *are* setting them in the environment, for our own
# nefarious purposes, so start them out as "-g -O2".
#
CFLAGS="-g -O2"
CXXFLAGS="-g -O2"
# if no make options are present, set default options
if [ -z "$MAKE_BUILD_OPTS" ] ; then
# by default use 1.5x number of cores for parallel build
MAKE_BUILD_OPTS="-j $(( $(sysctl -n hw.logicalcpu) * 3 / 2))"
fi
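# For example, on a machine with 8 logical cores this sets
# MAKE_BUILD_OPTS to "-j 12".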
#
# If we have a target release, look for the oldest SDK that's for an
# OS equal to or later than that one, and build libraries against it
# rather than against the headers and, more importantly, libraries
# that come with the OS, so that we don't end up with support libraries
# that only work on the OS version on which we built them, not earlier
# versions of the same release, or earlier releases if the minimum is
# earlier.
#
if [ ! -z "$min_osx_target" ]
then
#
# Get the major and minor version of the target release.
# We assume it'll be a while before there's a macOS 100. :-)
#
case "$min_osx_target" in
[1-9][0-9].*)
#
# major.minor.
#
min_osx_target_major=`echo "$min_osx_target" | sed -n 's/\([1-9][0-9]*\)\..*/\1/p'`
min_osx_target_minor=`echo "$min_osx_target" | sed -n 's/[1-9][0-9]*\.\(.*\)/\1/p'`
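# For example, a target of "10.14" yields a major version of 10
# and a minor version of 14.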
;;
[1-9][0-9])
#
# Just a major version number was specified; make the minor
# version 0.
#
min_osx_target_major="$min_osx_target"
min_osx_target_minor=0
;;
*)
echo "macosx-setup.sh: Invalid target release $min_osx_target" 1>&2
exit 1
;;
esac
#
# Search each directory that might contain SDKs.
#
sdkpath=""
for sdksdir in /Developer/SDKs \
/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs \
/Library/Developer/CommandLineTools/SDKs
do
#
# Get a list of all the SDKs.
#
if ! test -d "$sdksdir"
then
#
# There is no directory with that name.
# Move on to the next one in the list, if any.
#
continue
fi
#
# Get a list of all the SDKs in that directory, if any.
# We assume it'll be a while before there's a macOS 100. :-)
#
sdklist=`(cd "$sdksdir"; ls -d MacOSX[1-9][0-9].[0-9]*.sdk 2>/dev/null)`
for sdk in $sdklist
do
#
# Get the major and minor version for this SDK.
#
sdk_major=`echo "$sdk" | sed -n 's/MacOSX\([1-9][0-9]*\)\..*\.sdk/\1/p'`
sdk_minor=`echo "$sdk" | sed -n 's/MacOSX[1-9][0-9]*\.\(.*\)\.sdk/\1/p'`
#
# Is it for the deployment target or some later release?
# Starting with major 11, the minor version no longer matters.
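# For example, with a minimum target of 10.14, MacOSX10.15.sdk and
# MacOSX11.1.sdk would qualify, but MacOSX10.13.sdk would not.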
#
if test "$sdk_major" -gt "$min_osx_target_major" -o \
\( "$sdk_major" -eq "$min_osx_target_major" -a \
\( "$sdk_major" -ge 11 -o \
"$sdk_minor" -ge "$min_osx_target_minor" \) \)
then
#
# Yes, use it.
#
sdkpath="$sdksdir/$sdk"
break 2
fi
done
done
if [ -z "$sdkpath" ]
then
echo "macos-setup.sh: Couldn't find an SDK for macOS $min_osx_target or later" 1>&2
exit 1
fi
SDKPATH="$sdkpath"
echo "Using the $sdk_major.$sdk_minor SDK"
#
# Make sure there are links to /usr/local/include and /usr/local/lib
# in the SDK's usr/local.
#
if [ ! -e $SDKPATH/usr/local/include ]
then
if [ ! -d $SDKPATH/usr/local ]
then
sudo mkdir $SDKPATH/usr/local
fi
sudo ln -s /usr/local/include $SDKPATH/usr/local/include
fi
if [ ! -e $SDKPATH/usr/local/lib ]
then
if [ ! -d $SDKPATH/usr/local ]
then
sudo mkdir $SDKPATH/usr/local
fi
sudo ln -s /usr/local/lib $SDKPATH/usr/local/lib
fi
#
# Set the minimum OS version for which to build to the specified
# minimum target OS version, so we don't, for example, end up using
# linker features supported by the OS version on which we're building
# but not by the target version.
#
VERSION_MIN_FLAGS="-mmacosx-version-min=$min_osx_target"
#
# Compile and link against the SDK.
#
SDKFLAGS="-isysroot $SDKPATH"
fi
export CFLAGS
export CXXFLAGS
#
# You need Xcode or the command-line tools installed to get the compilers (xcrun checks both).
#
if [ ! -x /usr/bin/xcrun ]; then
echo "Please install Xcode (app or command line) first (should be available on DVD or from the Mac App Store)."
exit 1
fi
if [ "$QT_VERSION" ]; then
#
# We need Xcode, not just the command-line tools, installed to build
# Qt.
#
# At least with Xcode 8, /usr/bin/xcodebuild --help fails if only
# the command-line tools are installed and succeeds if Xcode is
# installed. Unfortunately, it fails *with* Xcode 3, but
# /usr/bin/xcodebuild -version works with that and with Xcode 8.
# Hopefully it fails with only the command-line tools installed.
#
if /usr/bin/xcodebuild -version >/dev/null 2>&1; then
:
elif qmake --version >/dev/null 2>&1; then
:
else
echo "Please install Xcode first (should be available on DVD or from the Mac App Store)."
echo "The command-line build tools are not sufficient to build Qt."
echo "Alternatively build QT according to: https://gist.github.com/shoogle/750a330c851bd1a924dfe1346b0b4a08#:~:text=MacOS%2FQt%5C%20Creator-,Go%20to%20Qt%20Creator%20%3E%20Preferences%20%3E%20Build%20%26%20Run%20%3E%20Kits,for%20both%20compilers%2C%20not%20gcc%20."
exit 1
fi
fi
export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig
#
# Do all the downloads and untarring in a subdirectory, so all that
# stuff can be removed once we've installed the support libraries.
#
if [ ! -d "${MACOSX_SUPPORT_LIBS}" ]
then
mkdir "${MACOSX_SUPPORT_LIBS}" || exit 1
fi
cd "${MACOSX_SUPPORT_LIBS}"
install_all
echo ""
#
# Indicate what paths to use for pkg-config and cmake.
#
pkg_config_path=/usr/local/lib/pkgconfig
if [ "$QT_VERSION" ]; then
qt_base_path=$HOME/Qt$QT_VERSION/$QT_VERSION/clang_64
pkg_config_path="$pkg_config_path":"$qt_base_path/lib/pkgconfig"
CMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH":"$qt_base_path/lib/cmake"
fi
if $no_build; then
echo "All required dependencies downloaded. Run without -n to install them."
exit 0
fi
if [ "$QT_VERSION" ]; then
if [ -f qt-$QT_VERSION-done ]; then
echo "You are now prepared to build Wireshark."
else
echo "Qt was not installed; you will have to install it in order to build the"
echo "Wireshark application, but you can build all the command-line tools in"
echo "the Wireshark distribution."
echo ""
echo "See section 2.1.1. \"Build environment setup\" of the Wireshark Developer's"
echo "Guide for instructions on how to install Qt."
fi
else
echo "You did not install Qt; you will have to install it in order to build"
echo "the Wireshark application, but you can build all the command-line tools in"
echo "the Wireshark distribution."
fi
echo
echo "To build:"
echo
echo "export PKG_CONFIG_PATH=$pkg_config_path"
echo "export CMAKE_PREFIX_PATH=$CMAKE_PREFIX_PATH"
echo "export PATH=$PATH:$qt_base_path/bin"
echo
echo "mkdir build; cd build"
if [ ! -z "$NINJA_VERSION" ]; then
echo "cmake -G Ninja .."
echo "ninja wireshark_app_bundle logray_app_bundle # (Modify as needed)"
echo "ninja install/strip"
else
echo "cmake .."
echo "make $MAKE_BUILD_OPTS wireshark_app_bundle logray_app_bundle # (Modify as needed)"
echo "make install/strip"
fi
echo
echo "Make sure you are allowed capture access to the network devices"
echo "See: https://gitlab.com/wireshark/wireshark/-/wikis/CaptureSetup/CapturePrivileges"
echo
exit 0 |
Python | wireshark/tools/make-authors-csv.py | #!/usr/bin/env python3
#
# Generate the authors.csv file.
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
'''\
Remove tasks from individual author entries in the AUTHORS file
for use in the "About" dialog.
'''
import io
import re
import sys
def remove_tasks(stdinu8):
in_subinfo = False
all_lines = []
# Assume the first line is blank and skip it. make-authors-short.pl
# skipped over the UTF-8 BOM as well. Do we need to do that here?
stdinu8.readline()
for line in stdinu8:
sub_m = re.search(r'(.*?)\s*\{', line)
if sub_m:
in_subinfo = True
all_lines.append(sub_m.group(1))
elif '}' in line:
in_subinfo = False
nextline = next(stdinu8)
if not re.match(r'^\s*$', nextline):
# if '{' in nextline:
# stderru8.write("No blank line after '}', found " + nextline)
all_lines.append(nextline)
elif in_subinfo:
continue
else:
all_lines.append(line)
return all_lines
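# As an illustrative sketch (the name and task below are made up),
# an AUTHORS entry along the lines of
#
#     Jane Doe <jane[AT]example.com> {
#         Frobnicator dissector
#     }
#
# is reduced by remove_tasks() to the single line
# "Jane Doe <jane[AT]example.com>".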
def main():
stdinu8 = io.TextIOWrapper(sys.stdin.buffer, encoding='utf8')
stdoutu8 = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')
stderru8 = io.TextIOWrapper(sys.stderr.buffer, encoding='utf8')
lines = remove_tasks(stdinu8)
patt = re.compile("(.*)[<(]([\\s'a-zA-Z0-9._%+-]+(\\[[Aa][Tt]\\])?[a-zA-Z0-9._%+-]+)[>)]")
for line in lines:
match = patt.match(line)
if match:
name = match.group(1).strip()
mail = match.group(2).strip().replace("[AT]", "@")
stdoutu8.write("{},{}\n".format(name, mail))
if __name__ == '__main__':
main() |
Python | wireshark/tools/make-enterprises.py | #!/usr/bin/env python3
# create the enterprises.c file from
# https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers
# or an offline copy
#
# Copyright 2022 by Moshe Kaplan
# Based on make-sminmpec.pl by Gerald Combs
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 2004 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
import os
import argparse
import re
import urllib.request
ENTERPRISES_CFILE = os.path.join('epan', 'enterprises.c')
ENTERPRISE_NUMBERS_URL = "https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers"
DECIMAL_PATTERN = r"^(\d+)"
# up to three spaces because of formatting errors in the source
ORGANIZATION_PATTERN = r"^ ?(\S.*)"
FORMERLY_PATTERN = r" \(((formerly|previously) .*)\)"
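# For reference, entries in the registry file look roughly like this
# (decimal number, organization, contact, and email on successive,
# increasingly indented lines):
#
# 32473
#   Example Organization
#     Jane Doe
#       jane&example.com
#
# (32473 is the enterprise number IANA reserves for documentation use;
# the organization and contact above are made up.)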
LOOKUP_FUNCTION = r"""
const char* global_enterprises_lookup(uint32_t value)
{
if (value > table.max_idx) {
return NULL;
}
else return table.values[value];
}
"""
DUMP_FUNCTION = r"""
void global_enterprises_dump(FILE *fp)
{
for (size_t idx = 0; idx <= table.max_idx; idx++) {
if (table.values[idx] != NULL) {
fprintf(fp, "%zu\t%s\n", idx, table.values[idx]);
}
}
}
"""
# This intermediate format is no longer written to a file - returned as string
def generate_enterprise_entries(file_content):
# We only care about the "Decimal" and "Organization",
# not the contact or email
org_lines = []
last_updated = ""
end_seen = False
for line in file_content.splitlines():
decimal_match = re.match(DECIMAL_PATTERN, line)
if decimal_match:
decimal = decimal_match.group(0)
elif re.match(ORGANIZATION_PATTERN, line):
organization = line.strip()
if organization.lower() == "unassigned":
continue
organization = re.sub(FORMERLY_PATTERN, r"\t# \1", organization)
org_lines += [decimal + "\t" + organization]
elif "last updated" in line.lower():
last_updated = line
elif "end of document" in line.lower():
end_seen = True
if not end_seen:
raise Exception('"End of Document" not found. Truncated source file?')
last_updated_line = "/* " + last_updated + " */\n\n"
output = "\n".join(org_lines) + "\n"
return (output,last_updated_line)
class CFile:
def __init__(self, filename, last_updated_line):
self.filename = filename
self.f = open(filename, 'w')
self.mappings = {}
self.highest_num = 0
# Write file header
self.f.write('/* ' + os.path.basename(self.filename) + '\n')
self.f.write(' *\n')
self.f.write(' * Wireshark - Network traffic analyzer\n')
self.f.write(' * By Gerald Combs <[email protected]>\n')
self.f.write(' * Copyright 1998 Gerald Combs\n')
self.f.write(' *\n')
self.f.write(' * Do not edit - this file is automatically generated\n')
self.f.write(' * SPDX-License-Identifier: GPL-2.0-or-later\n')
self.f.write(' */\n\n')
self.f.write(last_updated_line)
# Include header files
self.f.write('#include "config.h"\n\n')
self.f.write('#include <stddef.h>\n')
self.f.write('#include "enterprises.h"\n')
self.f.write('\n\n')
def __del__(self):
self.f.write('typedef struct\n')
self.f.write('{\n')
self.f.write(' uint32_t max_idx;\n')
self.f.write(' const char* values[' + str(self.highest_num+1) + '];\n')
self.f.write('} global_enterprises_table_t;\n\n')
# Write static table
self.f.write('static global_enterprises_table_t table =\n')
self.f.write('{\n')
# Largest index
self.f.write(' ' + str(self.highest_num) + ',\n')
self.f.write(' {\n')
# Entries (read from dict)
for n in range(0, self.highest_num+1):
if n not in self.mappings:
# There are some gaps; write a NULL entry so lookups by index still work
line = ' NULL'
else:
line = ' "' + self.mappings[n] + '"'
# Add comma.
if n < self.highest_num:
line += ','
# Add number as aligned comment.
line += ' '*(90-len(line)) + '// ' + str(n)
self.f.write(line+'\n')
# End of array
self.f.write(' }\n')
# End of struct
self.f.write('};\n')
print('Re-generated', self.filename)
# Lookup function
self.f.write(LOOKUP_FUNCTION)
# Dump function
self.f.write(DUMP_FUNCTION)
# Add an individual mapping to the function
def addMapping(self, num, name):
# Escape backslashes and double quotes so the name remains a valid
# C string literal
name = name.replace('\\', '\\\\')
name = name.replace('"', '\\"')
# Record.
self.mappings[num] = name
self.highest_num = num if num>self.highest_num else self.highest_num
def main():
parser = argparse.ArgumentParser(description="Create the {} file.".format(ENTERPRISES_CFILE))
parser.add_argument('--infile')
parser.add_argument('outfile', nargs='?', default=ENTERPRISES_CFILE)
parsed_args = parser.parse_args()
# Read data from file or webpage
if parsed_args.infile:
with open(parsed_args.infile, encoding='utf-8') as fh:
data = fh.read()
else:
with urllib.request.urlopen(ENTERPRISE_NUMBERS_URL) as f:
if f.status != 200:
raise Exception("request for " + ENTERPRISE_NUMBERS_URL + " failed with result code " + f.status)
data = f.read().decode('utf-8')
# Find bits we need and generate enterprise entries
enterprises_content,last_updated_line = generate_enterprise_entries(data)
# Now write to a C file the contents (which is faster than parsing the global file at runtime).
c_file = CFile(parsed_args.outfile, last_updated_line)
mapping_re = re.compile(r'^(\d+)\s+(.*)$')
for line in enterprises_content.splitlines():
match = mapping_re.match(line)
if match:
num, name = match.group(1), match.group(2)
# Strip any comments and/or trailing whitespace
idx = name.find('#')
if idx != -1:
name = name[0:idx]
name = name.rstrip()
# Add
c_file.addMapping(int(num), name)
if __name__ == "__main__":
main() |
Python | wireshark/tools/make-enums.py | #!/usr/bin/env python3
#
# Copyright 2021, João Valverde <[email protected]>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
#
# Uses pyclibrary to parse C headers for enums and integer macro
# definitions. Exports that data to a C file for the introspection API.
#
# Requires: https://github.com/MatthieuDartiailh/pyclibrary
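# (typically installable with "pip install pyclibrary")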
#
import os
import sys
import argparse
from pyclibrary import CParser
def parse_files(infiles, outfile):
print("Input: {}".format(infiles))
print("Output: '{}'".format(outfile))
parser = CParser(infiles)
source = """\
/*
* Wireshark - Network traffic analyzer
* By Gerald Combs <[email protected]>
* Copyright 1998 Gerald Combs
*
* SPDX-License-Identifier: GPL-2.0-or-later
*
* Generated automatically from %s. It can be re-created by running
* "tools/make-enums.py" from the top source directory.
*
* It is fine to edit this file by hand. Particularly if a symbol
* disappears from the API it can just be removed here. There is no
* requirement to re-run the generator script.
*
*/
""" % (os.path.basename(sys.argv[0]))
for f in infiles:
source += '#include <{}>\n'.format(f)
source += """
#define ENUM(arg) { #arg, arg }
static ws_enum_t all_enums[] = {
"""
definitions = parser.defs['values']
symbols = list(definitions.keys())
symbols.sort()
for s in symbols:
if isinstance(definitions[s], int):
source += ' ENUM({}),\n'.format(s)
source += """\
{ NULL, 0 },
};
"""
try:
fh = open(outfile, 'w')
except OSError:
sys.exit('Unable to write ' + outfile + '.\n')
fh.write(source)
fh.close()
epan_files = [
"epan/address.h",
"epan/ipproto.h",
"epan/proto.h",
"epan/ftypes/ftypes.h",
"epan/stat_groups.h",
]
parse_files(epan_files, "epan/introspection-enums.c")
wtap_files = [
"wiretap/wtap.h",
]
parse_files(wtap_files, "wiretap/introspection-enums.c")
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
# |
Python | wireshark/tools/make-manuf.py | #!/usr/bin/env python3
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
'''Update the "manuf" file.
Make-manuf creates a file containing ethernet OUIs and their company
IDs from the databases at IEEE.
'''
import csv
import html
import io
import os
import re
import sys
import urllib.request, urllib.error, urllib.parse
have_icu = False
try:
# Use the grapheme or segments module instead?
import icu
have_icu = True
except ImportError:
pass
def exit_msg(msg=None, status=1):
if msg is not None:
sys.stderr.write(msg + '\n\n')
sys.stderr.write(__doc__ + '\n')
sys.exit(status)
def open_url(url):
'''Open a URL.
Returns the response body as a str so that it can be passed to
csv.reader.
'''
if len(sys.argv) > 1:
url_path = os.path.join(sys.argv[1], url[1])
url_fd = open(url_path)
body = url_fd.read()
url_fd.close()
else:
url_path = '/'.join(url)
req_headers = { 'User-Agent': 'Wireshark make-manuf' }
try:
req = urllib.request.Request(url_path, headers=req_headers)
response = urllib.request.urlopen(req)
body = response.read().decode('UTF-8', 'replace')
except Exception:
exit_msg('Error opening ' + url_path)
return body
# These are applied after punctuation has been removed.
# More examples at https://en.wikipedia.org/wiki/Incorporation_(business)
general_terms = '|'.join([
' a +s\\b', # A/S and A.S. but not "As" as in "Connect As".
' ab\\b', # Also follows "Oy", which is covered below.
' ag\\b',
' b ?v\\b',
' closed joint stock company\\b',
' co\\b',
' company\\b',
' corp\\b',
' corporation\\b',
' corporate\\b',
' de c ?v\\b', # Follows "S.A.", which is covered separately below.
' gmbh\\b',
' holding\\b',
' inc\\b',
' incorporated\\b',
' jsc\\b',
' kg\\b',
' k k\\b', # "K.K." as in "kabushiki kaisha", but not "K+K" as in "K+K Messtechnik".
' limited\\b',
' llc\\b',
' ltd\\b',
' n ?v\\b',
' oao\\b',
' of\\b',
' open joint stock company\\b',
' ooo\\b',
' oü\\b',
' oy\\b',
' oyj\\b',
' plc\\b',
' pty\\b',
' pvt\\b',
' s ?a ?r ?l\\b',
' s ?a\\b',
' s ?p ?a\\b',
' sp ?k\\b',
' s ?r ?l\\b',
' systems\\b',
'\\bthe\\b',
' zao\\b',
' z ?o ?o\\b'
])
# Chinese company names tend to start with the location, skip it (non-exhaustive list).
skip_start = [
'shengzen',
'shenzhen',
'beijing',
'shanghai',
'wuhan',
'hangzhou',
'guangxi',
]
# Special cases handled directly
special_case = {
"Advanced Micro Devices": "AMD",
}
def shorten(manuf):
'''Convert a long manufacturer name to abbreviated and short names'''
# Normalize whitespace.
manuf = ' '.join(manuf.split())
orig_manuf = manuf
# Convert all caps to title case
if manuf.isupper():
manuf = manuf.title()
# Remove the contents of parenthesis as ancillary data
manuf = re.sub(r"\(.*\)", '', manuf)
# Remove "a" before removing punctuation ("Aruba, a Hewlett [...]" etc.)
manuf = manuf.replace(" a ", " ")
# Remove any punctuation
# XXX Use string.punctuation? Note that it includes '-' and '*'.
manuf = re.sub(r"[\"',./:()+-]", ' ', manuf)
# & isn't needed when Standalone
manuf = manuf.replace(" & ", " ")
# Remove business types and other general terms ("the", "inc", "plc", etc.)
plain_manuf = re.sub(general_terms, '', manuf, flags=re.IGNORECASE)
# ...but make sure we don't remove everything.
if not all(s == ' ' for s in plain_manuf):
manuf = plain_manuf
manuf = manuf.strip()
# Check for special case
if manuf in special_case.keys():
manuf = special_case[manuf]
split = manuf.split()
if len(split) > 1 and split[0].lower() in skip_start:
manuf = ' '.join(split[1:])
# Remove all spaces
manuf = re.sub(r'\s+', '', manuf)
if len(manuf) < 1:
sys.stderr.write('Manufacturer "{}" shortened to nothing.\n'.format(orig_manuf))
sys.exit(1)
# Truncate names to a reasonable length, say, 12 characters. If
# the string contains UTF-8, this may be substantially more than
# 12 bytes. It might also be less than 12 visible characters. Plain
# Python slices Unicode strings by code point, which is better
# than raw bytes but not as good as grapheme clusters. PyICU
# supports grapheme clusters. https://bugs.python.org/issue30717
#
# Truncate by code points
trunc_len = 12
if have_icu:
# Truncate by grapheme clusters
bi_ci = icu.BreakIterator.createCharacterInstance(icu.Locale('en_US'))
bi_ci.setText(manuf)
bounds = list(bi_ci)
bounds = bounds[0:trunc_len]
trunc_len = bounds[-1]
manuf = manuf[:trunc_len]
if manuf.lower() == orig_manuf.lower():
# Original manufacturer name was short and simple.
return [manuf, None]
mixed_manuf = orig_manuf
# At least one entry has whitespace in front of a period.
mixed_manuf = re.sub(r'\s+\.', '.', mixed_manuf)
# If the company name is all caps, convert it to mixed case (so it doesn't look like we're screaming the company name)
if mixed_manuf.upper() == mixed_manuf:
mixed_manuf = mixed_manuf.title()
return [manuf, mixed_manuf]
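# For example, shorten("Advanced Micro Devices, Inc.") returns
# ["AMD", "Advanced Micro Devices, Inc."]: the special-case table
# maps the cleaned-up name to "AMD", and the original mixed-case
# name is kept as the long form.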
MA_L = 'MA_L'
MA_M = 'MA_M'
MA_S = 'MA_S'
def prefix_to_oui(prefix, prefix_map):
pfx_len = int(len(prefix) * 8 / 2)
prefix24 = prefix[:6]
oui24 = ':'.join(hi + lo for hi, lo in zip(prefix24[0::2], prefix24[1::2]))
if pfx_len == 24:
# 24-bit OUI assignment, no mask
return oui24, MA_L
# Other lengths which require a mask.
oui = prefix.ljust(12, '0')
oui = ':'.join(hi + lo for hi, lo in zip(oui[0::2], oui[1::2]))
if pfx_len == 28:
    kind = MA_M
elif pfx_len == 36:
    kind = MA_S
else:
    raise ValueError('Unexpected prefix length {} for prefix {}'.format(pfx_len, prefix))
prefix_map[oui24] = kind
return '{}/{:d}'.format(oui, int(pfx_len)), kind
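# For example, prefix_to_oui("0050C2DD6", prefix_map) returns
# ("00:50:C2:DD:60:00/36", MA_S) and records the 24-bit prefix
# "00:50:C2" as MA_S in prefix_map.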
def main():
this_dir = os.path.dirname(__file__)
manuf_path = os.path.join('epan', 'manuf-data.c')
ieee_d = {
'OUI': { 'url': ["https://standards-oui.ieee.org/oui/", "oui.csv"], 'min_entries': 1000 },
'CID': { 'url': ["https://standards-oui.ieee.org/cid/", "cid.csv"], 'min_entries': 75 },
'IAB': { 'url': ["https://standards-oui.ieee.org/iab/", "iab.csv"], 'min_entries': 1000 },
'OUI28': { 'url': ["https://standards-oui.ieee.org/oui28/", "mam.csv"], 'min_entries': 1000 },
'OUI36': { 'url': ["https://standards-oui.ieee.org/oui36/", "oui36.csv"], 'min_entries': 1000 },
}
oui_d = {
MA_L: {},
MA_M: {},
MA_S: {},
}
min_total = 35000  # 35830 as of 2018-09-05
total_added = 0
# Add IEEE entries from each of their databases
ieee_db_l = ['OUI', 'OUI28', 'OUI36', 'CID', 'IAB']
# map a 24-bit prefix to MA-M/MA-S or none (MA-L by default)
prefix_map = {}
for db in ieee_db_l:
db_url = ieee_d[db]['url']
ieee_d[db]['skipped'] = 0
ieee_d[db]['added'] = 0
ieee_d[db]['total'] = 0
print('Merging {} data from {}'.format(db, db_url))
body = open_url(db_url)
ieee_csv = csv.reader(body.splitlines())
# Pop the title row.
next(ieee_csv)
for ieee_row in ieee_csv:
#Registry,Assignment,Organization Name,Organization Address
#IAB,0050C2DD6,Transas Marine Limited,Datavagen 37 Askim Vastra Gotaland SE 436 32
oui, kind = prefix_to_oui(ieee_row[1].upper(), prefix_map)
manuf = ieee_row[2].strip()
# The Organization Name field occasionally contains HTML entities. Undo them.
manuf = html.unescape(manuf)
if manuf == 'IEEE Registration Authority':
continue
if manuf == 'Private':
continue
if oui in oui_d[kind]:
action = 'Skipping'
print('{} - {} IEEE "{}" in favor of "{}"'.format(oui, action, manuf, oui_d[kind][oui]))
ieee_d[db]['skipped'] += 1
else:
oui_d[kind][oui] = shorten(manuf)
ieee_d[db]['added'] += 1
ieee_d[db]['total'] += 1
if ieee_d[db]['total'] < ieee_d[db]['min_entries']:
exit_msg("Too few {} entries. Got {}, wanted {}".format(db, ieee_d[db]['total'], ieee_d[db]['min_entries']))
total_added += ieee_d[db]['total']
if total_added < min_total:
exit_msg("Too few total entries ({})".format(total_added))
try:
manuf_fd = io.open(manuf_path, 'w', encoding='UTF-8')
except Exception:
exit_msg("Couldn't open manuf file for reading ({}) ".format(manuf_path))
manuf_fd.write('''/*
* This file was generated by running ./tools/make-manuf.py.
*
* SPDX-License-Identifier: GPL-2.0-or-later
*
* The data below has been assembled from the following sources:
*
* The IEEE public OUI listings available from:
* <http://standards-oui.ieee.org/oui/oui.csv>
* <http://standards-oui.ieee.org/cid/cid.csv>
* <http://standards-oui.ieee.org/iab/iab.csv>
* <http://standards-oui.ieee.org/oui28/mam.csv>
* <http://standards-oui.ieee.org/oui36/oui36.csv>
*
*/
''')
# Write the prefix map
manuf_fd.write("static manuf_registry_t ieee_registry_table[] = {\n")
keys = list(prefix_map.keys())
keys.sort()
for oui in keys:
manuf_fd.write(" {{ {{ 0x{}, 0x{}, 0x{} }}, {} }},\n".format(oui[0:2], oui[3:5], oui[6:8], prefix_map[oui]))
manuf_fd.write("};\n\n")
# write the MA-L table
manuf_fd.write("static manuf_oui24_t global_manuf_oui24_table[] = {\n")
keys = list(oui_d[MA_L].keys())
keys.sort()
for oui in keys:
short = oui_d[MA_L][oui][0]
if oui_d[MA_L][oui][1]:
long = oui_d[MA_L][oui][1]
else:
long = short
line = " {{ {{ 0x{}, 0x{}, 0x{} }}, \"{}\", ".format(oui[0:2], oui[3:5], oui[6:8], short)
sep = 44 - len(line)
if sep <= 0:
sep = 0
line += sep * ' '
line += "\"{}\" }},\n".format(long.replace('"', '\\"'))
manuf_fd.write(line)
manuf_fd.write("};\n\n")
# write the MA-M table
manuf_fd.write("static manuf_oui28_t global_manuf_oui28_table[] = {\n")
keys = list(oui_d[MA_M].keys())
keys.sort()
for oui in keys:
short = oui_d[MA_M][oui][0]
if oui_d[MA_M][oui][1]:
long = oui_d[MA_M][oui][1]
else:
long = short
line = " {{ {{ 0x{}, 0x{}, 0x{}, 0x{} }}, \"{}\", ".format(oui[0:2], oui[3:5], oui[6:8], oui[9:11], short)
sep = 50 - len(line)
if sep <= 0:
sep = 0
line += sep * ' '
line += "\"{}\" }},\n".format(long.replace('"', '\\"'))
manuf_fd.write(line)
manuf_fd.write("};\n\n")
#write the MA-S table
manuf_fd.write("static manuf_oui36_t global_manuf_oui36_table[] = {\n")
keys = list(oui_d[MA_S].keys())
keys.sort()
for oui in keys:
short = oui_d[MA_S][oui][0]
if oui_d[MA_S][oui][1]:
long = oui_d[MA_S][oui][1]
else:
long = short
line = " {{ {{ 0x{}, 0x{}, 0x{}, 0x{}, 0x{} }}, \"{}\", ".format(oui[0:2], oui[3:5], oui[6:8], oui[9:11], oui[12:14], short)
sep = 56 - len(line)
if sep <= 0:
sep = 0
line += sep * ' '
line += "\"{}\" }},\n".format(long.replace('"', '\\"'))
manuf_fd.write(line)
manuf_fd.write("};\n")
manuf_fd.close()
for db in ieee_d:
print('{:<20}: {}'.format('IEEE ' + db + ' added', ieee_d[db]['added']))
print('{:<20}: {}'.format('Total added', total_added))
print()
for db in ieee_d:
print('{:<20}: {}'.format('IEEE ' + db + ' total', ieee_d[db]['total']))
print()
for db in ieee_d:
print('{:<20}: {}'.format('IEEE ' + db + ' skipped', ieee_d[db]['skipped']))
if __name__ == '__main__':
main() |
Python | wireshark/tools/make-no-reassembly-profile.py | #!/usr/bin/env python3
#
# Generate preferences for a "No Reassembly" profile.
# By Gerald Combs <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
'''Generate preferences for a "No Reassembly" profile.'''
import argparse
import os.path
import re
import subprocess
import sys
MIN_PLUGINS = 10
def main():
parser = argparse.ArgumentParser(description='No reassembly profile generator')
parser.add_argument('-p', '--program-path', default=os.path.curdir, help='Path to TShark.')
parser.add_argument('-v', '--verbose', action='store_const', const=True, default=False, help='Verbose output.')
args = parser.parse_args()
this_dir = os.path.dirname(__file__)
profile_path = os.path.join(this_dir, '..', 'resources', 'share', 'wireshark', 'profiles', 'No Reassembly', 'preferences')
tshark_path = os.path.join(args.program_path, 'tshark')
if not os.path.isfile(tshark_path):
print('tshark not found at {}\n'.format(tshark_path))
parser.print_usage()
sys.exit(1)
# Make sure plugin prefs are present.
cp = subprocess.run([tshark_path, '-G', 'plugins'], stdout=subprocess.PIPE, check=True, encoding='utf-8')
plugin_lines = cp.stdout.splitlines()
    dissector_count = len(tuple(filter(lambda p: re.search(r'\sdissector\s', p), plugin_lines)))
if dissector_count < MIN_PLUGINS:
print('Found {} plugins but require {}.'.format(dissector_count, MIN_PLUGINS))
sys.exit(1)
    rd_pref_re = re.compile(r'^#\s*(.*(reassembl|desegment)\S*):\s*TRUE')
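    # Example (illustrative): a `tshark -G defaultprefs` line such as
    #   #tcp.desegment_tcp_streams: TRUE
    # is captured by the pattern above and rewritten below as
    #   tcp.desegment_tcp_streams: FALSE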
out_prefs = [
'# Generated by ' + os.path.basename(__file__), '',
'####### Protocols ########', '',
]
cp = subprocess.run([tshark_path, '-G', 'defaultprefs'], stdout=subprocess.PIPE, check=True, encoding='utf-8')
pref_lines = cp.stdout.splitlines()
for pref_line in pref_lines:
m = rd_pref_re.search(pref_line)
if m:
rd_pref = m.group(1) + ': FALSE'
if args.verbose is True:
print(rd_pref)
out_prefs.append(rd_pref)
if len(pref_lines) < 5000:
print("Too few preference lines.")
sys.exit(1)
if len(out_prefs) < 150:
print("Too few changed preferences.")
sys.exit(1)
with open(profile_path, 'w') as profile_f:
for pref_line in out_prefs:
profile_f.write(pref_line + '\n')
if __name__ == '__main__':
main() |
Python | wireshark/tools/make-packet-dcm.py | #!/usr/bin/env python3
import os.path
import sys
import itertools
import lxml.etree
# This utility scrapes the DICOM standard document in DocBook format, finds the appropriate tables,
# and extracts the data needed to build the lists of DICOM attributes, UIDs and value representations.
# If the files part05.xml, part06.xml and part07.xml exist in the current directory, use them.
# Otherwise, download the current release from the current DICOM official sources.
if os.path.exists("part05.xml"):
print("Using local part05 docbook.", file=sys.stderr)
part05 = lxml.etree.parse("part05.xml")
else:
print("Downloading part05 docbook...", file=sys.stderr)
part05 = lxml.etree.parse("http://dicom.nema.org/medical/dicom/current/source/docbook/part05/part05.xml")
if os.path.exists("part06.xml"):
print("Using local part06 docbook.", file=sys.stderr)
part06 = lxml.etree.parse("part06.xml")
else:
print("Downloading part06 docbook...", file=sys.stderr)
part06 = lxml.etree.parse("http://dicom.nema.org/medical/dicom/current/source/docbook/part06/part06.xml")
if os.path.exists("part07.xml"):
print("Using local part07 docbook.", file=sys.stderr)
part07 = lxml.etree.parse("part07.xml")
else:
print("Downloading part07 docbook...", file=sys.stderr)
part07 = lxml.etree.parse("http://dicom.nema.org/medical/dicom/current/source/docbook/part07/part07.xml")
dbns = {'db':'http://docbook.org/ns/docbook', 'xml':'http://www.w3.org/XML/1998/namespace'}
# When displaying the dissected packets, some attributes are nice to include in the description of their parent.
include_in_parent = {"Patient Position",
"ROI Number",
"ROI Name",
"Contour Geometric Type",
"Observation Number",
"ROI Observation Label",
"RT ROI Interpreted Type",
"Dose Reference Structure Type",
"Dose Reference Description",
"Dose Reference Type",
"Target Prescription Dose",
"Tolerance Table Label",
"Beam Limiting Device Position Tolerance",
"Number of Fractions Planned",
"Treatment Machine Name",
"RT Beam Limiting Device Type",
"Beam Number",
"Beam Name",
"Beam Type",
"Radiation Type",
"Wedge Type",
"Wedge ID",
"Wedge Angle",
"Material ID",
"Block Tray ID",
"Block Name",
"Applicator ID",
"Applicator Type",
"Control Point Index",
"Nominal Beam Energy",
"Cumulative Meterset Weight",
"Patient Setup Number"}
# Data elements are listed in three tables in Part 6:
# * Table 6-1. Registry of DICOM Data Elements
# * Table 7-1. Registry of DICOM File Meta Elements
# * Table 8-1. Registry of DICOM Directory Structuring Elements
# All three tables are in the same format and can be merged for processing.
# The Command data elements (used only in networking), are listed in two tables in Part 7:
# * Table E.1-1. Command Fields
# * Table E.2-1. Retired Command Fields
# The Retired Command Fields are missing the last column. For processing here,
# we just add a last column with "RET", so they can be parsed with the same
# code as for the Data elements.
data_element_tables=["table_6-1", "table_7-1", "table_8-1"]
def get_trs(document, table_id):
return document.findall(f"//db:table[@xml:id='{table_id}']/db:tbody/db:tr",
namespaces=dbns)
data_trs = sum((get_trs(part06, table_id) for table_id in data_element_tables), [])
cmd_trs = get_trs(part07, "table_E.1-1")
retired_cmd_trs = get_trs(part07, "table_E.2-1")
def get_texts_in_row(tr):
tds = tr.findall("db:td", namespaces=dbns)
texts = [" ".join(x.replace('\u200b', '').replace('\u00b5', 'u').strip() for x in td.itertext() if x.strip() != '') for td in tds]
return texts
data_rows = [get_texts_in_row(x) for x in data_trs]
retired_cmd_rows = [get_texts_in_row(x) for x in retired_cmd_trs]
cmd_rows = ([get_texts_in_row(x) for x in cmd_trs] +
[x + ["RET"] for x in retired_cmd_rows])
def parse_tag(tag):
# To handle some old cases where "x" is included as part of the tag number
tag = tag.replace("x", "0")
return f"0x{tag[1:5]}{tag[6:10]}"
def parse_ret(ret):
if ret.startswith("RET"):
return -1
else:
return 0
def include_in_parent_bit(name):
if name in include_in_parent:
return -1
else:
return 0
def text_for_row(row):
return f' {{ {parse_tag(row[0])}, "{row[1]}", "{row[3]}", "{row[4]}", {parse_ret(row[5])}, {include_in_parent_bit(row[1])}}},'
def text_for_rows(rows):
return "\n".join(text_for_row(row) for row in rows)
vrs = {i+1: get_texts_in_row(x)[0].split(maxsplit=1) for i,x in enumerate(get_trs(part05, "table_6.2-1"))}
# Table A-1. UID Values
uid_trs = get_trs(part06, "table_A-1")
uid_rows = [get_texts_in_row(x) for x in uid_trs]
import re
def definify(s):
    return re.sub('[^A-Z0-9]+', '_', re.sub(' +', ' ', re.sub('[^-A-Z0-9 ]+', '', s.upper())))
def uid_define_name(uid):
    if uid[1] == "(Retired)":
        return f'"{uid[0]}"'
    uid_type = uid[3]
    uid_name = re.sub(":.*", "", uid[1])
    if uid_name.endswith(uid_type):
        uid_name = uid_name[:-len(uid_type)].strip()
    return f"DCM_UID_{definify(uid_type)}_{definify(uid_name)}"
uid_rows = sorted(uid_rows, key=lambda uid_row: [int(i) for i in uid_row[0].split(".")])
packet_dcm_h = """/* packet-dcm.h
* Definitions for DICOM dissection
* Copyright 2003, Rich Coe <[email protected]>
* Copyright 2008-2018, David Aggeler <[email protected]>
*
* DICOM communication protocol: https://www.dicomstandard.org/current/
*
* Generated automatically by """ + os.path.basename(sys.argv[0]) + """ from the following sources:
*
* """ + part05.find("./db:subtitle", namespaces=dbns).text + """
* """ + part06.find("./db:subtitle", namespaces=dbns).text + """
* """ + part07.find("./db:subtitle", namespaces=dbns).text + """
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <[email protected]>
* Copyright 1998 Gerald Combs
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef __PACKET_DCM_H__
#define __PACKET_DCM_H__
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
""" + "\n".join(f"#define DCM_VR_{vr[0]} {i:2d} /* {vr[1]:25s} */" for i,vr in vrs.items()) + """
/* Following must be in the same order as the definitions above */
static const gchar* dcm_tag_vr_lookup[] = {
" ",
""" + ",\n ".join(",".join(f'"{x[1][0]}"' for x in j[1]) for j in itertools.groupby(vrs.items(), lambda i: (i[0]-1)//8)) + """
};
/* ---------------------------------------------------------------------
* DICOM Tag Definitions
*
* Some Tags can have different VRs
*
* Group 1000 is not supported, multiple tags with same description (retired anyhow)
* Group 7Fxx is not supported, multiple tags with same description (retired anyhow)
*
* Tags (0020,3100 to 0020, 31FF) not supported, multiple tags with same description (retired anyhow)
*
* Repeating groups (50xx & 60xx) are manually added. Declared as 5000 & 6000
*/
typedef struct dcm_tag {
const guint32 tag;
const gchar *description;
const gchar *vr;
const gchar *vm;
const gboolean is_retired;
const gboolean add_to_summary; /* Add to parent's item description */
} dcm_tag_t;
static dcm_tag_t dcm_tag_data[] = {
/* Command Tags */
""" + text_for_rows(cmd_rows) + """
/* Data Tags */
""" + text_for_rows(data_rows) + """
};
/* ---------------------------------------------------------------------
* DICOM UID Definitions
* Part 6 lists following different UID Types (2006-2008)
* Application Context Name
* Coding Scheme
* DICOM UIDs as a Coding Scheme
* LDAP OID
* Meta SOP Class
* SOP Class
* Service Class
* Transfer Syntax
* Well-known Print Queue SOP Instance
* Well-known Printer SOP Instance
* Well-known SOP Instance
* Well-known frame of reference
*/
typedef struct dcm_uid {
const gchar *value;
const gchar *name;
const gchar *type;
} dcm_uid_t;
""" + "\n".join(f'#define {uid_define_name(uid)} "{uid[0]}"'
for uid in uid_rows if uid[1] != '(Retired)') + """
static dcm_uid_t dcm_uid_data[] = {
""" + "\n".join(f' {{ {uid_define_name(uid)}, "{uid[1]}", "{uid[3]}"}},'
for uid in uid_rows)+ """
};
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* packet-dcm.h */"""
print(packet_dcm_h) |
Python | wireshark/tools/make-pci-ids.py | #!/usr/bin/env python3
#
# make-pci-ids - Creates a file containing PCI IDs.
# It uses the database from
# https://github.com/pciutils/pciids/raw/master/pci.ids
# to create our file epan/pci-ids.c
#
# Wireshark - Network traffic analyzer
#
# By Caleb Chiu <[email protected]>
# Copyright 2021
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
import string
import sys
import urllib.request, urllib.error, urllib.parse
OUTPUT_FILE = "epan/pci-ids.c"
MIN_VENDOR_COUNT = 2250 # 2261 on 2021-11-01
MIN_DEVICE_COUNT = 33000 # 33724 on 2021-11-01
CODE_PREFIX = """\
*
* Generated by tools/make-pci-ids.py
* By Caleb Chiu <[email protected]>
* Copyright 2021
*
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include <config.h>
#include <stddef.h>
#include "pci-ids.h"
typedef struct
{
uint16_t vid;
uint16_t did;
uint16_t svid;
uint16_t ssid;
char *name;
} pci_id_t;
typedef struct
{
uint16_t vid;
uint16_t count;
pci_id_t *ids_ptr;
} pci_vid_index_t;
"""
CODE_POSTFIX = """
static pci_vid_index_t *get_vid_index(uint16_t vid)
{
uint32_t start_index = 0;
uint32_t end_index = 0;
uint32_t idx = 0;
end_index = sizeof(pci_vid_index)/sizeof(pci_vid_index[0]);
while(start_index != end_index)
{
if(end_index - start_index == 1)
{
if(pci_vid_index[start_index].vid == vid)
return &pci_vid_index[start_index];
break;
}
idx = (start_index + end_index)/2;
if(pci_vid_index[idx].vid < vid)
start_index = idx;
else
if(pci_vid_index[idx].vid > vid)
end_index = idx;
else
return &pci_vid_index[idx];
}
return NULL;
}
const char *pci_id_str(uint16_t vid, uint16_t did, uint16_t svid, uint16_t ssid)
{
unsigned int i;
static char *not_found = \"Not found\";
pci_vid_index_t *index_ptr;
pci_id_t *ids_ptr;
index_ptr = get_vid_index(vid);
if(index_ptr == NULL)
return not_found;
ids_ptr = index_ptr->ids_ptr;
for(i = 0; i < index_ptr->count; ids_ptr++, i++)
if(vid == ids_ptr->vid &&
did == ids_ptr->did &&
svid == ids_ptr->svid &&
ssid == ids_ptr->ssid)
return ids_ptr->name;
return not_found;
}
"""
id_list=[]
count_list=[]
def exit_msg(msg=None, status=1):
if msg is not None:
sys.stderr.write(msg + '\n')
sys.exit(status)
def main():
req_headers = { 'User-Agent': 'Wireshark make-pci-ids' }
req = urllib.request.Request('https://github.com/pciutils/pciids/raw/master/pci.ids', headers=req_headers)
response = urllib.request.urlopen(req)
lines = response.read().decode('UTF-8', 'replace').splitlines()
out_lines = '''\
/* pci-ids.c
*
* pci-ids.c is based on the pci.ids of The PCI ID Repository at
* https://pci-ids.ucw.cz/, fetched indirectly via
* https://github.com/pciutils/pciids
'''
vid = -1
did = -1
svid = -1
entries = 0
line_num = 0
for line in lines:
line = line.strip('\n')
line_num += 1
if line_num <= 15:
line = line.replace('#', ' ', 1)
line = line.lstrip()
line = line.replace("GNU General Public License", "GPL")
if line:
line = ' * ' + line
else:
line = ' *' + line
out_lines += line + '\n'
if line_num == 15:
out_lines += CODE_PREFIX
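        # Escape the rest of the file for embedding in C string literals;
        # padding "?" also keeps accidental trigraph sequences ("??=" and
        # friends) out of the generated code.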
line = line.replace("\\","\\\\")
line = line.replace("\"","\\\"")
line = line.replace("?","?-")
tabs = len(line) - len(line.lstrip('\t'))
if tabs == 0:
#print line
words = line.split(" ", 1)
if len(words) < 2:
continue
if len(words[0]) != 4:
continue
if all(c in string.hexdigits for c in words[0]):
hex_int = int(words[0], 16)
if vid != -1:
out_lines += "}; /* pci_vid_%04X[] */\n\n" % (vid)
count_list.append(entries)
vid = hex_int
entries = 1
did = -1
svid = -1
ssid = -1
out_lines += "pci_id_t pci_vid_%04X[] = {\n" % (vid)
out_lines += "{0x%04X, 0xFFFF, 0xFFFF, 0xFFFF, \"%s(0x%04X)\"},\n" % (vid, words[1].strip(), vid)
id_list.append(vid)
continue
if tabs == 1:
line = line.strip('\t')
words = line.split(" ", 1)
if len(words) < 2:
continue
if len(words[0]) != 4:
continue
if all(c in string.hexdigits for c in words[0]):
hex_int = int(words[0], 16)
did = hex_int
svid = -1
ssid = -1
out_lines += "{0x%04X, 0x%04X, 0xFFFF, 0xFFFF, \"%s(0x%04X)\"},\n" % (vid, did, words[1].strip(), did)
entries += 1
continue
if tabs == 2:
line = line.strip('\t')
words = line.split(" ", 2)
if len(words[0]) != 4:
continue
if all(c in string.hexdigits for c in words[0]):
hex_int = int(words[0], 16)
svid = hex_int
if all(c in string.hexdigits for c in words[1]):
hex_int = int(words[1], 16)
ssid = hex_int
out_lines += "{0x%04X, 0x%04X, 0x%04X, 0x%04X, \"%s(0x%04X-0x%04X)\"},\n" % (vid, did, svid, ssid, words[2].strip(), svid, ssid)
entries += 1
svid = -1
ssid = -1
continue
out_lines += "}; /* pci_vid_%04X[] */\n" % (vid)
count_list.append(entries)
out_lines += "\npci_vid_index_t pci_vid_index[] = {\n"
vendor_count = len(id_list)
device_count = 0
for i in range(vendor_count):
out_lines += "{0x%04X, %d, pci_vid_%04X },\n" % (id_list[i], count_list[i], id_list[i])
device_count += count_list[i]
out_lines += "}; /* We have %d VIDs */\n" % (vendor_count)
out_lines += CODE_POSTFIX
if vendor_count < MIN_VENDOR_COUNT:
        exit_msg(f'Too few vendors. Wanted {MIN_VENDOR_COUNT}, got {vendor_count}.')
if device_count < MIN_DEVICE_COUNT:
        exit_msg(f'Too few devices. Wanted {MIN_DEVICE_COUNT}, got {device_count}.')
with open(OUTPUT_FILE, "w", encoding="utf-8") as pci_ids_f:
pci_ids_f.write(out_lines)
if __name__ == '__main__':
main() |
Python | wireshark/tools/make-plugin-reg.py | #!/usr/bin/env python3
#
# Looks for registration routines in the plugins
# and assembles C code to call all the routines.
# A new "plugin.c" file will be written in the current directory.
#
import os
import sys
import re
#
# The first argument is the directory in which the source files live.
#
srcdir = sys.argv[1]
#
# The second argument is either "plugin", "plugin_wtap", "plugin_codec",
# or "plugin_tap".
#
registertype = sys.argv[2]
#
# All subsequent arguments are the files to scan.
#
files = sys.argv[3:]
final_filename = "plugin.c"
preamble = """\
/*
* Do not modify this file. Changes will be overwritten.
*
* Generated automatically from %s.
*/
""" % (os.path.basename(sys.argv[0]))
# Create the proper list of filenames
filenames = []
for file in files:
if os.path.isfile(file):
filenames.append(file)
else:
filenames.append(os.path.join(srcdir, file))
if len(filenames) < 1:
print("No files found")
sys.exit(1)
# Look through all files, applying the regex to each line.
# If the pattern matches, save the "symbol" section to the
# appropriate set.
regs = {
'proto_reg': set(),
'handoff_reg': set(),
'wtap_register': set(),
'codec_register': set(),
'register_tap_listener': set(),
}
# For those that don't know Python, r"" indicates a raw string,
# devoid of Python escapes.
proto_regex = r"\bproto_register_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
handoff_regex = r"\bproto_reg_handoff_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
wtap_reg_regex = r"\bwtap_register_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
codec_reg_regex = r"\bcodec_register_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
tap_reg_regex = r"\bregister_tap_listener_(?P<symbol>[\w]+)\s*\(\s*void\s*\)\s*{"
# This table drives the pattern-matching and symbol-harvesting
patterns = [
( 'proto_reg', re.compile(proto_regex, re.MULTILINE | re.ASCII) ),
( 'handoff_reg', re.compile(handoff_regex, re.MULTILINE | re.ASCII) ),
( 'wtap_register', re.compile(wtap_reg_regex, re.MULTILINE | re.ASCII) ),
( 'codec_register', re.compile(codec_reg_regex, re.MULTILINE | re.ASCII) ),
( 'register_tap_listener', re.compile(tap_reg_regex, re.MULTILINE | re.ASCII) ),
]
# Grep
for filename in filenames:
    file = open(filename, encoding='utf-8')
# Read the whole file into memory
contents = file.read()
for action in patterns:
regex = action[1]
for match in regex.finditer(contents):
symbol = match.group("symbol")
sym_type = action[0]
regs[sym_type].add(symbol)
# We're done with the file contents
del contents
file.close()
# Make sure we actually processed something
if (len(regs['proto_reg']) < 1 and len(regs['wtap_register']) < 1 and len(regs['codec_register']) < 1 and len(regs['register_tap_listener']) < 1):
print("No plugin registrations found")
sys.exit(1)
# Convert the sets into sorted lists to make the output pretty
regs['proto_reg'] = sorted(regs['proto_reg'])
regs['handoff_reg'] = sorted(regs['handoff_reg'])
regs['wtap_register'] = sorted(regs['wtap_register'])
regs['codec_register'] = sorted(regs['codec_register'])
regs['register_tap_listener'] = sorted(regs['register_tap_listener'])
reg_code = ""
reg_code += preamble
reg_code += """
#include "config.h"
#include <gmodule.h>
/* plugins are DLLs on Windows */
#define WS_BUILD_DLL
#include "ws_symbol_export.h"
"""
if registertype == "plugin":
reg_code += "#include \"epan/proto.h\"\n\n"
if registertype == "plugin_wtap":
reg_code += "#include \"wiretap/wtap.h\"\n\n"
if registertype == "plugin_codec":
reg_code += "#include \"wsutil/codecs.h\"\n\n"
if registertype == "plugin_tap":
reg_code += "#include \"epan/tap.h\"\n\n"
for symbol in regs['proto_reg']:
reg_code += "void proto_register_%s(void);\n" % (symbol)
for symbol in regs['handoff_reg']:
reg_code += "void proto_reg_handoff_%s(void);\n" % (symbol)
for symbol in regs['wtap_register']:
reg_code += "void wtap_register_%s(void);\n" % (symbol)
for symbol in regs['codec_register']:
reg_code += "void codec_register_%s(void);\n" % (symbol)
for symbol in regs['register_tap_listener']:
reg_code += "void register_tap_listener_%s(void);\n" % (symbol)
reg_code += """
WS_DLL_PUBLIC_DEF const gchar plugin_version[] = PLUGIN_VERSION;
WS_DLL_PUBLIC_DEF const int plugin_want_major = VERSION_MAJOR;
WS_DLL_PUBLIC_DEF const int plugin_want_minor = VERSION_MINOR;
WS_DLL_PUBLIC void plugin_register(void);
void plugin_register(void)
{
"""
if registertype == "plugin":
for symbol in regs['proto_reg']:
reg_code +=" static proto_plugin plug_%s;\n\n" % (symbol)
reg_code +=" plug_%s.register_protoinfo = proto_register_%s;\n" % (symbol, symbol)
if symbol in regs['handoff_reg']:
reg_code +=" plug_%s.register_handoff = proto_reg_handoff_%s;\n" % (symbol, symbol)
else:
reg_code +=" plug_%s.register_handoff = NULL;\n" % (symbol)
reg_code += " proto_register_plugin(&plug_%s);\n" % (symbol)
if registertype == "plugin_wtap":
for symbol in regs['wtap_register']:
reg_code += " static wtap_plugin plug_%s;\n\n" % (symbol)
reg_code += " plug_%s.register_wtap_module = wtap_register_%s;\n" % (symbol, symbol)
reg_code += " wtap_register_plugin(&plug_%s);\n" % (symbol)
if registertype == "plugin_codec":
for symbol in regs['codec_register']:
reg_code += " static codecs_plugin plug_%s;\n\n" % (symbol)
reg_code += " plug_%s.register_codec_module = codec_register_%s;\n" % (symbol, symbol)
reg_code += " codecs_register_plugin(&plug_%s);\n" % (symbol)
if registertype == "plugin_tap":
for symbol in regs['register_tap_listener']:
reg_code += " static tap_plugin plug_%s;\n\n" % (symbol)
reg_code += " plug_%s.register_tap_listener = register_tap_listener_%s;\n" % (symbol, symbol)
reg_code += " tap_register_plugin(&plug_%s);\n" % (symbol)
reg_code += "}\n"
try:
fh = open(final_filename, 'w')
fh.write(reg_code)
fh.close()
except OSError:
sys.exit('Unable to write ' + final_filename + '.\n')
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
# |
Python | wireshark/tools/make-regs.py | #!/usr/bin/env python3
#
# Looks for registration routines in the source files
# and assembles C code to call all the routines.
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
import sys
import re
preamble = """\
/*
* Do not modify this file. Changes will be overwritten.
*
* Generated automatically using \"make-regs.py\".
*/
"""
def gen_prototypes(funcs):
output = ""
for f in funcs:
output += "void {}(void);\n".format(f)
return output
def gen_array(funcs, name):
output = "{}[] = {{\n".format(name)
for f in funcs:
output += " {{ \"{0}\", {0} }},\n".format(f)
output += " { NULL, NULL }\n};\n"
return output
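# Example (illustrative): gen_array(['proto_register_foo'],
# 'dissector_reg_t dissector_reg_proto') returns the text:
#   dissector_reg_t dissector_reg_proto[] = {
#       { "proto_register_foo", proto_register_foo },
#       { NULL, NULL }
#   };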
def scan_files(infiles, regs):
for path in infiles:
with open(path, 'r', encoding='utf8') as f:
source = f.read()
for array, regex in regs:
matches = re.findall(regex, source)
array.extend(matches)
def make_dissectors(outfile, infiles):
protos = []
protos_regex = r"void\s+(proto_register_[\w]+)\s*\(\s*void\s*\)\s*{"
handoffs = []
handoffs_regex = r"void\s+(proto_reg_handoff_[\w]+)\s*\(\s*void\s*\)\s*{"
scan_files(infiles, [(protos, protos_regex), (handoffs, handoffs_regex)])
if len(protos) < 1:
sys.exit("No protocol registrations found.")
protos.sort()
handoffs.sort()
output = preamble
output += """\
#include "dissectors.h"
const unsigned long dissector_reg_proto_count = {0};
const unsigned long dissector_reg_handoff_count = {1};
""".format(len(protos), len(handoffs))
output += gen_prototypes(protos)
output += "\n"
output += gen_array(protos, "dissector_reg_t dissector_reg_proto")
output += "\n"
output += gen_prototypes(handoffs)
output += "\n"
output += gen_array(handoffs, "dissector_reg_t dissector_reg_handoff")
with open(outfile, "w") as f:
f.write(output)
print("Found {0} registrations and {1} handoffs.".format(len(protos), len(handoffs)))
def make_wtap_modules(outfile, infiles):
wtap_modules = []
wtap_modules_regex = r"void\s+(register_[\w]+)\s*\(\s*void\s*\)\s*{"
scan_files(infiles, [(wtap_modules, wtap_modules_regex)])
if len(wtap_modules) < 1:
sys.exit("No wiretap registrations found.")
wtap_modules.sort()
output = preamble
output += """\
#include "wtap_modules.h"
const unsigned wtap_module_count = {0};
""".format(len(wtap_modules))
output += gen_prototypes(wtap_modules)
output += "\n"
output += gen_array(wtap_modules, "wtap_module_reg_t wtap_module_reg")
with open(outfile, "w") as f:
f.write(output)
print("Found {0} registrations.".format(len(wtap_modules)))
def make_taps(outfile, infiles):
taps = []
taps_regex = r"void\s+(register_tap_listener_[\w]+)\s*\(\s*void\s*\)\s*{"
scan_files(infiles, [(taps, taps_regex)])
if len(taps) < 1:
sys.exit("No tap registrations found.")
taps.sort()
output = preamble
output += """\
#include "ui/taps.h"
const unsigned long tap_reg_listener_count = {0};
""".format(len(taps))
output += gen_prototypes(taps)
output += "\n"
output += gen_array(taps, "tap_reg_t tap_reg_listener")
with open(outfile, "w") as f:
f.write(output)
print("Found {0} registrations.".format(len(taps)))
def print_usage():
sys.exit("Usage: {0} <dissectors|taps> <outfile> <infiles...|@filelist>\n".format(sys.argv[0]))
if __name__ == "__main__":
if len(sys.argv) < 4:
print_usage()
mode = sys.argv[1]
outfile = sys.argv[2]
if sys.argv[3].startswith("@"):
with open(sys.argv[3][1:]) as f:
            infiles = [line.strip() for line in f]
else:
infiles = sys.argv[3:]
if mode == "dissectors":
make_dissectors(outfile, infiles)
elif mode == "wtap_modules":
make_wtap_modules(outfile, infiles)
elif mode == "taps":
make_taps(outfile, infiles)
else:
print_usage() |
Python | wireshark/tools/make-services.py | #!/usr/bin/env python3
#
# Parses the CSV version of the IANA Service Name and Transport Protocol Port Number Registry
# and generates the services table in epan/services-data.c.
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 2013 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
iana_svc_url = 'https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.csv'
__doc__ = '''\
Usage: make-services.py [url]
url defaults to
%s
''' % (iana_svc_url)
import sys
import getopt
import csv
import re
import collections
import urllib.request, urllib.error, urllib.parse
import codecs
services_file = 'epan/services-data.c'
exclude_services = [
'^spr-itunes',
'^spl-itunes',
'^shilp',
]
min_source_lines = 14000 # Size was ~ 14800 on 2017-07-20
def parse_port(port_str):
p = port_str.split('-')
try:
if len(p) == 1:
return tuple([int(p[0])])
if len(p) == 2:
return tuple([int(p[0]), int(p[1])])
except ValueError:
pass
return ()
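# Examples (illustrative): parse_port('80') -> (80,),
# parse_port('6000-6063') -> (6000, 6063), and malformed input -> ().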
def port_to_str(port):
if len(port) == 2:
return str(port[0]) + '-' + str(port[1])
return str(port[0])
def parse_rows(svc_fd):
port_reader = csv.reader(svc_fd)
count = 0
# Header positions as of 2013-08-06
headers = next(port_reader)
try:
sn_pos = headers.index('Service Name')
except Exception:
sn_pos = 0
try:
pn_pos = headers.index('Port Number')
except Exception:
pn_pos = 1
try:
tp_pos = headers.index('Transport Protocol')
except Exception:
tp_pos = 2
try:
desc_pos = headers.index('Description')
except Exception:
desc_pos = 3
services_map = {}
for row in port_reader:
service = row[sn_pos]
port = parse_port(row[pn_pos])
proto = row[tp_pos]
description = row[desc_pos]
count += 1
if len(service) < 1 or not port or len(proto) < 1:
continue
if re.search('|'.join(exclude_services), service):
continue
# max 15 chars
service = service[:15].rstrip()
# replace blanks (for some non-standard long names)
service = service.replace(" ", "-")
description = description.replace("\n", "")
description = re.sub("IANA assigned this well-formed service .+$", "", description)
description = re.sub(" +", " ", description)
description = description.strip()
if description == service or description == service.replace("-", " "):
description = None
        if port not in services_map:
services_map[port] = collections.OrderedDict()
# Remove some duplicates (first entry wins)
proto_exists = False
for k in services_map[port].keys():
if proto in services_map[port][k]:
proto_exists = True
break
if proto_exists:
continue
        if service not in services_map[port]:
services_map[port][service] = [description]
services_map[port][service].append(proto)
if count < min_source_lines:
exit_msg('Not enough parsed data')
return services_map
def compile_body(d):
keys = list(d.keys())
keys.sort()
body = []
for port in keys:
for serv in d[port].keys():
line = [port, d[port][serv][1:], serv]
description = d[port][serv][0]
if description:
line.append(description)
body.append(line)
return body
def add_entry(table, port, service_name, description):
table.append([int(port), service_name, description])
# body = [(port-range,), [proto-list], service-name, optional-description]
# table = [port-number, service-name, optional-description]
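# Example (illustrative): a body entry [(80,), ['tcp', 'udp'], 'http',
# 'World Wide Web HTTP'] lands in tcp_udp_table as
# [80, 'http', 'World Wide Web HTTP'].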
def compile_tables(body):
body.sort()
tcp_udp_table = []
tcp_table = []
udp_table = []
sctp_table = []
dccp_table = []
for entry in body:
if len(entry) == 4:
port_range, proto_list, service_name, description = entry
else:
port_range, proto_list, service_name = entry
description = None
for port in port_range:
if 'tcp' in proto_list and 'udp' in proto_list:
add_entry(tcp_udp_table, port, service_name, description)
else:
if 'tcp' in proto_list:
add_entry(tcp_table, port, service_name, description)
if 'udp' in proto_list:
add_entry(udp_table, port, service_name, description)
if 'sctp' in proto_list:
add_entry(sctp_table, port, service_name, description)
if 'dccp' in proto_list:
add_entry(dccp_table, port, service_name, description)
return tcp_udp_table, tcp_table, udp_table, sctp_table, dccp_table
def exit_msg(msg=None, status=1):
if msg is not None:
sys.stderr.write(msg + '\n\n')
sys.stderr.write(__doc__ + '\n')
sys.exit(status)
def main(argv):
if sys.version_info[0] < 3:
print("This requires Python 3")
sys.exit(2)
try:
opts, _ = getopt.getopt(argv, "h", ["help"])
except getopt.GetoptError:
exit_msg()
for opt, _ in opts:
if opt in ("-h", "--help"):
exit_msg(None, 0)
    if len(argv) > 0:
svc_url = argv[0]
else:
svc_url = iana_svc_url
try:
if not svc_url.startswith('http'):
svc_fd = open(svc_url)
else:
req = urllib.request.urlopen(svc_url)
svc_fd = codecs.getreader('utf8')(req)
except Exception:
exit_msg('Error opening ' + svc_url)
body = parse_rows(svc_fd)
    out = open(services_file, 'w', encoding='utf-8')
out.write('''\
/*
* Wireshark - Network traffic analyzer
* By Gerald Combs <[email protected]>
* Copyright 1998 Gerald Combs
*
* SPDX-License-Identifier: GPL-2.0-or-later
*
* This is a local copy of the IANA port-numbers file.
*
* Wireshark uses it to resolve port numbers into human readable
* service names, e.g. TCP port 80 -> http.
*
* It is subject to copyright and being used with IANA's permission:
* https://www.wireshark.org/lists/wireshark-dev/200708/msg00160.html
*
* The original file can be found at:
* %s
*/
''' % (iana_svc_url))
body = compile_body(body)
# body = [(port-range,), [proto-list], service-name, optional-description]
max_port = 0
tcp_udp, tcp, udp, sctp, dccp = compile_tables(body)
def write_entry(f, e, max_port):
line = " {{ {}, \"{}\", ".format(*e)
sep_len = 32 - len(line)
if sep_len <= 0:
sep_len = 1
line += ' ' * sep_len
if len(e) == 3 and e[2]:
line += "\"{}\" }},\n".format(e[2].replace('"', '\\"'))
else:
line += "\"\" },\n"
f.write(line)
if int(e[0]) > int(max_port):
return e[0]
return max_port
out.write("static ws_services_entry_t global_tcp_udp_services_table[] = {\n")
for e in tcp_udp:
max_port = write_entry(out, e, max_port)
out.write("};\n\n")
out.write("static ws_services_entry_t global_tcp_services_table[] = {\n")
for e in tcp:
max_port = write_entry(out, e, max_port)
out.write("};\n\n")
out.write("static ws_services_entry_t global_udp_services_table[] = {\n")
for e in udp:
max_port = write_entry(out, e, max_port)
out.write("};\n\n")
out.write("static ws_services_entry_t global_sctp_services_table[] = {\n")
for e in sctp:
max_port = write_entry(out, e, max_port)
out.write("};\n\n")
out.write("static ws_services_entry_t global_dccp_services_table[] = {\n")
for e in dccp:
max_port = write_entry(out, e, max_port)
out.write("};\n\n")
out.write("static const uint16_t _services_max_port = {};\n".format(max_port))
out.close()
if __name__ == "__main__":
sys.exit(main(sys.argv[1:])) |
Python | wireshark/tools/make-tls-ct-logids.py | #!/usr/bin/env python3
# Generate the array of Certificate Transparency Log ID to description mappings
# for the TLS dissector.
#
# To update the TLS dissector source file, run this from the source directory:
#
# python3 tools/make-tls-ct-logids.py --update
#
import argparse
from base64 import b64decode, b64encode
from enum import Enum
import itertools
import os
import requests
from hashlib import sha256
# Begin of comment, followed by the actual array definition
HEADER = "/* Generated by tools/make-tls-ct-logids.py\n"
# See also https://www.certificate-transparency.org/known-logs
CT_JSON_URL = 'https://www.gstatic.com/ct/log_list/v3/all_logs_list.json'
# File to be patched
SOURCE_FILE = os.path.join('epan', 'dissectors', 'packet-tls-utils.c')
# Maximum elements per line in the value array. 11 is chosen because it results
# in output consistent with clang-format.
BYTES_PER_LINE = 11
class SourceStage(Enum):
BEGIN = 1
IN_METAINFO = 2
IN_BLOCK = 3
END = 4
def escape_c(s):
return s.replace('\\', '\\\\').replace('"', '\\"')
def byteshex(b):
return " ".join("0x%02x," % b for b in bytearray(b))
def process_json(obj, lastmod):
logs = list(itertools.chain(*[op['logs'] for op in obj['operators']]))
metainfo, block = HEADER, ''
metainfo += " * Last-Modified %s, %s entries. */\n" % (lastmod, len(logs))
block += "static const bytes_string ct_logids[] = {\n"
for entry in logs:
desc = entry["description"]
pubkey_der = b64decode(entry["key"])
key_id = sha256(pubkey_der).digest()
block += ' { (const uint8_t[]){\n'
for offset in range(0, len(key_id), BYTES_PER_LINE):
block += ' %s\n' % \
byteshex(key_id[offset:offset+BYTES_PER_LINE])
block += ' },\n'
block += ' %d, "%s" },\n' % (len(key_id), escape_c(desc))
block += " { NULL, 0, NULL }\n"
block += "};\n"
return metainfo, block
def parse_source(source_path):
"""
Reads the source file and tries to split it in the parts before, inside and
after the block.
"""
begin, metainfo, block, end = '', '', '', ''
# Stages: BEGIN (before block), IN_METAINFO, IN_BLOCK (skip), END
stage = SourceStage.BEGIN
with open(source_path) as f:
for line in f:
if line.startswith('/* Generated by '):
stage = SourceStage.IN_METAINFO
if stage == SourceStage.BEGIN:
begin += line
elif stage == SourceStage.IN_METAINFO:
metainfo += line
elif stage == SourceStage.IN_BLOCK:
block += line
if line.startswith('}'):
stage = SourceStage.END
elif stage == SourceStage.END:
end += line
if line.startswith(' * Last-Modified '):
stage = SourceStage.IN_BLOCK
if stage != SourceStage.END:
raise RuntimeError("Could not parse file (in stage %s)" % stage.name)
return begin, metainfo, block, end
parser = argparse.ArgumentParser()
parser.add_argument("--update", action="store_true",
help="Update %s as needed instead of writing to stdout" % SOURCE_FILE)
def main():
args = parser.parse_args()
this_dir = os.path.dirname(__file__)
r = requests.get(CT_JSON_URL)
j_metainfo, j_block = process_json(r.json(), lastmod=r.headers['Last-Modified'])
source_path = os.path.join(this_dir, '..', SOURCE_FILE)
if args.update:
s_begin, _, s_block, s_end = parse_source(source_path)
if s_block == j_block:
print("File is up-to-date")
else:
with open(source_path, "w") as f:
f.write(s_begin)
f.write(j_metainfo)
f.write(j_block)
f.write(s_end)
print("Updated %s" % source_path)
else:
print(j_metainfo, j_block)
if __name__ == '__main__':
main() |
Python | wireshark/tools/make-usb.py | #!/usr/bin/env python3
#
# make-usb - Creates a file containing vendor and product ids.
# It uses the databases from
# - The USB ID Repository: https://usb-ids.gowdy.us (http://www.linux-usb.org), mirrored at Sourceforge
# - libgphoto2 from gPhoto: https://github.com/gphoto/libgphoto2 (http://gphoto.org), available at GitHub
# to create our file epan/dissectors/usb.c
import re
import sys
import urllib.request, urllib.error, urllib.parse
MODE_IDLE = 0
MODE_VENDOR_PRODUCT = 1
MIN_VENDORS = 3400 # 3409 as of 2020-11-15
MIN_PRODUCTS = 20000 # 20361 as of 2020-11-15
mode = MODE_IDLE
req_headers = { 'User-Agent': 'Wireshark make-usb' }
req = urllib.request.Request('https://sourceforge.net/p/linux-usb/repo/HEAD/tree/trunk/htdocs/usb.ids?format=raw', headers=req_headers)
response = urllib.request.urlopen(req)
lines = response.read().decode('UTF-8', 'replace').splitlines()
vendors = dict()
products = dict()
vendors_str="static const value_string usb_vendors_vals[] = {\n"
products_str="static const value_string usb_products_vals[] = {\n"
# Escape backslashes, quotes, control characters and non-ASCII characters.
escapes = {}
for i in range(256):
if i in b'\\"':
escapes[i] = '\\%c' % i
elif i in range(0x20, 0x80) or i in b'\t':
escapes[i] = chr(i)
else:
escapes[i] = '\\%03o' % i
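# Illustrative entries (derived from the table above): escapes[ord('"')]
# is '\\"', a tab byte maps to itself, and a non-ASCII byte such as 0xb5
# maps to the octal escape '\\265', so the generated C literals stay
# ASCII-only.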
for utf8line in lines:
# Convert single backslashes to double (escaped) backslashes, escape quotes, etc.
utf8line = utf8line.rstrip()
utf8line = re.sub("\?+", "?", utf8line)
line = ''.join(escapes[byte] for byte in utf8line.encode('utf8'))
if line == "# Vendors, devices and interfaces. Please keep sorted.":
mode = MODE_VENDOR_PRODUCT
continue
elif line == "# List of known device classes, subclasses and protocols":
mode = MODE_IDLE
continue
if mode == MODE_VENDOR_PRODUCT:
if re.match("^[0-9a-f]{4}", line):
last_vendor=line[:4]
vendors[last_vendor] = line[4:].strip()
elif re.match("^\t[0-9a-f]{4}", line):
line = line.strip()
product = "%s%s"%(last_vendor, line[:4])
products[product] = line[4:].strip()
req = urllib.request.Request('https://raw.githubusercontent.com/gphoto/libgphoto2/master/camlibs/ptp2/library.c', headers=req_headers)
response = urllib.request.urlopen(req)
lines = response.read().decode('UTF-8', 'replace').splitlines()
mode = MODE_IDLE
for line in lines:
if mode == MODE_IDLE and re.match(r".*\bmodels\[\]", line):
mode = MODE_VENDOR_PRODUCT
continue
if mode == MODE_VENDOR_PRODUCT and re.match(r"};", line):
mode = MODE_IDLE
if mode == MODE_IDLE:
continue
m = re.match(r"\s*{\"(.*):(.*)\",\s*0x([0-9a-fA-F]{4}),\s*0x([0-9a-fA-F]{4}),.*},", line)
if m is not None:
manuf = m.group(1).strip()
model = re.sub(r"\(.*\)", "", m.group(2)).strip()
product = m.group(3) + m.group(4)
products[product] = ' '.join((manuf, model))
req = urllib.request.Request('https://raw.githubusercontent.com/gphoto/libgphoto2/master/camlibs/ptp2/music-players.h', headers=req_headers)
response = urllib.request.urlopen(req)
lines = response.read().decode('UTF-8', 'replace').splitlines()
for line in lines:
m = re.match(r"\s*{\s*\"(.*)\",\s*0x([0-9a-fA-F]{4}),\s*\"(.*)\",\s*0x([0-9a-fA-F]{4}),", line)
if m is not None:
manuf = m.group(1).strip()
model = m.group(3).strip()
product = m.group(2) + m.group(4)
products[product] = ' '.join((manuf, model))
if len(vendors) < MIN_VENDORS:
sys.stderr.write("Not enough vendors: %d\n" % len(vendors))
sys.exit(1)
if len(products) < MIN_PRODUCTS:
sys.stderr.write("Not enough products: %d\n" % len(products))
sys.exit(1)
for v in sorted(vendors):
vendors_str += " { 0x%s, \"%s\" },\n"%(v,vendors[v])
vendors_str += """ { 0, NULL }\n};
value_string_ext ext_usb_vendors_vals = VALUE_STRING_EXT_INIT(usb_vendors_vals);
"""
for p in sorted(products):
products_str += " { 0x%s, \"%s\" },\n"%(p,products[p])
products_str += """ { 0, NULL }\n};
value_string_ext ext_usb_products_vals = VALUE_STRING_EXT_INIT(usb_products_vals);
"""
header="""/* usb.c
* USB vendor id and product ids
* This file was generated by running python ./tools/make-usb.py
* Don't change it directly.
*
* Copyright 2012, Michal Labedzki for Tieto Corporation
*
* Other values imported from libghoto2/camlibs/ptp2/library.c, music-players.h
*
* Copyright (C) 2001-2005 Mariusz Woloszyn <[email protected]>
* Copyright (C) 2003-2013 Marcus Meissner <[email protected]>
* Copyright (C) 2005 Hubert Figuiere <[email protected]>
* Copyright (C) 2009 Axel Waggershauser <[email protected]>
* Copyright (C) 2005-2007 Richard A. Low <[email protected]>
* Copyright (C) 2005-2012 Linus Walleij <[email protected]>
* Copyright (C) 2007 Ted Bullock
* Copyright (C) 2012 Sony Mobile Communications AB
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <[email protected]>
* Copyright 1998 Gerald Combs
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
/*
* XXX We should probably parse a USB ID file at program start instead
* of generating this file.
*/
#include "config.h"
#include <epan/packet.h>
"""
f = open('epan/dissectors/usb.c', 'w')
f.write(header)
f.write("\n")
f.write(vendors_str)
f.write("\n\n")
f.write(products_str)
f.write("\n")
f.close()
print("Success!") |
Python | wireshark/tools/make-version.py | #!/usr/bin/env python3
#
# Copyright 2022 by Moshe Kaplan
# Based on make-version.pl by Jörg Mayer
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
# See below for usage.
#
# If run with the "-r" or "--set-release" argument the VERSION macro in
# CMakeLists.txt will have the version_extra template appended to the
# version number. vcs_version.h will _not_ be generated if either argument is
# present.
#
# make-version.py is called during the build to update vcs_version.h in the build
# directory. To set a fixed version, use something like:
#
# cmake -DVCSVERSION_OVERRIDE="Git v3.1.0 packaged as 3.1.0-1"
#
# XXX - We're pretty dumb about the "{vcsinfo}" substitution, and about having
# spaces in the package format.
import argparse
import os
import os.path
import re
import shlex
import shutil
import sys
import subprocess
GIT_ABBREV_LENGTH = 12
# `git archive` will use an 'export-subst' entry in .gitattributes to replace
# the $Format strings with `git log --pretty=format:` placeholders.
# The output will look something like the following:
# GIT_EXPORT_SUBST_H = '51315cf37cdf6c0add1b1c99cb7941aac4489a6f'
# GIT_EXPORT_SUBST_D = 'HEAD -> master, upstream/master, upstream/HEAD'
# If the text "$Format" is still present, it means that
# git archive did not replace the $Format string, which
# means that this not a git archive.
GIT_EXPORT_SUBST_H = '17cd9891be299d3dd75524a61378ba39ab8e3199'
GIT_EXPORT_SUBST_D = 'HEAD -> master'
IS_GIT_ARCHIVE = not GIT_EXPORT_SUBST_H.startswith('$Format')
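# For reference (an assumption about the repository setup, not verified
# here): the substitution is driven by a .gitattributes entry along the
# lines of
#   tools/make-version.py export-subst
# with the two values above committed as $Format:%H$ and $Format:%D$.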
def update_cmakelists_txt(src_dir, set_version, repo_data):
if not set_version and repo_data['package_string'] == "":
return
cmake_filepath = os.path.join(src_dir, "CMakeLists.txt")
with open(cmake_filepath, encoding='utf-8') as fh:
cmake_contents = fh.read()
MAJOR_PATTERN = r"^set *\( *PROJECT_MAJOR_VERSION *\d+ *\)$"
MINOR_PATTERN = r"^set *\( *PROJECT_MINOR_VERSION *\d+ *\)$"
PATCH_PATTERN = r"^set *\( *PROJECT_PATCH_VERSION *\d+ *\)$"
VERSION_EXTENSION_PATTERN = r"^set *\( *PROJECT_VERSION_EXTENSION .*?$"
new_cmake_contents = cmake_contents
new_cmake_contents = re.sub(MAJOR_PATTERN,
f"set(PROJECT_MAJOR_VERSION {repo_data['version_major']})",
new_cmake_contents,
flags=re.MULTILINE)
new_cmake_contents = re.sub(MINOR_PATTERN,
f"set(PROJECT_MINOR_VERSION {repo_data['version_minor']})",
new_cmake_contents,
flags=re.MULTILINE)
new_cmake_contents = re.sub(PATCH_PATTERN,
f"set(PROJECT_PATCH_VERSION {repo_data['version_patch']})",
new_cmake_contents,
flags=re.MULTILINE)
new_cmake_contents = re.sub(VERSION_EXTENSION_PATTERN,
f"set(PROJECT_VERSION_EXTENSION \"{repo_data['package_string']}\")",
new_cmake_contents,
flags=re.MULTILINE)
with open(cmake_filepath, mode='w', encoding='utf-8') as fh:
fh.write(new_cmake_contents)
print(cmake_filepath + " has been updated.")
def update_debian_changelog(src_dir, repo_data):
# Read packaging/debian/changelog, then write back out an updated version.
deb_changelog_filepath = os.path.join(src_dir, "packaging", "debian", "changelog")
with open(deb_changelog_filepath, encoding='utf-8') as fh:
changelog_contents = fh.read()
CHANGELOG_PATTERN = r"^.*"
text_replacement = f"wireshark ({repo_data['version_major']}.{repo_data['version_minor']}.{repo_data['version_patch']}{repo_data['package_string']}) unstable; urgency=low"
# Note: Only need to replace the first line, so we don't use re.MULTILINE or re.DOTALL
new_changelog_contents = re.sub(CHANGELOG_PATTERN, text_replacement, changelog_contents)
with open(deb_changelog_filepath, mode='w', encoding='utf-8') as fh:
fh.write(new_changelog_contents)
print(deb_changelog_filepath + " has been updated.")
def update_attributes_asciidoc(src_dir, repo_data):
# Read docbook/attributes.adoc, then write it back out with an updated
# wireshark-version replacement line.
    asciidoc_filepath = os.path.join(src_dir, "docbook", "attributes.adoc")
    with open(asciidoc_filepath, encoding='utf-8') as fh:
        asciidoc_contents = fh.read()
    # Sample line (without quotes): ":wireshark-version: 2.3.1"
    ASCIIDOC_PATTERN = r"^:wireshark-version:.*$"
    text_replacement = f":wireshark-version: {repo_data['version_major']}.{repo_data['version_minor']}.{repo_data['version_patch']}"
    new_asciidoc_contents = re.sub(ASCIIDOC_PATTERN, text_replacement, asciidoc_contents, flags=re.MULTILINE)
    with open(asciidoc_filepath, mode='w', encoding='utf-8') as fh:
        fh.write(new_asciidoc_contents)
    print(asciidoc_filepath + " has been updated.")
def update_docinfo_asciidoc(src_dir, repo_data):
doc_paths = []
doc_paths += [os.path.join(src_dir, 'docbook', 'wsdg_src', 'developer-guide-docinfo.xml')]
doc_paths += [os.path.join(src_dir, 'docbook', 'wsug_src', 'user-guide-docinfo.xml')]
for doc_path in doc_paths:
with open(doc_path, encoding='utf-8') as fh:
doc_contents = fh.read()
# Sample line (without quotes): "<subtitle>For Wireshark 1.2</subtitle>"
DOC_PATTERN = r"^<subtitle>For Wireshark \d+.\d+<\/subtitle>$"
text_replacement = f"<subtitle>For Wireshark {repo_data['version_major']}.{repo_data['version_minor']}</subtitle>"
new_doc_contents = re.sub(DOC_PATTERN, text_replacement, doc_contents, flags=re.MULTILINE)
with open(doc_path, mode='w', encoding='utf-8') as fh:
fh.write(new_doc_contents)
print(doc_path + " has been updated.")
def update_cmake_lib_releases(src_dir, repo_data):
# Read CMakeLists.txt for each library, then write back out an updated version.
dir_paths = []
dir_paths += [os.path.join(src_dir, 'epan')]
dir_paths += [os.path.join(src_dir, 'wiretap')]
for dir_path in dir_paths:
cmakelists_filepath = os.path.join(dir_path, "CMakeLists.txt")
with open(cmakelists_filepath, encoding='utf-8') as fh:
cmakelists_contents = fh.read()
# Sample line (without quotes; note leading tab: " VERSION "0.0.0" SOVERSION 0")
VERSION_PATTERN = r'^(\s*VERSION\s+"\d+\.\d+\.)\d+'
replacement_text = f"\\g<1>{repo_data['version_patch']}"
new_cmakelists_contents = re.sub(VERSION_PATTERN,
replacement_text,
cmakelists_contents,
flags=re.MULTILINE)
with open(cmakelists_filepath, mode='w', encoding='utf-8') as fh:
fh.write(new_cmakelists_contents)
print(cmakelists_filepath + " has been updated.")
# Update distributed files that contain any version information
def update_versioned_files(src_dir, set_version, repo_data):
update_cmakelists_txt(src_dir, set_version, repo_data)
update_debian_changelog(src_dir, repo_data)
if set_version:
update_attributes_asciidoc(src_dir, repo_data)
update_docinfo_asciidoc(src_dir, repo_data)
update_cmake_lib_releases(src_dir, repo_data)
def generate_version_h(repo_data):
# Generate new contents of version.h from repository data
if not repo_data.get('enable_vcsversion'):
return "/* #undef VCSVERSION */\n"
if repo_data.get('git_description'):
# Do not bother adding the git branch, the git describe output
# normally contains the base tag and commit ID which is more
# than sufficient to determine the actual source tree.
return f'#define VCSVERSION "{repo_data["git_description"]}"\n'
if repo_data.get('last_change') and repo_data.get('num_commits'):
version_string = f"v{repo_data['version_major']}.{repo_data['version_minor']}.{repo_data['version_patch']}"
vcs_line = f'#define VCSVERSION "{version_string}-Git-{repo_data["num_commits"]}"\n'
return vcs_line
if repo_data.get('commit_id'):
vcs_line = f'#define VCSVERSION "Git commit {repo_data["commit_id"]}"\n'
return vcs_line
vcs_line = '#define VCSVERSION "Git Rev Unknown from unknown"\n'
return vcs_line
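# Example (illustrative): with repo_data = {'enable_vcsversion': True,
# 'git_description': 'v4.1.0-12-gdeadbeefcafe'}, generate_version_h()
# returns '#define VCSVERSION "v4.1.0-12-gdeadbeefcafe"\n'.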
def print_VCS_REVISION(version_file, repo_data, set_vcs):
# Write the version control system's version to $version_file.
# Don't change the file if it is not needed.
#
# XXX - We might want to add VCSVERSION to CMakeLists.txt so that it can
# generate vcs_version.h independently.
new_version_h = generate_version_h(repo_data)
needs_update = True
if os.path.exists(version_file):
with open(version_file, encoding='utf-8') as fh:
current_version_h = fh.read()
if current_version_h == new_version_h:
needs_update = False
if not set_vcs:
return
if needs_update:
with open(version_file, mode='w', encoding='utf-8') as fh:
fh.write(new_version_h)
print(version_file + " has been updated.")
elif not repo_data['enable_vcsversion']:
print(version_file + " disabled.")
else:
print(version_file + " unchanged.")
return
def get_version(cmakelists_file_data):
# Reads major, minor, and patch
# Sample data:
# set(PROJECT_MAJOR_VERSION 3)
# set(PROJECT_MINOR_VERSION 7)
# set(PROJECT_PATCH_VERSION 2)
MAJOR_PATTERN = r"^set *\( *PROJECT_MAJOR_VERSION *(\d+) *\)$"
MINOR_PATTERN = r"^set *\( *PROJECT_MINOR_VERSION *(\d+) *\)$"
PATCH_PATTERN = r"^set *\( *PROJECT_PATCH_VERSION *(\d+) *\)$"
major_match = re.search(MAJOR_PATTERN, cmakelists_file_data, re.MULTILINE)
minor_match = re.search(MINOR_PATTERN, cmakelists_file_data, re.MULTILINE)
patch_match = re.search(PATCH_PATTERN, cmakelists_file_data, re.MULTILINE)
if not major_match:
raise Exception("Couldn't get major version")
if not minor_match:
raise Exception("Couldn't get minor version")
if not patch_match:
raise Exception("Couldn't get patch version")
major_version = major_match.groups()[0]
minor_version = minor_match.groups()[0]
patch_version = patch_match.groups()[0]
return major_version, minor_version, patch_version
def read_git_archive(tagged_version_extra, untagged_version_extra):
# Reads key data from the git repo.
# For git archives, this does not need to access the source directory because
# `git archive` will use an 'export-subst' entry in .gitattributes to replace
# the value for GIT_EXPORT_SUBST_H in the script.
# Returns a dictionary with key values from the repository
is_tagged = False
for git_ref in GIT_EXPORT_SUBST_D.split(r', '):
match = re.match(r'^tag: (v[1-9].+)', git_ref)
if match:
is_tagged = True
vcs_tag = match.groups()[0]
if is_tagged:
print(f"We are on tag {vcs_tag}.")
package_string = tagged_version_extra
else:
print("We are not tagged.")
package_string = untagged_version_extra
# Always 0 commits for a git archive
num_commits = 0
# Assume a full commit hash, abbreviate it.
commit_id = GIT_EXPORT_SUBST_H[:GIT_ABBREV_LENGTH]
package_string = package_string.replace("{vcsinfo}", str(num_commits) + "-" + commit_id)
repo_data = {}
repo_data['commit_id'] = commit_id
repo_data['enable_vcsversion'] = True
repo_data['info_source'] = "git archive"
repo_data['is_tagged'] = is_tagged
repo_data['num_commits'] = num_commits
repo_data['package_string'] = package_string
return repo_data
def read_git_repo(src_dir, tagged_version_extra, untagged_version_extra):
# Reads metadata from the git repo for generating the version string
# Returns the data in a dict
    IS_GIT_INSTALLED = shutil.which('git') is not None
if not IS_GIT_INSTALLED:
print("Git unavailable. Git revision will be missing from version string.", file=sys.stderr)
return {}
GIT_DIR = os.path.join(src_dir, '.git')
# Check whether to include VCS version information in vcs_version.h
enable_vcsversion = True
git_get_commondir_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" rev-parse --git-common-dir')
git_commondir = subprocess.check_output(git_get_commondir_cmd, universal_newlines=True).strip()
if git_commondir and os.path.exists(f"{git_commondir}{os.sep}wireshark-disable-versioning"):
print("Header versioning disabled using git override.")
enable_vcsversion = False
git_last_changetime_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" log -1 --pretty=format:%at')
git_last_changetime = subprocess.check_output(git_last_changetime_cmd, universal_newlines=True).strip()
# Commits since last annotated tag.
# Output could be something like: v3.7.2rc0-64-g84d83a8292cb
# Or g84d83a8292cb
git_last_annotated_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" describe --abbrev={GIT_ABBREV_LENGTH} --long --always --match "v[1-9]*"')
git_last_annotated = subprocess.check_output(git_last_annotated_cmd, universal_newlines=True).strip()
parts = git_last_annotated.split('-')
git_description = git_last_annotated
if len(parts) > 1:
num_commits = int(parts[1])
else:
num_commits = 0
commit_id = parts[-1]
release_candidate = ''
RC_PATTERN = r'^v\d+\.\d+\.\d+(rc\d+)$'
match = re.match(RC_PATTERN, parts[0])
if match:
release_candidate = match.groups()[0]
# This command is expected to fail if the version is not tagged
try:
git_vcs_tag_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" describe --exact-match --match "v[1-9]*"')
git_vcs_tag = subprocess.check_output(git_vcs_tag_cmd, stderr=subprocess.DEVNULL, universal_newlines=True).strip()
is_tagged = True
except subprocess.CalledProcessError:
is_tagged = False
git_timestamp = ""
if num_commits == 0:
# Get the timestamp; format is similar to: 2022-06-27 23:09:20 -0400
# Note: This doesn't appear to be used, only checked for command success
git_timestamp_cmd = shlex.split(f'git --git-dir="{GIT_DIR}" log --format="%ad" -n 1 --date=iso')
git_timestamp = subprocess.check_output(git_timestamp_cmd, universal_newlines=True).strip()
if is_tagged:
print(f"We are on tag {git_vcs_tag}.")
package_string = tagged_version_extra
else:
print("We are not tagged.")
package_string = untagged_version_extra
package_string = release_candidate + package_string.replace("{vcsinfo}", str(num_commits) + "-" + commit_id)
repo_data = {}
repo_data['commit_id'] = commit_id
repo_data['enable_vcsversion'] = enable_vcsversion
repo_data['git_timestamp'] = git_timestamp
repo_data['git_description'] = git_description
repo_data['info_source'] = "Command line (git)"
repo_data['is_tagged'] = is_tagged
repo_data['last_change'] = git_last_changetime
repo_data['num_commits'] = num_commits
repo_data['package_string'] = package_string
return repo_data
def parse_versionstring(version_arg):
version_parts = version_arg.split('.')
if len(version_parts) != 3:
msg = "Version must have three numbers of the form x.y.z. You entered: " + version_arg
raise argparse.ArgumentTypeError(msg)
for i, version_type in enumerate(('Major', 'Minor', 'Patch')):
try:
int(version_parts[i])
except ValueError:
msg = f"{version_type} version must be a number! {version_type} version was '{version_parts[i]}'"
raise argparse.ArgumentTypeError(msg)
return version_parts
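# Examples (illustrative): parse_versionstring('4.2.1') returns
# ['4', '2', '1'], while '4.2' or '4.x.1' raise argparse.ArgumentTypeError.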
def read_repo_info(src_dir, tagged_version_extra, untagged_version_extra):
if IS_GIT_ARCHIVE:
repo_data = read_git_archive(tagged_version_extra, untagged_version_extra)
elif os.path.exists(src_dir + os.sep + '.git') and not os.path.exists(os.path.join(src_dir, '.git', 'svn')):
repo_data = read_git_repo(src_dir, tagged_version_extra, untagged_version_extra)
else:
raise Exception(src_dir + " does not appear to be a git repo or git archive!")
cmake_path = os.path.join(src_dir, "CMakeLists.txt")
with open(cmake_path, encoding='utf-8') as fh:
version_major, version_minor, version_patch = get_version(fh.read())
repo_data['version_major'] = version_major
repo_data['version_minor'] = version_minor
repo_data['version_patch'] = version_patch
return repo_data
# CMakeLists.txt calls this with no arguments to create vcs_version.h
# AppVeyor calls this with --set-release --untagged-version-extra=-{vcsinfo}-AppVeyor --tagged-version-extra=-AppVeyor
# .gitlab-ci calls this with --set-release
# Release checklist requires --set-version
def main():
parser = argparse.ArgumentParser(description='Wireshark file and package versions')
action_group = parser.add_mutually_exclusive_group()
action_group.add_argument('--set-version', '-v', metavar='<x.y.z>', type=parse_versionstring, help='Set the major, minor, and patch versions in the top-level CMakeLists.txt, docbook/attributes.adoc, packaging/debian/changelog, and the CMakeLists.txt for all libraries to the provided version number')
action_group.add_argument('--set-release', '-r', action='store_true', help='Set the extra release information in the top-level CMakeLists.txt based on either default or command-line specified options.')
setrel_group = parser.add_argument_group()
setrel_group.add_argument('--tagged-version-extra', '-t', default="", help="Extra version information format to use when a tag is found. No format \
(an empty string) is used by default.")
setrel_group.add_argument('--untagged-version-extra', '-u', default='-{vcsinfo}', help='Extra version information format to use when no tag is found. The format "-{vcsinfo}" (the number of commits and commit ID) is used by default.')
parser.add_argument("src_dir", metavar='src_dir', nargs=1, help="path to source code")
args = parser.parse_args()
src_dir = args.src_dir[0]
if args.set_version:
repo_data = {}
repo_data['version_major'] = args.set_version[0]
repo_data['version_minor'] = args.set_version[1]
repo_data['version_patch'] = args.set_version[2]
repo_data['package_string'] = ''
else:
repo_data = read_repo_info(src_dir, args.tagged_version_extra, args.untagged_version_extra)
set_vcs = not (args.set_release or args.set_version)
VERSION_FILE = 'vcs_version.h'
print_VCS_REVISION(VERSION_FILE, repo_data, set_vcs)
if args.set_release or args.set_version:
update_versioned_files(src_dir, args.set_version, repo_data)
if __name__ == "__main__":
main() |
C | wireshark/tools/make_charset_table.c | /* make_charset_table.c
* sample program to generate tables for charsets.c using iconv
*
* public domain
*/
#include <stdio.h>
#include <stdint.h>
#include <errno.h>
#include <iconv.h>
#define UNREPL 0xFFFD
int main(int argc, char **argv) {
/* for now only UCS-2 */
uint16_t table[0x100];
iconv_t conv;
const char *charset;
int i, j;
/* 0x00 ... 0x7F same as ASCII? */
int ascii_based = 1;
/* 0x00 ... 0x9F same as ISO? */
int iso_based = 1;
if (argc != 2) {
printf("usage: %s <charset>\n", argv[0]);
return 1;
}
charset = argv[1];
conv = iconv_open("UCS-2", charset);
if (conv == (iconv_t) -1) {
perror("iconv_open");
return 2;
}
iconv_close(conv);
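/*
 * The open/close above is only a probe that iconv recognizes the charset
 * name; the descriptor actually used for conversion is re-opened fresh
 * for every input byte below, presumably so that a failed (EILSEQ)
 * conversion cannot leave shift state that affects the next byte.
 */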
for (i = 0x00; i < 0x100; i++) {
unsigned char in[1], out[2];
size_t inlen = 1, outlen = 2;
char *inbuf = (char *) in;
char *outbuf = (char *) out;
size_t ret;
in[0] = i;
conv = iconv_open("UCS-2BE", charset);
if (conv == (iconv_t) -1) {
/* shouldn't fail now */
perror("iconv_open");
return 2;
}
ret = iconv(conv, &inbuf, &inlen, &outbuf, &outlen);
if (ret == (size_t) -1 && errno == EILSEQ) {
table[i] = UNREPL;
iconv_close(conv);
continue;
}
if (ret == (size_t) -1) {
perror("iconv");
iconv_close(conv);
return 4;
}
iconv_close(conv);
if (ret != 0 || inlen != 0 || outlen != 0) {
fprintf(stderr, "%d: something went wrong: %zu %zu %zu\n", i, ret, inlen, outlen);
return 3;
}
if (i < 0x80 && (out[0] != 0 || out[1] != i))
ascii_based = 0;
if (i < 0xA0 && (out[0] != 0 || out[1] != i))
iso_based = 0;
table[i] = (out[0] << 8) | out[1];
}
/* iso_based output is not supported yet, so force it off */
iso_based = 0;
printf("/* generated by %s %s */\n", argv[0], charset);
if (iso_based)
i = 0xA0;
else if (ascii_based)
i = 0x80;
else
i = 0;
printf("const gunichar2 charset_table_%s[0x%x] = {\n", charset, 0x100 - i);
while (i < 0x100) {
int start = i;
printf(" ");
for (j = 0; j < 8; j++, i++) {
if (table[i] == UNREPL)
printf("UNREPL, ");
else
printf("0x%.4x, ", table[i]);
}
if ((start & 0xf) == 0)
printf(" /* 0x%.2X - */", start);
else
printf(" /* - 0x%.2X */", i - 1);
printf("\n");
}
printf("};\n");
return 0;
} |
Shell Script | wireshark/tools/mingw-rpm-setup.sh | #!/bin/bash
# Setup development environment on Fedora Linux for MinGW-w64
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# We drag in tools that might not be needed by all users; it's easier
# that way.
#
function print_usage() {
printf "\\nUtility to setup a Fedora MinGW-w64 system for Wireshark development.\\n"
printf "The basic usage installs the needed software\\n\\n"
printf "Usage: %s [...other options...]\\n" "$0"
printf "\\t--install-all: install everything\\n"
printf "\\t[other]: other options are passed as-is to pacman\\n"
printf "\\tPass --noconfirm to bypass any \"are you sure?\" messages.\\n"
}
OPTIONS=
for arg; do
case $arg in
--help)
print_usage
exit 0
;;
--install-all)
;;
*)
OPTIONS="$OPTIONS $arg"
;;
esac
done
BASIC_LIST="mingw64-gcc \
mingw64-gcc-c++ \
mingw64-glib2 \
mingw64-libgcrypt \
mingw64-c-ares \
mingw64-qt6-qtbase \
mingw64-qt6-qt5compat \
mingw64-qt6-qtmultimedia \
mingw64-qt6-qttools \
mingw64-speexdsp \
mingw32-nsis \
mingw64-nsis \
mingw64-gnutls \
mingw64-brotli \
mingw64-minizip \
mingw64-opus \
mingw64-wpcap \
mingw64-libxml2 \
ninja-build \
flex \
lemon \
asciidoctor \
libxslt \
docbook-style-xsl \
ccache \
git \
patch \
cmake"
ACTUAL_LIST=$BASIC_LIST
dnf install $ACTUAL_LIST $OPTIONS |
wireshark/tools/msnchat | #!/usr/bin/env python
"""
Process packet capture files and produce a nice HTML
report of MSN Chat sessions.
Copyright (c) 2003 by Gilbert Ramirez <[email protected]>
SPDX-License-Identifier: GPL-2.0-or-later
"""
import os
import re
import sys
import array
import string
import WiresharkXML
import getopt
# By default we output the HTML to stdout
out_fh = sys.stdout
class MSNMessage:
pass
class MSN_MSG(MSNMessage):
def __init__(self, timestamp, user, message):
self.timestamp = timestamp
self.user = user
self.message = message
class Conversation:
"""Keeps track of a single MSN chat session"""
re_MSG_out = re.compile("MSG (?P<TrID>\d+) (?P<ACKTYPE>[UNA]) (?P<len>\d+)")
re_MSG_in = re.compile("MSG (?P<user>\S+)@(?P<domain>\S+) (?P<alias>\S+) (?P<len>\d+)")
USER_NOT_FOUND = -1
DEFAULT_USER = None
DEFAULT_USER_COLOR = "#0000ff"
USER_COLORS = [ "#ff0000", "#00ff00",
"#800000", "#008000", "#000080" ]
DEFAULT_USER_TEXT_COLOR = "#000000"
USER_TEXT_COLOR = "#000080"
def __init__(self):
self.packets = []
self.messages = []
def AddPacket(self, packet):
self.packets.append(packet)
def Summarize(self):
for packet in self.packets:
msg = self.CreateMSNMessage(packet)
if msg:
self.messages.append(msg)
else:
#XXX
pass
def CreateMSNMessage(self, packet):
msnms = packet.get_items("msnms")[0]
# Check the first line in the msnms transmission for the user
child = msnms.children[0]
user = self.USER_NOT_FOUND
m = self.re_MSG_out.search(child.show)
if m:
user = self.DEFAULT_USER
else:
m = self.re_MSG_in.search(child.show)
if m:
user = m.group("alias")
if user == self.USER_NOT_FOUND:
print >> sys.stderr, "No match for", child.show
sys.exit(1)
return None
msg = ""
i = 5
check_trailing = 0
if len(msnms.children) > 5:
check_trailing = 1
while i < len(msnms.children):
msg += msnms.children[i].show
if check_trailing:
j = msg.find("MSG ")
if j >= 0:
msg = msg[:j]
i += 5
else:
i += 6
else:
i += 6
timestamp = packet.get_items("frame.time")[0].get_show()
i = timestamp.rfind(".")
timestamp = timestamp[:i]
return MSN_MSG(timestamp, user, msg)
def MsgToHTML(self, text):
bytes = array.array("B")
new_string = text
i = new_string.find("\\")
while i > -1:
# At the end?
if i == len(new_string) - 1:
# Just let the default action
# copy everything to 'bytes'
break
if new_string[i+1] in string.digits:
left = new_string[:i]
bytes.fromstring(left)
right = new_string[i+4:]
oct_string = new_string[i+1:i+4]
char = int(oct_string, 8)
bytes.append(char)
new_string = right
# ignore \r and \n
elif new_string[i+1] in "rn":
copy_these = new_string[:i]
bytes.fromstring(copy_these)
new_string = new_string[i+2:]
else:
copy_these = new_string[:i+2]
bytes.fromstring(copy_these)
new_string = new_string[i+2:]
i = new_string.find("\\")
bytes.fromstring(new_string)
return bytes
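# Illustrative example of the decoding above (hypothetical input): the
# tshark "show" text 'Hi\040there\r\n' becomes the bytes for 'Hi there';
# the three-digit octal escape \040 is converted to a space (0o40 == 32)
# and the literal \r and \n escapes are dropped.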
def CreateHTML(self, default_user):
if not self.messages:
return
print >> out_fh, """
<HR><BR><H3 Align=Center> ---- New Conversation @ %s ----</H3><BR>""" \
% (self.messages[0].timestamp)
user_color_assignments = {}
for msg in self.messages:
# Calculate 'user' and 'user_color' and 'user_text_color'
if msg.user == self.DEFAULT_USER:
user = default_user
user_color = self.DEFAULT_USER_COLOR
user_text_color = self.DEFAULT_USER_TEXT_COLOR
else:
user = msg.user
user_text_color = self.USER_TEXT_COLOR
if user_color_assignments.has_key(user):
user_color = user_color_assignments[user]
else:
num_assigned = len(user_color_assignments.keys())
user_color = self.USER_COLORS[num_assigned]
user_color_assignments[user] = user_color
# "Oct 6, 2003 21:45:25" --> "21:45:25"
timestamp = msg.timestamp.split()[-1]
htmlmsg = self.MsgToHTML(msg.message)
print >> out_fh, """
<FONT COLOR="%s"><FONT SIZE="2">(%s) </FONT><B>%s:</B></FONT> <FONT COLOR="%s">""" \
% (user_color, timestamp, user, user_text_color)
htmlmsg.tofile(out_fh)
print >> out_fh, "</FONT><BR>"
class CaptureFile:
"""Parses a single a capture file and keeps track of
all chat sessions in the file."""
def __init__(self, capture_filename, tshark):
"""Run tshark on the capture file and parse
the data."""
self.conversations = []
self.conversations_map = {}
pipe = os.popen(tshark + " -Tpdml -n -R "
"'msnms contains \"X-MMS-IM-Format\"' "
"-r " + capture_filename, "r")
WiresharkXML.parse_fh(pipe, self.collect_packets)
for conv in self.conversations:
conv.Summarize()
def collect_packets(self, packet):
"""Collect the packets passed back from WiresharkXML.
Sort them by TCP/IP conversation, as there could be multiple
clients per machine."""
# Just in case we're looking at tunnelling protocols where
# more than one IP or TCP header exists, look at the last one,
# which would be the one inside the tunnel.
src_ip = packet.get_items("ip.src")[-1].get_show()
dst_ip = packet.get_items("ip.dst")[-1].get_show()
src_tcp = packet.get_items("tcp.srcport")[-1].get_show()
dst_tcp = packet.get_items("tcp.dstport")[-1].get_show()
key_params = [src_ip, dst_ip, src_tcp, dst_tcp]
key_params.sort()
key = '|'.join(key_params)
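# Sorting the endpoint values makes the key identical for both
# directions of the TCP stream, so requests and replies land in the
# same Conversation.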
if not self.conversations_map.has_key(key):
conv = self.conversations_map[key] = Conversation()
self.conversations.append(conv)
else:
conv = self.conversations_map[key]
conv.AddPacket(packet)
def CreateHTML(self, default_user):
if not self.conversations:
return
for conv in self.conversations:
conv.CreateHTML(default_user)
def run_filename(filename, default_user, tshark):
"""Process one capture file."""
capture = CaptureFile(filename, tshark)
capture.CreateHTML(default_user)
def run(filenames, default_user, tshark):
# HTML Header
print >> out_fh, """
<HTML><TITLE>MSN Conversation</TITLE>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<BODY>
"""
for filename in filenames:
run_filename(filename, default_user, tshark)
# HTML Footer
print >> out_fh, """
<HR>
</BODY>
</HTML>
"""
def usage():
print >> sys.stderr, "msnchat [OPTIONS] CAPTURE_FILE [...]"
print >> sys.stderr, " -o FILE name of output file"
print >> sys.stderr, " -t TSHARK location of tshark binary"
print >> sys.stderr, " -u USER name for unknown user"
sys.exit(1)
def main():
default_user = "Unknown"
tshark = "tshark"
optstring = "ho:t:u:"
longopts = ["help"]
try:
opts, args = getopt.getopt(sys.argv[1:], optstring, longopts)
except getopt.GetoptError:
usage()
for opt, arg in opts:
if opt == "-h" or opt == "--help":
usage()
elif opt == "-o":
filename = arg
global out_fh
try:
out_fh = open(filename, "w")
except IOError:
sys.exit("Could not open %s for writing." % (filename,))
elif opt == "-u":
default_user = arg
elif opt == "-t":
tshark = arg
else:
sys.exit("Unhandled command-line option: " + opt)
run(args, default_user, tshark)
if __name__ == '__main__':
main() |
|
Shell Script | wireshark/tools/msys2-setup.sh | #!/bin/bash
# Setup development environment on MSYS2
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# We drag in tools that might not be needed by all users; it's easier
# that way.
#
function print_usage() {
printf "\\nUtility to setup an MSYS2 MinGW-w64 system for Wireshark development.\\n"
printf "The basic usage installs the needed software\\n\\n"
printf "Usage: %s [--install-optional] [...other options...]\\n" "$0"
printf "\\t--install-optional: install optional software as well\\n"
printf "\\t--install-test-deps: install packages required to run all tests\\n"
printf "\\t--install-all: install everything\\n"
printf "\\t[other]: other options are passed as-is to pacman\\n"
printf "\\tPass --noconfirm to bypass any \"are you sure?\" messages.\\n"
}
ADDITIONAL=0
TESTDEPS=0
OPTIONS=
for arg; do
case $arg in
--help)
print_usage
exit 0
;;
--install-optional)
ADDITIONAL=1
;;
--install-test-deps)
TESTDEPS=1
;;
--install-all)
ADDITIONAL=1
TESTDEPS=1
;;
*)
OPTIONS="$OPTIONS $arg"
;;
esac
done
PACKAGE_PREFIX="${MINGW_PACKAGE_PREFIX:-mingw-w64-x86_64}"
#
# Lua packaging is kind of a mess. Lua 5.2 is not available. Some packages have
# a hard dependency on LuaJIT, which conflicts with Lua 5.1, and vice versa.
# This will probably have to be fixed by the MSYS2 maintainers.
# XXX Is this still true?
#
BASIC_LIST="base-devel \
git \
${PACKAGE_PREFIX}-bcg729 \
${PACKAGE_PREFIX}-brotli \
${PACKAGE_PREFIX}-c-ares \
${PACKAGE_PREFIX}-cmake \
${PACKAGE_PREFIX}-glib2 \
${PACKAGE_PREFIX}-gnutls \
${PACKAGE_PREFIX}-libgcrypt \
${PACKAGE_PREFIX}-libilbc \
${PACKAGE_PREFIX}-libmaxminddb \
${PACKAGE_PREFIX}-nghttp2 \
${PACKAGE_PREFIX}-libpcap \
${PACKAGE_PREFIX}-libsmi \
${PACKAGE_PREFIX}-libssh \
${PACKAGE_PREFIX}-libxml2 \
${PACKAGE_PREFIX}-lz4 \
${PACKAGE_PREFIX}-minizip \
${PACKAGE_PREFIX}-ninja \
${PACKAGE_PREFIX}-opencore-amr \
${PACKAGE_PREFIX}-opus \
${PACKAGE_PREFIX}-pcre2 \
${PACKAGE_PREFIX}-python \
${PACKAGE_PREFIX}-qt6-base \
${PACKAGE_PREFIX}-qt6-multimedia \
${PACKAGE_PREFIX}-qt6-tools \
${PACKAGE_PREFIX}-qt6-translations \
${PACKAGE_PREFIX}-qt6-5compat \
${PACKAGE_PREFIX}-sbc \
${PACKAGE_PREFIX}-snappy \
${PACKAGE_PREFIX}-spandsp \
${PACKAGE_PREFIX}-speexdsp \
${PACKAGE_PREFIX}-toolchain \
${PACKAGE_PREFIX}-winsparkle \
${PACKAGE_PREFIX}-zlib \
${PACKAGE_PREFIX}-zstd"
ADDITIONAL_LIST="${PACKAGE_PREFIX}-asciidoctor \
${PACKAGE_PREFIX}-ccache \
${PACKAGE_PREFIX}-docbook-xsl \
${PACKAGE_PREFIX}-doxygen \
${PACKAGE_PREFIX}-libxslt \
${PACKAGE_PREFIX}-perl \
${PACKAGE_PREFIX}-ntldd"
TESTDEPS_LIST="${PACKAGE_PREFIX}-python-pytest \
${PACKAGE_PREFIX}-python-pytest-xdist"
ACTUAL_LIST=$BASIC_LIST
if [ $ADDITIONAL -ne 0 ]
then
ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
fi
if [ $TESTDEPS -ne 0 ]
then
ACTUAL_LIST="$ACTUAL_LIST $TESTDEPS_LIST"
fi
# Partial upgrades are unsupported.
pacman --sync --refresh --sysupgrade --needed $ACTUAL_LIST $OPTIONS || exit 2
if [ $ADDITIONAL -eq 0 ]
then
printf "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n"
fi
if [ $TESTDEPS -eq 0 ]
then
printf "\n*** Test deps not installed. Rerun with --install-test-deps to have them.\n"
fi |
Python | wireshark/tools/msys2checkdeps.py | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------------------
# list or check dependencies for binary distributions based on MSYS2 (requires the package mingw-w64-ntldd)
#
# run './msys2checkdeps.py --help' for usage information
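# e.g. (illustrative): ./msys2checkdeps.py check-missing /mingw64/bin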
# ------------------------------------------------------------------------------------------------------------------
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
from __future__ import print_function
import argparse
import os
import subprocess
import sys
SYSTEMROOT = os.environ['SYSTEMROOT']
class Dependency:
def __init__(self):
self.location = None
self.dependents = set()
def warning(msg):
print("Warning: " + msg, file=sys.stderr)
def error(msg):
print("Error: " + msg, file=sys.stderr)
exit(1)
def call_ntldd(filename):
try:
output = subprocess.check_output(['ntldd', '-R', filename], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error("'ntldd' failed with '" + str(e) + "'")
except WindowsError as e:
error("Calling 'ntldd' failed with '" + str(e) + "' (have you installed 'mingw-w64-ntldd-git'?)")
except Exception as e:
error("Calling 'ntldd' failed with '" + str(e) + "'")
return output.decode('utf-8')
def get_dependencies(filename, deps):
raw_list = call_ntldd(filename)
skip_indent = float('Inf')
parents = {}
parents[0] = os.path.basename(filename)
for line in raw_list.splitlines():
line = line[1:]
indent = len(line) - len(line.lstrip())
if indent > skip_indent:
continue
else:
skip_indent = float('Inf')
# if the dependency is not found in the working directory ntldd tries to find it on the search path
# which is indicated by the string '=>' followed by the determined location or 'not found'
if ('=>' in line):
(lib, location) = line.lstrip().split(' => ')
if location == 'not found':
location = None
else:
location = location.rsplit('(', 1)[0].strip()
else:
lib = line.rsplit('(', 1)[0].strip()
location = os.getcwd()
parents[indent+1] = lib
# we don't care about Microsoft libraries and their dependencies
if location and SYSTEMROOT in location:
skip_indent = indent
continue
if lib not in deps:
deps[lib] = Dependency()
deps[lib].location = location
deps[lib].dependents.add(parents[indent])
return deps
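# Illustrative 'ntldd -R' output (hypothetical paths) of the tree walked above:
#
# libfoo-1.dll => C:\msys64\mingw64\bin\libfoo-1.dll (0x...)
# libbar-2.dll => not found
# KERNEL32.dll => C:\Windows\SYSTEM32\KERNEL32.dll (0x...)
#
# Children appear indented one level below their parent; anything resolved
# under %SYSTEMROOT% is skipped together with its whole subtree.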
def collect_dependencies(path):
# collect dependencies
# - each key in 'deps' will be the filename of a dependency
# - the corresponding value is an instance of class Dependency (containing full path and dependents)
deps = {}
if os.path.isfile(path):
deps = get_dependencies(path, deps)
elif os.path.isdir(path):
extensions = ['.exe', '.pyd', '.dll']
exclusions = ['distutils/command/wininst'] # python
for base, dirs, files in os.walk(path):
for f in files:
filepath = os.path.join(base, f)
(_, ext) = os.path.splitext(f)
if (ext.lower() not in extensions) or any(exclusion in filepath for exclusion in exclusions):
continue
deps = get_dependencies(filepath, deps)
return deps
if __name__ == '__main__':
modes = ['list', 'list-compact', 'check', 'check-missing', 'check-unused']
# parse arguments from command line
parser = argparse.ArgumentParser(description="List or check dependencies for binary distributions based on MSYS2.\n"
"(requires the package 'mingw-w64-ntldd')",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('mode', metavar="MODE", choices=modes,
help="One of the following:\n"
" list - list dependencies in human-readable form\n"
" with full path and list of dependents\n"
" list-compact - list dependencies in compact form (as a plain list of filenames)\n"
" check - check for missing or unused dependencies (see below for details)\n"
" check-missing - check if all required dependencies are present in PATH\n"
" exits with error code 2 if missing dependencies are found\n"
" and prints the list to stderr\n"
" check-unused - check if any of the libraries in the root of PATH are unused\n"
" and prints the list to stderr")
parser.add_argument('path', metavar='PATH',
help="full or relative path to a single file or a directory to work on\n"
"(directories will be checked recursively)")
parser.add_argument('-w', '--working-directory', metavar="DIR",
help="Use custom working directory (instead of 'dirname PATH')")
args = parser.parse_args()
# check if path exists
args.path = os.path.abspath(args.path)
if not os.path.exists(args.path):
error("Can't find file/folder '" + args.path + "'")
# get root and set it as working directory (unless one is explicitly specified)
if args.working_directory:
root = os.path.abspath(args.working_directory)
elif os.path.isdir(args.path):
root = args.path
elif os.path.isfile(args.path):
root = os.path.dirname(args.path)
os.chdir(root)
# get dependencies for path recursively
deps = collect_dependencies(args.path)
# print output / prepare exit code
exit_code = 0
for dep in sorted(deps):
location = deps[dep].location
dependents = deps[dep].dependents
if args.mode == 'list':
if (location is None):
location = '---MISSING---'
print(dep + " - " + location + " (" + ", ".join(dependents) + ")")
elif args.mode == 'list-compact':
print(dep)
elif args.mode in ['check', 'check-missing']:
if ((location is None) or (root not in os.path.abspath(location))):
warning("Missing dependency " + dep + " (" + ", ".join(dependents) + ")")
exit_code = 2
# check for unused libraries
if args.mode in ['check', 'check-unused']:
installed_libs = [file for file in os.listdir(root) if file.endswith(".dll")]
deps_lower = [dep.lower() for dep in deps]
top_level_libs = [lib for lib in installed_libs if lib.lower() not in deps_lower]
for top_level_lib in top_level_libs:
warning("Unused dependency " + top_level_lib)
exit(exit_code) |
Python | wireshark/tools/ncp2222.py | #!/usr/bin/env python3
"""
Creates C code from a table of NCP type 0x2222 packet types.
(And 0x3333, which are the replies, but the packets are more commonly
refered to as type 0x2222; the 0x3333 replies are understood to be
part of the 0x2222 "family")
The data-munging code was written by Gilbert Ramirez.
The NCP data comes from Greg Morris <[email protected]>.
Many thanks to Novell for letting him work on this.
Additional data sources:
"Programmer's Guide to the NetWare Core Protocol" by Steve Conner and Dianne Conner.
At one time, Novell provided a list of NCPs by number at:
http://developer.novell.com/ndk/ncp.htm (where you could download an
*.exe file which installs a PDF, although you may have to create a login
to do this)
or
http://developer.novell.com/ndk/doc/ncp/
for a badly-formatted HTML version of the same PDF.
Currently, NCP documentation can be found at:
https://www.microfocus.com/documentation/open-enterprise-server-developer-documentation/ncp/
with a list of NCPs by number at
https://www.microfocus.com/documentation/open-enterprise-server-developer-documentation/ncp/ncpdocs/main.htm
and some additional NCPs to support volumes > 16TB at
https://www.microfocus.com/documentation/open-enterprise-server-developer-documentation/ncp/ncpdocs/16tb+.htm
NDS information can be found at:
https://www.microfocus.com/documentation/edirectory-developer-documentation/edirectory-libraries-for-c/
and PDFs linked from there, and from
https://www.novell.com/documentation/developer/ndslib/
and HTML versions linked from there.
The Novell eDirectory Schema Reference gives a "Transfer Format" for
some types, which may be the way they're sent over the wire.
Portions Copyright (c) 2000-2002 by Gilbert Ramirez <[email protected]>.
Portions Copyright (c) Novell, Inc. 2000-2003.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import os
import sys
import string
import getopt
import traceback
errors = {}
groups = {}
packets = []
compcode_lists = None
ptvc_lists = None
msg = None
reply_var = None
#ensure unique expert function declarations
expert_hash = {}
REC_START = 0
REC_LENGTH = 1
REC_FIELD = 2
REC_ENDIANNESS = 3
REC_VAR = 4
REC_REPEAT = 5
REC_REQ_COND = 6
REC_INFO_STR = 7
NO_VAR = -1
NO_REPEAT = -1
NO_REQ_COND = -1
NO_LENGTH_CHECK = -2
PROTO_LENGTH_UNKNOWN = -1
global_highest_var = -1
global_req_cond = {}
REQ_COND_SIZE_VARIABLE = "REQ_COND_SIZE_VARIABLE"
REQ_COND_SIZE_CONSTANT = "REQ_COND_SIZE_CONSTANT"
##############################################################################
# Global containers
##############################################################################
class UniqueCollection:
"""The UniqueCollection class stores objects which can be compared to other
objects of the same class. If two objects in the collection are equivalent,
only one is stored."""
def __init__(self, name):
"Constructor"
self.name = name
self.members = []
self.member_reprs = {}
def Add(self, object):
"""Add an object to the members lists, if a comparable object
doesn't already exist. The object that is in the member list, that is
either the object that was added or the comparable object that was
already in the member list, is returned."""
r = repr(object)
# Is 'object' a duplicate of some other member?
if r in self.member_reprs:
return self.member_reprs[r]
else:
self.member_reprs[r] = object
self.members.append(object)
return object
def Members(self):
"Returns the list of members."
return self.members
def HasMember(self, object):
"Does the list of members contain the object?"
if repr(object) in self.member_reprs:
return 1
else:
return 0
# This list needs to be defined before the NCP types are defined,
# because the NCP types are defined in the global scope, not inside
# a function's scope.
ptvc_lists = UniqueCollection('PTVC Lists')
##############################################################################
class NamedList:
"NamedList's keep track of PTVC's and Completion Codes"
def __init__(self, name, list):
"Constructor"
self.name = name
self.list = list
def __cmp__(self, other):
"Compare this NamedList to another"
if isinstance(other, NamedList):
return cmp(self.list, other.list)
else:
return 0
def Name(self, new_name = None):
"Get/Set name of list"
if new_name is not None:
self.name = new_name
return self.name
def Records(self):
"Returns record lists"
return self.list
def Null(self):
"Is there no list (different from an empty list)?"
return self.list is None
def Empty(self):
"It the list empty (different from a null list)?"
assert(not self.Null())
if self.list:
return 0
else:
return 1
def __repr__(self):
return repr(self.list)
class PTVC(NamedList):
"""ProtoTree TVBuff Cursor List ("PTVC List") Class"""
def __init__(self, name, records, code):
"Constructor"
NamedList.__init__(self, name, [])
global global_highest_var
expected_offset = None
highest_var = -1
named_vars = {}
# Make a PTVCRecord object for each list in 'records'
for record in records:
offset = record[REC_START]
length = record[REC_LENGTH]
field = record[REC_FIELD]
endianness = record[REC_ENDIANNESS]
info_str = record[REC_INFO_STR]
# Variable
var_name = record[REC_VAR]
if var_name:
# Did we already define this var?
if var_name in named_vars:
sys.exit("%s has multiple %s vars." % \
(name, var_name))
highest_var = highest_var + 1
var = highest_var
if highest_var > global_highest_var:
global_highest_var = highest_var
named_vars[var_name] = var
else:
var = NO_VAR
# Repeat
repeat_name = record[REC_REPEAT]
if repeat_name:
# Do we have this var?
if repeat_name not in named_vars:
sys.exit("%s does not have %s var defined." % \
(name, repeat_name))
repeat = named_vars[repeat_name]
else:
repeat = NO_REPEAT
# Request Condition
req_cond = record[REC_REQ_COND]
if req_cond != NO_REQ_COND:
global_req_cond[req_cond] = None
ptvc_rec = PTVCRecord(field, length, endianness, var, repeat, req_cond, info_str, code)
if expected_offset is None:
expected_offset = offset
elif expected_offset == -1:
pass
elif expected_offset != offset and offset != -1:
msg.write("Expected offset in %s for %s to be %d\n" % \
(name, field.HFName(), expected_offset))
sys.exit(1)
# We can't make a PTVC list from a variable-length
# packet, unless the fields can tell us at run time
# how long the packet is. That is, nstring8 is fine, since
# the field has an integer telling us how long the string is.
# Fields that don't have a length determinable at run-time
# cannot be variable-length.
if type(ptvc_rec.Length()) == type(()):
if isinstance(ptvc_rec.Field(), nstring):
expected_offset = -1
pass
elif isinstance(ptvc_rec.Field(), nbytes):
expected_offset = -1
pass
elif isinstance(ptvc_rec.Field(), struct):
expected_offset = -1
pass
else:
field = ptvc_rec.Field()
assert 0, "Cannot make PTVC from %s, type %s" % \
(field.HFName(), field)
elif expected_offset > -1:
if ptvc_rec.Length() < 0:
expected_offset = -1
else:
expected_offset = expected_offset + ptvc_rec.Length()
self.list.append(ptvc_rec)
def ETTName(self):
return "ett_%s" % (self.Name(),)
def Code(self):
x = "static const ptvc_record %s[] = {\n" % (self.Name())
for ptvc_rec in self.list:
x = x + " %s,\n" % (ptvc_rec.Code())
x = x + " { NULL, 0, NULL, NULL, NO_ENDIANNESS, NO_VAR, NO_REPEAT, NO_REQ_COND }\n"
x = x + "};\n"
return x
def __repr__(self):
x = ""
for ptvc_rec in self.list:
x = x + repr(ptvc_rec)
return x
class PTVCBitfield(PTVC):
def __init__(self, name, vars):
NamedList.__init__(self, name, [])
for var in vars:
ptvc_rec = PTVCRecord(var, var.Length(), var.Endianness(),
NO_VAR, NO_REPEAT, NO_REQ_COND, None, 0)
self.list.append(ptvc_rec)
def Code(self):
ett_name = self.ETTName()
x = "static int %s = -1;\n" % (ett_name,)
x = x + "static const ptvc_record ptvc_%s[] = {\n" % (self.Name())
for ptvc_rec in self.list:
x = x + " %s,\n" % (ptvc_rec.Code())
x = x + " { NULL, 0, NULL, NULL, NO_ENDIANNESS, NO_VAR, NO_REPEAT, NO_REQ_COND }\n"
x = x + "};\n"
x = x + "static const sub_ptvc_record %s = {\n" % (self.Name(),)
x = x + " &%s,\n" % (ett_name,)
x = x + " NULL,\n"
x = x + " ptvc_%s,\n" % (self.Name(),)
x = x + "};\n"
return x
class PTVCRecord:
def __init__(self, field, length, endianness, var, repeat, req_cond, info_str, code):
"Constructor"
self.field = field
self.length = length
self.endianness = endianness
self.var = var
self.repeat = repeat
self.req_cond = req_cond
self.req_info_str = info_str
self.__code__ = code
def __cmp__(self, other):
"Comparison operator"
if self.field != other.field:
return 1
elif self.length < other.length:
return -1
elif self.length > other.length:
return 1
elif self.endianness != other.endianness:
return 1
else:
return 0
def Code(self):
# Nice textual representations
if self.var == NO_VAR:
var = "NO_VAR"
else:
var = self.var
if self.repeat == NO_REPEAT:
repeat = "NO_REPEAT"
else:
repeat = self.repeat
if self.req_cond == NO_REQ_COND:
req_cond = "NO_REQ_COND"
else:
req_cond = global_req_cond[self.req_cond]
assert req_cond is not None
if isinstance(self.field, struct):
return self.field.ReferenceString(var, repeat, req_cond)
else:
return self.RegularCode(var, repeat, req_cond)
def InfoStrName(self):
"Returns a C symbol based on the NCP function code, for the info_str"
return "info_str_0x%x" % (self.__code__)
def RegularCode(self, var, repeat, req_cond):
"String representation"
endianness = 'ENC_BIG_ENDIAN'
if self.endianness == ENC_LITTLE_ENDIAN:
endianness = 'ENC_LITTLE_ENDIAN'
length = None
if type(self.length) == type(0):
length = self.length
else:
# This is for cases where a length is needed
# in order to determine a following variable-length,
# like nstring8, where 1 byte is needed in order
# to determine the variable length.
var_length = self.field.Length()
if var_length > 0:
length = var_length
if length == PROTO_LENGTH_UNKNOWN:
# XXX length = "PROTO_LENGTH_UNKNOWN"
pass
assert length, "Length not handled for %s" % (self.field.HFName(),)
sub_ptvc_name = self.field.PTVCName()
if sub_ptvc_name != "NULL":
sub_ptvc_name = "&%s" % (sub_ptvc_name,)
if self.req_info_str:
req_info_str = "&" + self.InfoStrName() + "_req"
else:
req_info_str = "NULL"
return "{ &%s, %s, %s, %s, %s, %s, %s, %s }" % \
(self.field.HFName(), length, sub_ptvc_name,
req_info_str, endianness, var, repeat, req_cond)
def Offset(self):
return self.offset
def Length(self):
return self.length
def Field(self):
return self.field
def __repr__(self):
if self.req_info_str:
return "{%s len=%s end=%s var=%s rpt=%s rqc=%s info=%s}" % \
(self.field.HFName(), self.length,
self.endianness, self.var, self.repeat, self.req_cond, self.req_info_str[1])
else:
return "{%s len=%s end=%s var=%s rpt=%s rqc=%s}" % \
(self.field.HFName(), self.length,
self.endianness, self.var, self.repeat, self.req_cond)
##############################################################################
class NCP:
"NCP Packet class"
def __init__(self, func_code, description, group, has_length=1):
"Constructor"
self.__code__ = func_code
self.description = description
self.group = group
self.codes = None
self.request_records = None
self.reply_records = None
self.has_length = has_length
self.req_cond_size = None
self.req_info_str = None
self.expert_func = None
if group not in groups:
msg.write("NCP 0x%x has invalid group '%s'\n" % \
(self.__code__, group))
sys.exit(1)
if self.HasSubFunction():
# NCP Function with SubFunction
self.start_offset = 10
else:
# Simple NCP Function
self.start_offset = 7
def ReqCondSize(self):
return self.req_cond_size
def ReqCondSizeVariable(self):
self.req_cond_size = REQ_COND_SIZE_VARIABLE
def ReqCondSizeConstant(self):
self.req_cond_size = REQ_COND_SIZE_CONSTANT
def FunctionCode(self, part=None):
"Returns the function code for this NCP packet."
if part is None:
return self.__code__
elif part == 'high':
if self.HasSubFunction():
return (self.__code__ & 0xff00) >> 8
else:
return self.__code__
elif part == 'low':
if self.HasSubFunction():
return self.__code__ & 0x00ff
else:
return 0x00
else:
msg.write("Unknown directive '%s' for function_code()\n" % (part))
sys.exit(1)
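# For example, an NCP function with a subfunction, such as 0x5701, splits
# into FunctionCode('high') == 0x57 and FunctionCode('low') == 0x01, while
# a simple function such as 0x18 gives high == 0x18 and low == 0x00.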
def HasSubFunction(self):
"Does this NPC packet require a subfunction field?"
if self.__code__ <= 0xff:
return 0
else:
return 1
def HasLength(self):
return self.has_length
def Description(self):
return self.description
def Group(self):
return self.group
def PTVCRequest(self):
return self.ptvc_request
def PTVCReply(self):
return self.ptvc_reply
def Request(self, size, records=[], **kwargs):
self.request_size = size
self.request_records = records
if self.HasSubFunction():
if self.HasLength():
self.CheckRecords(size, records, "Request", 10)
else:
self.CheckRecords(size, records, "Request", 8)
else:
self.CheckRecords(size, records, "Request", 7)
self.ptvc_request = self.MakePTVC(records, "request", self.__code__)
if "info_str" in kwargs:
self.req_info_str = kwargs["info_str"]
def Reply(self, size, records=[]):
self.reply_size = size
self.reply_records = records
self.CheckRecords(size, records, "Reply", 8)
self.ptvc_reply = self.MakePTVC(records, "reply", self.__code__)
def CheckRecords(self, size, records, descr, min_hdr_length):
"Simple sanity check"
if size == NO_LENGTH_CHECK:
return
min = size
max = size
if type(size) == type(()):
min = size[0]
max = size[1]
lower = min_hdr_length
upper = min_hdr_length
for record in records:
rec_size = record[REC_LENGTH]
rec_lower = rec_size
rec_upper = rec_size
if type(rec_size) == type(()):
rec_lower = rec_size[0]
rec_upper = rec_size[1]
lower = lower + rec_lower
upper = upper + rec_upper
error = 0
if min != lower:
msg.write("%s records for 2222/0x%x sum to %d bytes minimum, but param1 shows %d\n" \
% (descr, self.FunctionCode(), lower, min))
error = 1
if max != upper:
msg.write("%s records for 2222/0x%x sum to %d bytes maximum, but param1 shows %d\n" \
% (descr, self.FunctionCode(), upper, max))
error = 1
if error == 1:
sys.exit(1)
def MakePTVC(self, records, name_suffix, code):
"""Makes a PTVC out of a request or reply record list. Possibly adds
it to the global list of PTVCs (the global list is a UniqueCollection,
so an equivalent PTVC may already be in the global list)."""
name = "%s_%s" % (self.CName(), name_suffix)
#if any individual record has an info_str, bubble it up to the top
#so an info_string_t can be created for it
for record in records:
if record[REC_INFO_STR]:
self.req_info_str = record[REC_INFO_STR]
ptvc = PTVC(name, records, code)
#if the record is a duplicate, remove the req_info_str so
#that an unused info_string isn't generated
remove_info = 0
if ptvc_lists.HasMember(ptvc):
if 'info' in repr(ptvc):
remove_info = 1
ptvc_test = ptvc_lists.Add(ptvc)
if remove_info:
self.req_info_str = None
return ptvc_test
def CName(self):
"Returns a C symbol based on the NCP function code"
return "ncp_0x%x" % (self.__code__)
def InfoStrName(self):
"Returns a C symbol based on the NCP function code, for the info_str"
return "info_str_0x%x" % (self.__code__)
def MakeExpert(self, func):
self.expert_func = func
expert_hash[func] = func
def Variables(self):
"""Returns a list of variables used in the request and reply records.
A variable is listed only once, even if it is used twice (once in
the request, once in the reply)."""
variables = {}
if self.request_records:
for record in self.request_records:
var = record[REC_FIELD]
variables[var.HFName()] = var
sub_vars = var.SubVariables()
for sv in sub_vars:
variables[sv.HFName()] = sv
if self.reply_records:
for record in self.reply_records:
var = record[REC_FIELD]
variables[var.HFName()] = var
sub_vars = var.SubVariables()
for sv in sub_vars:
variables[sv.HFName()] = sv
return list(variables.values())
def CalculateReqConds(self):
"""Returns a list of request conditions (dfilter text) used
in the reply records. A request condition is listed only once, even if several reply records use it."""
texts = {}
if self.reply_records:
for record in self.reply_records:
text = record[REC_REQ_COND]
if text != NO_REQ_COND:
texts[text] = None
if len(texts) == 0:
self.req_conds = None
return None
dfilter_texts = list(texts.keys())
dfilter_texts.sort()
name = "%s_req_cond_indexes" % (self.CName(),)
return NamedList(name, dfilter_texts)
def GetReqConds(self):
return self.req_conds
def SetReqConds(self, new_val):
self.req_conds = new_val
def CompletionCodes(self, codes=None):
"""Sets or returns the list of completion
codes. Internally, a NamedList is used to store the
completion codes, but the caller of this function never
realizes that because Python lists are the input and
output."""
if codes is None:
return self.codes
# Sanity check
okay = 1
for code in codes:
if code not in errors:
msg.write("Errors table does not have key 0x%04x for NCP=0x%x\n" % (code,
self.__code__))
okay = 0
# Delay the exit until here so that the programmer can get
# the complete list of missing error codes
if not okay:
sys.exit(1)
# Create CompletionCode (NamedList) object and possible
# add it to the global list of completion code lists.
name = "%s_errors" % (self.CName(),)
codes.sort()
codes_list = NamedList(name, codes)
self.codes = compcode_lists.Add(codes_list)
self.Finalize()
def Finalize(self):
"""Adds the NCP object to the global collection of NCP
objects. This is done automatically after setting the
CompletionCode list. Yes, this is a shortcut, but it makes
our list of NCP packet definitions look neater, since an
explicit "add to global list of packets" is not needed."""
# Add packet to global collection of packets
packets.append(self)
def rec(start, length, field, endianness=None, **kw):
return _rec(start, length, field, endianness, kw)
def srec(field, endianness=None, **kw):
return _rec(-1, -1, field, endianness, kw)
def _rec(start, length, field, endianness, kw):
# If endianness not explicitly given, use the field's
# default endianness.
if endianness is None:
endianness = field.Endianness()
# Setting a var?
if "var" in kw:
# Is the field an INT ?
if not isinstance(field, CountingNumber):
sys.exit("Field %s used as count variable, but not integer." \
% (field.HFName()))
var = kw["var"]
else:
var = None
# If 'var' not used, 'repeat' can be used.
if not var and "repeat" in kw:
repeat = kw["repeat"]
else:
repeat = None
# Request-condition ?
if "req_cond" in kw:
req_cond = kw["req_cond"]
else:
req_cond = NO_REQ_COND
if "info_str" in kw:
req_info_str = kw["info_str"]
else:
req_info_str = None
return [start, length, field, endianness, var, repeat, req_cond, req_info_str]
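# Illustrative use (hypothetical layout): a request record list built with
# these helpers might read
# [ rec( 10, 1, AccountVersion ),
#   rec( 11, 2, AcceptedMaxSize, var="x" ),
#   rec( 13, 4, AccountBalance, repeat="x" ) ]
# where "var" captures a run-time count from an integer field and "repeat"
# replays a record that many times.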
##############################################################################
ENC_LITTLE_ENDIAN = 1 # Little-Endian
ENC_BIG_ENDIAN = 0 # Big-Endian
NA = -1 # Not Applicable
class Type:
" Virtual class for NCP field types"
type = "Type"
ftype = None
disp = "BASE_DEC"
custom_func = None
endianness = NA
values = []
def __init__(self, abbrev, descr, bytes, endianness = NA):
self.abbrev = abbrev
self.descr = descr
self.bytes = bytes
self.endianness = endianness
self.hfname = "hf_ncp_" + self.abbrev
def Length(self):
return self.bytes
def Abbreviation(self):
return self.abbrev
def Description(self):
return self.descr
def HFName(self):
return self.hfname
def DFilter(self):
return "ncp." + self.abbrev
def WiresharkFType(self):
return self.ftype
def Display(self, newval=None):
if newval is not None:
self.disp = newval
return self.disp
def ValuesName(self):
if self.custom_func:
return "CF_FUNC(" + self.custom_func + ")"
else:
return "NULL"
def Mask(self):
return 0
def Endianness(self):
return self.endianness
def SubVariables(self):
return []
def PTVCName(self):
return "NULL"
def NWDate(self):
self.disp = "BASE_CUSTOM"
self.custom_func = "padd_date"
def NWTime(self):
self.disp = "BASE_CUSTOM"
self.custom_func = "padd_time"
#def __cmp__(self, other):
# return cmp(self.hfname, other.hfname)
def __lt__(self, other):
return (self.hfname < other.hfname)
class struct(PTVC, Type):
def __init__(self, name, items, descr=None):
name = "struct_%s" % (name,)
NamedList.__init__(self, name, [])
self.bytes = 0
self.descr = descr
for item in items:
if isinstance(item, Type):
field = item
length = field.Length()
endianness = field.Endianness()
var = NO_VAR
repeat = NO_REPEAT
req_cond = NO_REQ_COND
elif type(item) == type([]):
field = item[REC_FIELD]
length = item[REC_LENGTH]
endianness = item[REC_ENDIANNESS]
var = item[REC_VAR]
repeat = item[REC_REPEAT]
req_cond = item[REC_REQ_COND]
else:
assert 0, "Item %s item not handled." % (item,)
ptvc_rec = PTVCRecord(field, length, endianness, var,
repeat, req_cond, None, 0)
self.list.append(ptvc_rec)
self.bytes = self.bytes + field.Length()
self.hfname = self.name
def Variables(self):
vars = []
for ptvc_rec in self.list:
vars.append(ptvc_rec.Field())
return vars
def ReferenceString(self, var, repeat, req_cond):
return "{ PTVC_STRUCT, NO_LENGTH, &%s, NULL, NO_ENDIANNESS, %s, %s, %s }" % \
(self.name, var, repeat, req_cond)
def Code(self):
ett_name = self.ETTName()
x = "static int %s = -1;\n" % (ett_name,)
x = x + "static const ptvc_record ptvc_%s[] = {\n" % (self.name,)
for ptvc_rec in self.list:
x = x + " %s,\n" % (ptvc_rec.Code())
x = x + " { NULL, NO_LENGTH, NULL, NULL, NO_ENDIANNESS, NO_VAR, NO_REPEAT, NO_REQ_COND }\n"
x = x + "};\n"
x = x + "static const sub_ptvc_record %s = {\n" % (self.name,)
x = x + " &%s,\n" % (ett_name,)
if self.descr:
x = x + ' "%s",\n' % (self.descr,)
else:
x = x + " NULL,\n"
x = x + " ptvc_%s,\n" % (self.Name(),)
x = x + "};\n"
return x
def __cmp__(self, other):
return cmp(self.HFName(), other.HFName())
class byte(Type):
type = "byte"
ftype = "FT_UINT8"
def __init__(self, abbrev, descr):
Type.__init__(self, abbrev, descr, 1)
class CountingNumber:
pass
# Same as above. Both are provided for convenience
class uint8(Type, CountingNumber):
type = "uint8"
ftype = "FT_UINT8"
bytes = 1
def __init__(self, abbrev, descr):
Type.__init__(self, abbrev, descr, 1)
class uint16(Type, CountingNumber):
type = "uint16"
ftype = "FT_UINT16"
def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
Type.__init__(self, abbrev, descr, 2, endianness)
class uint24(Type, CountingNumber):
type = "uint24"
ftype = "FT_UINT24"
def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
Type.__init__(self, abbrev, descr, 3, endianness)
class uint32(Type, CountingNumber):
type = "uint32"
ftype = "FT_UINT32"
def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
Type.__init__(self, abbrev, descr, 4, endianness)
class uint64(Type, CountingNumber):
type = "uint64"
ftype = "FT_UINT64"
def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
Type.__init__(self, abbrev, descr, 8, endianness)
class eptime(Type, CountingNumber):
type = "eptime"
ftype = "FT_ABSOLUTE_TIME"
disp = "ABSOLUTE_TIME_LOCAL"
def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
Type.__init__(self, abbrev, descr, 4, endianness)
class boolean8(uint8):
type = "boolean8"
ftype = "FT_BOOLEAN"
disp = "BASE_NONE"
class boolean16(uint16):
type = "boolean16"
ftype = "FT_BOOLEAN"
disp = "BASE_NONE"
class boolean24(uint24):
type = "boolean24"
ftype = "FT_BOOLEAN"
disp = "BASE_NONE"
class boolean32(uint32):
type = "boolean32"
ftype = "FT_BOOLEAN"
disp = "BASE_NONE"
class nstring:
pass
class nstring8(Type, nstring):
"""A string of up to (2^8)-1 characters. The first byte
gives the string length."""
type = "nstring8"
ftype = "FT_UINT_STRING"
disp = "BASE_NONE"
def __init__(self, abbrev, descr):
Type.__init__(self, abbrev, descr, 1)
class nstring16(Type, nstring):
"""A string of up to (2^16)-2 characters. The first 2 bytes
give the string length."""
type = "nstring16"
ftype = "FT_UINT_STRING"
disp = "BASE_NONE"
def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
Type.__init__(self, abbrev, descr, 2, endianness)
class nstring32(Type, nstring):
"""A string of up to (2^32)-4 characters. The first 4 bytes
give the string length."""
type = "nstring32"
ftype = "FT_UINT_STRING"
disp = "BASE_NONE"
def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
Type.__init__(self, abbrev, descr, 4, endianness)
class fw_string(Type):
"""A fixed-width string of n bytes."""
type = "fw_string"
disp = "BASE_NONE"
ftype = "FT_STRING"
def __init__(self, abbrev, descr, bytes):
Type.__init__(self, abbrev, descr, bytes)
class stringz(Type):
"NUL-terminated string, with a maximum length"
type = "stringz"
disp = "BASE_NONE"
ftype = "FT_STRINGZ"
def __init__(self, abbrev, descr):
Type.__init__(self, abbrev, descr, PROTO_LENGTH_UNKNOWN)
class val_string(Type):
"""Abstract class for val_stringN, where N is number
of bits that key takes up."""
type = "val_string"
disp = 'BASE_HEX'
def __init__(self, abbrev, descr, val_string_array, endianness = ENC_LITTLE_ENDIAN):
Type.__init__(self, abbrev, descr, self.bytes, endianness)
self.values = val_string_array
def Code(self):
result = "static const value_string %s[] = {\n" \
% (self.ValuesCName())
for val_record in self.values:
value = val_record[0]
text = val_record[1]
value_repr = self.value_format % value
result = result + ' { %s, "%s" },\n' \
% (value_repr, text)
value_repr = self.value_format % 0
result = result + " { %s, NULL },\n" % (value_repr)
result = result + "};\n"
return result
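# Illustrative output of Code() for the two-entry AbortQueueFlag defined
# further below:
#
# static const value_string ncp_abort_q_flag_vals[] = {
#     { 0x00, "Place at End of Queue" },
#     { 0x01, "Do Not Place Spool File, Examine Flags" },
#     { 0x00, NULL },
# };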
def ValuesCName(self):
return "ncp_%s_vals" % (self.abbrev)
def ValuesName(self):
return "VALS(%s)" % (self.ValuesCName())
class val_string8(val_string):
type = "val_string8"
ftype = "FT_UINT8"
bytes = 1
value_format = "0x%02x"
class val_string16(val_string):
type = "val_string16"
ftype = "FT_UINT16"
bytes = 2
value_format = "0x%04x"
class val_string32(val_string):
type = "val_string32"
ftype = "FT_UINT32"
bytes = 4
value_format = "0x%08x"
class bytes(Type):
type = 'bytes'
disp = "BASE_NONE"
ftype = 'FT_BYTES'
def __init__(self, abbrev, descr, bytes):
Type.__init__(self, abbrev, descr, bytes, NA)
class nbytes:
pass
class nbytes8(Type, nbytes):
"""A series of up to (2^8)-1 bytes. The first byte
gives the byte-string length."""
type = "nbytes8"
ftype = "FT_UINT_BYTES"
disp = "BASE_NONE"
def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
Type.__init__(self, abbrev, descr, 1, endianness)
class nbytes16(Type, nbytes):
"""A series of up to (2^16)-2 bytes. The first 2 bytes
give the byte-string length."""
type = "nbytes16"
ftype = "FT_UINT_BYTES"
disp = "BASE_NONE"
def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
Type.__init__(self, abbrev, descr, 2, endianness)
class nbytes32(Type, nbytes):
"""A series of up to (2^32)-4 bytes. The first 4 bytes
give the byte-string length."""
type = "nbytes32"
ftype = "FT_UINT_BYTES"
disp = "BASE_NONE"
def __init__(self, abbrev, descr, endianness = ENC_LITTLE_ENDIAN):
Type.__init__(self, abbrev, descr, 4, endianness)
class bf_uint(Type):
type = "bf_uint"
disp = None
def __init__(self, bitmask, abbrev, descr, endianness=ENC_LITTLE_ENDIAN):
Type.__init__(self, abbrev, descr, self.bytes, endianness)
self.bitmask = bitmask
def Mask(self):
return self.bitmask
class bf_val_str(bf_uint):
type = "bf_uint"
disp = None
def __init__(self, bitmask, abbrev, descr, val_string_array, endianness=ENC_LITTLE_ENDIAN):
bf_uint.__init__(self, bitmask, abbrev, descr, endianness)
self.values = val_string_array
def ValuesName(self):
return "VALS(%s)" % (self.ValuesCName())
class bf_val_str8(bf_val_str, val_string8):
type = "bf_val_str8"
ftype = "FT_UINT8"
disp = "BASE_HEX"
bytes = 1
class bf_val_str16(bf_val_str, val_string16):
type = "bf_val_str16"
ftype = "FT_UINT16"
disp = "BASE_HEX"
bytes = 2
class bf_val_str32(bf_val_str, val_string32):
type = "bf_val_str32"
ftype = "FT_UINT32"
disp = "BASE_HEX"
bytes = 4
class bf_boolean:
disp = "BASE_NONE"
class bf_boolean8(bf_uint, boolean8, bf_boolean):
type = "bf_boolean8"
ftype = "FT_BOOLEAN"
disp = "8"
bytes = 1
class bf_boolean16(bf_uint, boolean16, bf_boolean):
type = "bf_boolean16"
ftype = "FT_BOOLEAN"
disp = "16"
bytes = 2
class bf_boolean24(bf_uint, boolean24, bf_boolean):
type = "bf_boolean24"
ftype = "FT_BOOLEAN"
disp = "24"
bytes = 3
class bf_boolean32(bf_uint, boolean32, bf_boolean):
type = "bf_boolean32"
ftype = "FT_BOOLEAN"
disp = "32"
bytes = 4
class bitfield(Type):
type = "bitfield"
disp = 'BASE_HEX'
def __init__(self, vars):
var_hash = {}
for var in vars:
if isinstance(var, bf_boolean):
if not isinstance(var, self.bf_type):
print("%s must be of type %s" % \
(var.Abbreviation(),
self.bf_type))
sys.exit(1)
var_hash[var.bitmask] = var
bitmasks = list(var_hash.keys())
bitmasks.sort()
bitmasks.reverse()
ordered_vars = []
for bitmask in bitmasks:
var = var_hash[bitmask]
ordered_vars.append(var)
self.vars = ordered_vars
self.ptvcname = "ncp_%s_bitfield" % (self.abbrev,)
self.hfname = "hf_ncp_%s" % (self.abbrev,)
self.sub_ptvc = PTVCBitfield(self.PTVCName(), self.vars)
def SubVariables(self):
return self.vars
def SubVariablesPTVC(self):
return self.sub_ptvc
def PTVCName(self):
return self.ptvcname
class bitfield8(bitfield, uint8):
type = "bitfield8"
ftype = "FT_UINT8"
bf_type = bf_boolean8
def __init__(self, abbrev, descr, vars):
uint8.__init__(self, abbrev, descr)
bitfield.__init__(self, vars)
class bitfield16(bitfield, uint16):
type = "bitfield16"
ftype = "FT_UINT16"
bf_type = bf_boolean16
def __init__(self, abbrev, descr, vars, endianness=ENC_LITTLE_ENDIAN):
uint16.__init__(self, abbrev, descr, endianness)
bitfield.__init__(self, vars)
class bitfield24(bitfield, uint24):
type = "bitfield24"
ftype = "FT_UINT24"
bf_type = bf_boolean24
def __init__(self, abbrev, descr, vars, endianness=ENC_LITTLE_ENDIAN):
uint24.__init__(self, abbrev, descr, endianness)
bitfield.__init__(self, vars)
class bitfield32(bitfield, uint32):
type = "bitfield32"
ftype = "FT_UINT32"
bf_type = bf_boolean32
def __init__(self, abbrev, descr, vars, endianness=ENC_LITTLE_ENDIAN):
uint32.__init__(self, abbrev, descr, endianness)
bitfield.__init__(self, vars)
#
# Force the endianness of a field to a non-default value; used in
# the list of fields of a structure.
#
def endian(field, endianness):
return [-1, field.Length(), field, endianness, NO_VAR, NO_REPEAT, NO_REQ_COND]
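# e.g. (illustrative): endian(AFPEntryID, ENC_LITTLE_ENDIAN) inside a
# struct() item list overrides that field's default big-endian order.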
##############################################################################
# NCP Field Types. Defined in Appendix A of "Programmer's Guide..."
##############################################################################
AbortQueueFlag = val_string8("abort_q_flag", "Abort Queue Flag", [
[ 0x00, "Place at End of Queue" ],
[ 0x01, "Do Not Place Spool File, Examine Flags" ],
])
AcceptedMaxSize = uint16("accepted_max_size", "Accepted Max Size")
AcceptedMaxSize64 = uint64("accepted_max_size64", "Accepted Max Size")
AccessControl = val_string8("access_control", "Access Control", [
[ 0x00, "Open for read by this client" ],
[ 0x01, "Open for write by this client" ],
[ 0x02, "Deny read requests from other stations" ],
[ 0x03, "Deny write requests from other stations" ],
[ 0x04, "File detached" ],
[ 0x05, "TTS holding detach" ],
[ 0x06, "TTS holding open" ],
])
AccessDate = uint16("access_date", "Access Date")
AccessDate.NWDate()
AccessMode = bitfield8("access_mode", "Access Mode", [
bf_boolean8(0x01, "acc_mode_read", "Read Access"),
bf_boolean8(0x02, "acc_mode_write", "Write Access"),
bf_boolean8(0x04, "acc_mode_deny_read", "Deny Read Access"),
bf_boolean8(0x08, "acc_mode_deny_write", "Deny Write Access"),
bf_boolean8(0x10, "acc_mode_comp", "Compatibility Mode"),
])
AccessPrivileges = bitfield8("access_privileges", "Access Privileges", [
bf_boolean8(0x01, "acc_priv_read", "Read Privileges (files only)"),
bf_boolean8(0x02, "acc_priv_write", "Write Privileges (files only)"),
bf_boolean8(0x04, "acc_priv_open", "Open Privileges (files only)"),
bf_boolean8(0x08, "acc_priv_create", "Create Privileges (files only)"),
bf_boolean8(0x10, "acc_priv_delete", "Delete Privileges (files only)"),
bf_boolean8(0x20, "acc_priv_parent", "Parental Privileges (directories only for creating, deleting, and renaming)"),
bf_boolean8(0x40, "acc_priv_search", "Search Privileges (directories only)"),
bf_boolean8(0x80, "acc_priv_modify", "Modify File Status Flags Privileges (files and directories)"),
])
AccessRightsMask = bitfield8("access_rights_mask", "Access Rights", [
bf_boolean8(0x0001, "acc_rights_read", "Read Rights"),
bf_boolean8(0x0002, "acc_rights_write", "Write Rights"),
bf_boolean8(0x0004, "acc_rights_open", "Open Rights"),
bf_boolean8(0x0008, "acc_rights_create", "Create Rights"),
bf_boolean8(0x0010, "acc_rights_delete", "Delete Rights"),
bf_boolean8(0x0020, "acc_rights_parent", "Parental Rights"),
bf_boolean8(0x0040, "acc_rights_search", "Search Rights"),
bf_boolean8(0x0080, "acc_rights_modify", "Modify Rights"),
])
AccessRightsMaskWord = bitfield16("access_rights_mask_word", "Access Rights", [
bf_boolean16(0x0001, "acc_rights1_read", "Read Rights"),
bf_boolean16(0x0002, "acc_rights1_write", "Write Rights"),
bf_boolean16(0x0004, "acc_rights1_open", "Open Rights"),
bf_boolean16(0x0008, "acc_rights1_create", "Create Rights"),
bf_boolean16(0x0010, "acc_rights1_delete", "Delete Rights"),
bf_boolean16(0x0020, "acc_rights1_parent", "Parental Rights"),
bf_boolean16(0x0040, "acc_rights1_search", "Search Rights"),
bf_boolean16(0x0080, "acc_rights1_modify", "Modify Rights"),
bf_boolean16(0x0100, "acc_rights1_supervisor", "Supervisor Access Rights"),
])
AccountBalance = uint32("account_balance", "Account Balance")
AccountVersion = uint8("acct_version", "Acct Version")
ActionFlag = bitfield8("action_flag", "Action Flag", [
bf_boolean8(0x01, "act_flag_open", "Open"),
bf_boolean8(0x02, "act_flag_replace", "Replace"),
bf_boolean8(0x10, "act_flag_create", "Create"),
])
ActiveConnBitList = fw_string("active_conn_bit_list", "Active Connection List", 512)
ActiveIndexedFiles = uint16("active_indexed_files", "Active Indexed Files")
ActualMaxBinderyObjects = uint16("actual_max_bindery_objects", "Actual Max Bindery Objects")
ActualMaxIndexedFiles = uint16("actual_max_indexed_files", "Actual Max Indexed Files")
ActualMaxOpenFiles = uint16("actual_max_open_files", "Actual Max Open Files")
ActualMaxSimultaneousTransactions = uint16("actual_max_sim_trans", "Actual Max Simultaneous Transactions")
ActualMaxUsedDirectoryEntries = uint16("actual_max_used_directory_entries", "Actual Max Used Directory Entries")
ActualMaxUsedRoutingBuffers = uint16("actual_max_used_routing_buffers", "Actual Max Used Routing Buffers")
ActualResponseCount = uint16("actual_response_count", "Actual Response Count")
AddNameSpaceAndVol = stringz("add_nm_spc_and_vol", "Add Name Space and Volume")
AFPEntryID = uint32("afp_entry_id", "AFP Entry ID", ENC_BIG_ENDIAN)
AFPEntryID.Display("BASE_HEX")
AllocAvailByte = uint32("alloc_avail_byte", "Bytes Available for Allocation")
AllocateMode = bitfield16("alloc_mode", "Allocate Mode", [
bf_val_str16(0x0001, "alloc_dir_hdl", "Dir Handle Type",[
[0x00, "Permanent"],
[0x01, "Temporary"],
]),
bf_boolean16(0x0002, "alloc_spec_temp_dir_hdl","Special Temporary Directory Handle"),
bf_boolean16(0x4000, "alloc_reply_lvl2","Reply Level 2"),
bf_boolean16(0x8000, "alloc_dst_name_spc","Destination Name Space Input Parameter"),
])
AllocationBlockSize = uint32("allocation_block_size", "Allocation Block Size")
AllocFreeCount = uint32("alloc_free_count", "Reclaimable Free Bytes")
ApplicationNumber = uint16("application_number", "Application Number")
ArchivedTime = uint16("archived_time", "Archived Time")
ArchivedTime.NWTime()
ArchivedDate = uint16("archived_date", "Archived Date")
ArchivedDate.NWDate()
ArchiverID = uint32("archiver_id", "Archiver ID", ENC_BIG_ENDIAN)
ArchiverID.Display("BASE_HEX")
AssociatedNameSpace = uint8("associated_name_space", "Associated Name Space")
AttachDuringProcessing = uint16("attach_during_processing", "Attach During Processing")
AttachedIndexedFiles = uint8("attached_indexed_files", "Attached Indexed Files")
AttachWhileProcessingAttach = uint16("attach_while_processing_attach", "Attach While Processing Attach")
Attributes = uint32("attributes", "Attributes")
AttributesDef = bitfield8("attr_def", "Attributes", [
bf_boolean8(0x01, "att_def_ro", "Read Only"),
bf_boolean8(0x02, "att_def_hidden", "Hidden"),
bf_boolean8(0x04, "att_def_system", "System"),
bf_boolean8(0x08, "att_def_execute", "Execute"),
bf_boolean8(0x10, "att_def_sub_only", "Subdirectory"),
bf_boolean8(0x20, "att_def_archive", "Archive"),
bf_boolean8(0x80, "att_def_shareable", "Shareable"),
])
AttributesDef16 = bitfield16("attr_def_16", "Attributes", [
bf_boolean16(0x0001, "att_def16_ro", "Read Only"),
bf_boolean16(0x0002, "att_def16_hidden", "Hidden"),
bf_boolean16(0x0004, "att_def16_system", "System"),
bf_boolean16(0x0008, "att_def16_execute", "Execute"),
bf_boolean16(0x0010, "att_def16_sub_only", "Subdirectory"),
bf_boolean16(0x0020, "att_def16_archive", "Archive"),
bf_boolean16(0x0080, "att_def16_shareable", "Shareable"),
bf_boolean16(0x1000, "att_def16_transaction", "Transactional"),
bf_boolean16(0x4000, "att_def16_read_audit", "Read Audit"),
bf_boolean16(0x8000, "att_def16_write_audit", "Write Audit"),
])
AttributesDef32 = bitfield32("attr_def_32", "Attributes", [
bf_boolean32(0x00000001, "att_def32_ro", "Read Only"),
bf_boolean32(0x00000002, "att_def32_hidden", "Hidden"),
bf_boolean32(0x00000004, "att_def32_system", "System"),
bf_boolean32(0x00000008, "att_def32_execute", "Execute"),
bf_boolean32(0x00000010, "att_def32_sub_only", "Subdirectory"),
bf_boolean32(0x00000020, "att_def32_archive", "Archive"),
bf_boolean32(0x00000040, "att_def32_execute_confirm", "Execute Confirm"),
bf_boolean32(0x00000080, "att_def32_shareable", "Shareable"),
bf_val_str32(0x00000700, "att_def32_search", "Search Mode",[
[0, "Search on all Read Only Opens"],
[1, "Search on Read Only Opens with no Path"],
[2, "Shell Default Search Mode"],
[3, "Search on all Opens with no Path"],
[4, "Do not Search"],
[5, "Reserved - Do not Use"],
[6, "Search on All Opens"],
[7, "Reserved - Do not Use"],
]),
bf_boolean32(0x00000800, "att_def32_no_suballoc", "No Suballoc"),
bf_boolean32(0x00001000, "att_def32_transaction", "Transactional"),
bf_boolean32(0x00004000, "att_def32_read_audit", "Read Audit"),
bf_boolean32(0x00008000, "att_def32_write_audit", "Write Audit"),
bf_boolean32(0x00010000, "att_def32_purge", "Immediate Purge"),
bf_boolean32(0x00020000, "att_def32_reninhibit", "Rename Inhibit"),
bf_boolean32(0x00040000, "att_def32_delinhibit", "Delete Inhibit"),
bf_boolean32(0x00080000, "att_def32_cpyinhibit", "Copy Inhibit"),
bf_boolean32(0x00100000, "att_def32_file_audit", "File Audit"),
bf_boolean32(0x00200000, "att_def32_reserved", "Reserved"),
bf_boolean32(0x00400000, "att_def32_data_migrate", "Data Migrated"),
bf_boolean32(0x00800000, "att_def32_inhibit_dm", "Inhibit Data Migration"),
bf_boolean32(0x01000000, "att_def32_dm_save_key", "Data Migration Save Key"),
bf_boolean32(0x02000000, "att_def32_im_comp", "Immediate Compress"),
bf_boolean32(0x04000000, "att_def32_comp", "Compressed"),
bf_boolean32(0x08000000, "att_def32_comp_inhibit", "Inhibit Compression"),
bf_boolean32(0x10000000, "att_def32_reserved2", "Reserved"),
bf_boolean32(0x20000000, "att_def32_cant_compress", "Can't Compress"),
bf_boolean32(0x40000000, "att_def32_attr_archive", "Archive Attributes"),
bf_boolean32(0x80000000, "att_def32_reserved3", "Reserved"),
])
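# AttributesDef, AttributesDef16 and AttributesDef32 describe the same file
# attributes at three field widths. The low byte carries the same flags in
# each; the 16-bit form adds the transaction and audit bits, and the 32-bit
# form additionally defines Execute Confirm, the three-bit search-mode
# sub-field, and the suballocation, purge, inhibit, compression and
# data-migration bits.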
AttributeValidFlag = uint32("attribute_valid_flag", "Attribute Valid Flag")
AuditFileVersionDate = uint16("audit_file_ver_date", "Audit File Version Date")
AuditFileVersionDate.NWDate()
AuditFlag = val_string8("audit_flag", "Audit Flag", [
[ 0x00, "Do NOT audit object" ],
[ 0x01, "Audit object" ],
])
AuditHandle = uint32("audit_handle", "Audit File Handle")
AuditHandle.Display("BASE_HEX")
AuditID = uint32("audit_id", "Audit ID", ENC_BIG_ENDIAN)
AuditID.Display("BASE_HEX")
AuditIDType = val_string16("audit_id_type", "Audit ID Type", [
[ 0x0000, "Volume" ],
[ 0x0001, "Container" ],
])
AuditVersionDate = uint16("audit_ver_date", "Auditing Version Date")
AuditVersionDate.NWDate()
AvailableBlocks = uint32("available_blocks", "Available Blocks")
AvailableBlocks64 = uint64("available_blocks64", "Available Blocks")
AvailableClusters = uint16("available_clusters", "Available Clusters")
AvailableDirectorySlots = uint16("available_directory_slots", "Available Directory Slots")
AvailableDirEntries = uint32("available_dir_entries", "Available Directory Entries")
AvailableDirEntries64 = uint64("available_dir_entries64", "Available Directory Entries")
AvailableIndexedFiles = uint16("available_indexed_files", "Available Indexed Files")
BackgroundAgedWrites = uint32("background_aged_writes", "Background Aged Writes")
BackgroundDirtyWrites = uint32("background_dirty_writes", "Background Dirty Writes")
BadLogicalConnectionCount = uint16("bad_logical_connection_count", "Bad Logical Connection Count")
BannerName = fw_string("banner_name", "Banner Name", 14)
BaseDirectoryID = uint32("base_directory_id", "Base Directory ID", ENC_BIG_ENDIAN)
BaseDirectoryID.Display("BASE_HEX")
binderyContext = nstring8("bindery_context", "Bindery Context")
BitMap = bytes("bit_map", "Bit Map", 512)
BlockNumber = uint32("block_number", "Block Number")
BlockSize = uint16("block_size", "Block Size")
BlockSizeInSectors = uint32("block_size_in_sectors", "Block Size in Sectors")
BoardInstalled = uint8("board_installed", "Board Installed")
BoardNumber = uint32("board_number", "Board Number")
BoardNumbers = uint32("board_numbers", "Board Numbers")
BufferSize = uint16("buffer_size", "Buffer Size")
BusString = stringz("bus_string", "Bus String")
BusType = val_string8("bus_type", "Bus Type", [
[0x00, "ISA"],
[0x01, "Micro Channel" ],
[0x02, "EISA"],
[0x04, "PCI"],
[0x08, "PCMCIA"],
[0x10, "ISA"],
[0x14, "ISA/PCI"],
])
BytesActuallyTransferred = uint32("bytes_actually_transferred", "Bytes Actually Transferred")
BytesActuallyTransferred64bit = uint64("bytes_actually_transferred_64", "Bytes Actually Transferred", ENC_LITTLE_ENDIAN)
BytesActuallyTransferred64bit.Display("BASE_DEC")
BytesRead = fw_string("bytes_read", "Bytes Read", 6)
BytesToCopy = uint32("bytes_to_copy", "Bytes to Copy")
BytesToCopy64bit = uint64("bytes_to_copy_64", "Bytes to Copy")
BytesToCopy64bit.Display("BASE_DEC")
BytesWritten = fw_string("bytes_written", "Bytes Written", 6)
CacheAllocations = uint32("cache_allocations", "Cache Allocations")
CacheBlockScrapped = uint16("cache_block_scrapped", "Cache Block Scrapped")
CacheBufferCount = uint16("cache_buffer_count", "Cache Buffer Count")
CacheBufferSize = uint16("cache_buffer_size", "Cache Buffer Size")
CacheFullWriteRequests = uint32("cache_full_write_requests", "Cache Full Write Requests")
CacheGetRequests = uint32("cache_get_requests", "Cache Get Requests")
CacheHitOnUnavailableBlock = uint16("cache_hit_on_unavailable_block", "Cache Hit On Unavailable Block")
CacheHits = uint32("cache_hits", "Cache Hits")
CacheMisses = uint32("cache_misses", "Cache Misses")
CachePartialWriteRequests = uint32("cache_partial_write_requests", "Cache Partial Write Requests")
CacheReadRequests = uint32("cache_read_requests", "Cache Read Requests")
CacheWriteRequests = uint32("cache_write_requests", "Cache Write Requests")
CategoryName = stringz("category_name", "Category Name")
CCFileHandle = uint32("cc_file_handle", "File Handle")
CCFileHandle.Display("BASE_HEX")
CCFunction = val_string8("cc_function", "OP-Lock Flag", [
[ 0x01, "Clear OP-Lock" ],
[ 0x02, "Acknowledge Callback" ],
[ 0x03, "Decline Callback" ],
[ 0x04, "Level 2" ],
])
ChangeBits = bitfield16("change_bits", "Change Bits", [
bf_boolean16(0x0001, "change_bits_modify", "Modify Name"),
bf_boolean16(0x0002, "change_bits_fatt", "File Attributes"),
bf_boolean16(0x0004, "change_bits_cdate", "Creation Date"),
bf_boolean16(0x0008, "change_bits_ctime", "Creation Time"),
bf_boolean16(0x0010, "change_bits_owner", "Owner ID"),
bf_boolean16(0x0020, "change_bits_adate", "Archive Date"),
bf_boolean16(0x0040, "change_bits_atime", "Archive Time"),
bf_boolean16(0x0080, "change_bits_aid", "Archiver ID"),
bf_boolean16(0x0100, "change_bits_udate", "Update Date"),
bf_boolean16(0x0200, "change_bits_utime", "Update Time"),
bf_boolean16(0x0400, "change_bits_uid", "Update ID"),
bf_boolean16(0x0800, "change_bits_acc_date", "Access Date"),
bf_boolean16(0x1000, "change_bits_max_acc_mask", "Maximum Access Mask"),
bf_boolean16(0x2000, "change_bits_max_space", "Maximum Space"),
])
ChannelState = val_string8("channel_state", "Channel State", [
[ 0x00, "Channel is running" ],
[ 0x01, "Channel is stopping" ],
[ 0x02, "Channel is stopped" ],
[ 0x03, "Channel is not functional" ],
])
ChannelSynchronizationState = val_string8("channel_synchronization_state", "Channel Synchronization State", [
[ 0x00, "Channel is not being used" ],
[ 0x02, "NetWare is using the channel; no one else wants it" ],
[ 0x04, "NetWare is using the channel; someone else wants it" ],
[ 0x06, "Someone else is using the channel; NetWare does not need it" ],
[ 0x08, "Someone else is using the channel; NetWare needs it" ],
[ 0x0A, "Someone else has released the channel; NetWare should use it" ],
])
ChargeAmount = uint32("charge_amount", "Charge Amount")
ChargeInformation = uint32("charge_information", "Charge Information")
ClientCompFlag = val_string16("client_comp_flag", "Completion Flag", [
[ 0x0000, "Successful" ],
[ 0x0001, "Illegal Station Number" ],
[ 0x0002, "Client Not Logged In" ],
[ 0x0003, "Client Not Accepting Messages" ],
[ 0x0004, "Client Already has a Message" ],
[ 0x0096, "No Alloc Space for the Message" ],
[ 0x00fd, "Bad Station Number" ],
[ 0x00ff, "Failure" ],
])
ClientIDNumber = uint32("client_id_number", "Client ID Number", ENC_BIG_ENDIAN)
ClientIDNumber.Display("BASE_HEX")
ClientList = uint32("client_list", "Client List")
ClientListCount = uint16("client_list_cnt", "Client List Count")
ClientListLen = uint8("client_list_len", "Client List Length")
ClientName = nstring8("client_name", "Client Name")
ClientRecordArea = fw_string("client_record_area", "Client Record Area", 152)
ClientStation = uint8("client_station", "Client Station")
ClientStationLong = uint32("client_station_long", "Client Station")
ClientTaskNumber = uint8("client_task_number", "Client Task Number")
ClientTaskNumberLong = uint32("client_task_number_long", "Client Task Number")
ClusterCount = uint16("cluster_count", "Cluster Count")
ClustersUsedByDirectories = uint32("clusters_used_by_directories", "Clusters Used by Directories")
ClustersUsedByExtendedDirectories = uint32("clusters_used_by_extended_dirs", "Clusters Used by Extended Directories")
ClustersUsedByFAT = uint32("clusters_used_by_fat", "Clusters Used by FAT")
CodePage = uint32("code_page", "Code Page")
ComCnts = uint16("com_cnts", "Communication Counters")
Comment = nstring8("comment", "Comment")
CommentType = uint16("comment_type", "Comment Type")
CompletionCode = uint32("ncompletion_code", "Completion Code")
CompressedDataStreamsCount = uint32("compressed_data_streams_count", "Compressed Data Streams Count")
CompressedLimboDataStreamsCount = uint32("compressed_limbo_data_streams_count", "Compressed Limbo Data Streams Count")
CompressedSectors = uint32("compressed_sectors", "Compressed Sectors")
compressionStage = uint32("compression_stage", "Compression Stage")
compressVolume = uint32("compress_volume", "Volume Compression")
ConfigMajorVN = uint8("config_major_vn", "Configuration Major Version Number")
ConfigMinorVN = uint8("config_minor_vn", "Configuration Minor Version Number")
ConfigurationDescription = fw_string("configuration_description", "Configuration Description", 80)
ConfigurationText = fw_string("configuration_text", "Configuration Text", 160)
ConfiguredMaxBinderyObjects = uint16("configured_max_bindery_objects", "Configured Max Bindery Objects")
ConfiguredMaxOpenFiles = uint16("configured_max_open_files", "Configured Max Open Files")
ConfiguredMaxRoutingBuffers = uint16("configured_max_routing_buffers", "Configured Max Routing Buffers")
ConfiguredMaxSimultaneousTransactions = uint16("cfg_max_simultaneous_transactions", "Configured Max Simultaneous Transactions")
ConnectedLAN = uint32("connected_lan", "LAN Adapter")
ConnectionControlBits = bitfield8("conn_ctrl_bits", "Connection Control", [
bf_boolean8(0x01, "enable_brdcasts", "Enable Broadcasts"),
bf_boolean8(0x02, "enable_personal_brdcasts", "Enable Personal Broadcasts"),
bf_boolean8(0x04, "enable_wdog_messages", "Enable Watchdog Message"),
bf_boolean8(0x10, "disable_brdcasts", "Disable Broadcasts"),
bf_boolean8(0x20, "disable_personal_brdcasts", "Disable Personal Broadcasts"),
bf_boolean8(0x40, "disable_wdog_messages", "Disable Watchdog Message"),
])
ConnectionListCount = uint32("conn_list_count", "Connection List Count")
ConnectionList = uint32("connection_list", "Connection List")
ConnectionNumber = uint32("connection_number", "Connection Number", ENC_BIG_ENDIAN)
ConnectionNumberList = nstring8("connection_number_list", "Connection Number List")
ConnectionNumberWord = uint16("conn_number_word", "Connection Number")
ConnectionNumberByte = uint8("conn_number_byte", "Connection Number")
ConnectionServiceType = val_string8("connection_service_type", "Connection Service Type", [
[ 0x01, "CLIB backward Compatibility" ],
[ 0x02, "NCP Connection" ],
[ 0x03, "NLM Connection" ],
[ 0x04, "AFP Connection" ],
[ 0x05, "FTAM Connection" ],
[ 0x06, "ANCP Connection" ],
[ 0x07, "ACP Connection" ],
[ 0x08, "SMB Connection" ],
[ 0x09, "Winsock Connection" ],
])
ConnectionsInUse = uint16("connections_in_use", "Connections In Use")
ConnectionsMaxUsed = uint16("connections_max_used", "Connections Max Used")
ConnectionsSupportedMax = uint16("connections_supported_max", "Connections Supported Max")
ConnectionType = val_string8("connection_type", "Connection Type", [
[ 0x00, "Not in use" ],
[ 0x02, "NCP" ],
[ 0x0b, "UDP (for IP)" ],
])
ConnListLen = uint8("conn_list_len", "Connection List Length")
connList = uint32("conn_list", "Connection List")
ControlFlags = val_string8("control_flags", "Control Flags", [
[ 0x00, "Forced Record Locking is Off" ],
[ 0x01, "Forced Record Locking is On" ],
])
ControllerDriveNumber = uint8("controller_drive_number", "Controller Drive Number")
ControllerNumber = uint8("controller_number", "Controller Number")
ControllerType = uint8("controller_type", "Controller Type")
Cookie1 = uint32("cookie_1", "Cookie 1")
Cookie2 = uint32("cookie_2", "Cookie 2")
Copies = uint8("copies", "Copies")
CoprocessorFlag = uint32("co_processor_flag", "CoProcessor Present Flag")
CoProcessorString = stringz("co_proc_string", "CoProcessor String")
CounterMask = val_string8("counter_mask", "Counter Mask", [
[ 0x00, "Counter is Valid" ],
[ 0x01, "Counter is not Valid" ],
])
CPUNumber = uint32("cpu_number", "CPU Number")
CPUString = stringz("cpu_string", "CPU String")
CPUType = val_string8("cpu_type", "CPU Type", [
[ 0x00, "80386" ],
[ 0x01, "80486" ],
[ 0x02, "Pentium" ],
[ 0x03, "Pentium Pro" ],
])
CreationDate = uint16("creation_date", "Creation Date")
CreationDate.NWDate()
CreationTime = uint16("creation_time", "Creation Time")
CreationTime.NWTime()
CreatorID = uint32("creator_id", "Creator ID", ENC_BIG_ENDIAN)
CreatorID.Display("BASE_HEX")
CreatorNameSpaceNumber = val_string8("creator_name_space_number", "Creator Name Space Number", [
[ 0x00, "DOS Name Space" ],
[ 0x01, "MAC Name Space" ],
[ 0x02, "NFS Name Space" ],
[ 0x04, "Long Name Space" ],
])
CreditLimit = uint32("credit_limit", "Credit Limit")
CtrlFlags = val_string16("ctrl_flags", "Control Flags", [
[ 0x0000, "Do Not Return File Name" ],
[ 0x0001, "Return File Name" ],
])
curCompBlks = uint32("cur_comp_blks", "Current Compression Blocks")
curInitialBlks = uint32("cur_initial_blks", "Current Initial Blocks")
curIntermediateBlks = uint32("cur_inter_blks", "Current Intermediate Blocks")
CurNumOfRTags = uint32("cur_num_of_r_tags", "Current Number of Resource Tags")
CurrentBlockBeingDecompressed = uint32("cur_blk_being_dcompress", "Current Block Being Decompressed")
CurrentChangedFATs = uint16("current_changed_fats", "Current Changed FAT Entries")
CurrentEntries = uint32("current_entries", "Current Entries")
CurrentFormType = uint8("current_form_type", "Current Form Type")
CurrentLFSCounters = uint32("current_lfs_counters", "Current LFS Counters")
CurrentlyUsedRoutingBuffers = uint16("currently_used_routing_buffers", "Currently Used Routing Buffers")
CurrentOpenFiles = uint16("current_open_files", "Current Open Files")
CurrentReferenceID = uint16("curr_ref_id", "Current Reference ID")
CurrentServers = uint32("current_servers", "Current Servers")
CurrentServerTime = uint32("current_server_time", "Time Elapsed Since Server Was Brought Up")
CurrentSpace = uint32("current_space", "Current Space")
CurrentTransactionCount = uint32("current_trans_count", "Current Transaction Count")
CurrentUsedBinderyObjects = uint16("current_used_bindery_objects", "Current Used Bindery Objects")
CurrentUsedDynamicSpace = uint32("current_used_dynamic_space", "Current Used Dynamic Space")
CustomCnts = uint32("custom_cnts", "Custom Counters")
CustomCount = uint32("custom_count", "Custom Count")
CustomCounters = uint32("custom_counters", "Custom Counters")
CustomString = nstring8("custom_string", "Custom String")
CustomVariableValue = uint32("custom_var_value", "Custom Variable Value")
Data = nstring8("data", "Data")
Data64 = stringz("data64", "Data")
DataForkFirstFAT = uint32("data_fork_first_fat", "Data Fork First FAT Entry")
DataForkLen = uint32("data_fork_len", "Data Fork Length")
DataForkSize = uint32("data_fork_size", "Data Fork Size")
DataSize = uint32("data_size", "Data Size")
DataStream = val_string8("data_stream", "Data Stream", [
[ 0x00, "Resource Fork or DOS" ],
[ 0x01, "Data Fork" ],
])
DataStreamFATBlocks = uint32("data_stream_fat_blks", "Data Stream FAT Blocks")
DataStreamName = nstring8("data_stream_name", "Data Stream Name")
DataStreamNumber = uint8("data_stream_number", "Data Stream Number")
DataStreamNumberLong = uint32("data_stream_num_long", "Data Stream Number")
DataStreamsCount = uint32("data_streams_count", "Data Streams Count")
DataStreamSize = uint32("data_stream_size", "Size")
DataStreamSize64 = uint64("data_stream_size_64", "Size")
DataStreamSpaceAlloc = uint32("data_stream_space_alloc", "Space Allocated for Data Stream")
DataTypeFlag = val_string8("data_type_flag", "Data Type Flag", [
[ 0x00, "ASCII Data" ],
[ 0x01, "UTF8 Data" ],
])
Day = uint8("s_day", "Day")
DayOfWeek = val_string8("s_day_of_week", "Day of Week", [
[ 0x00, "Sunday" ],
[ 0x01, "Monday" ],
[ 0x02, "Tuesday" ],
[ 0x03, "Wednesday" ],
[ 0x04, "Thursday" ],
[ 0x05, "Friday" ],
[ 0x06, "Saturday" ],
])
DeadMirrorTable = bytes("dead_mirror_table", "Dead Mirror Table", 32)
DefinedDataStreams = uint8("defined_data_streams", "Defined Data Streams")
DefinedNameSpaces = uint8("defined_name_spaces", "Defined Name Spaces")
DeletedDate = uint16("deleted_date", "Deleted Date")
DeletedDate.NWDate()
DeletedFileTime = uint32("deleted_file_time", "Deleted File Time")
DeletedFileTime.Display("BASE_HEX")
DeletedTime = uint16("deleted_time", "Deleted Time")
DeletedTime.NWTime()
DeletedID = uint32("delete_id", "Deleted ID", ENC_BIG_ENDIAN)
DeletedID.Display("BASE_HEX")
DeleteExistingFileFlag = val_string8("delete_existing_file_flag", "Delete Existing File Flag", [
[ 0x00, "Do Not Delete Existing File" ],
[ 0x01, "Delete Existing File" ],
])
DenyReadCount = uint16("deny_read_count", "Deny Read Count")
DenyWriteCount = uint16("deny_write_count", "Deny Write Count")
DescriptionStrings = fw_string("description_string", "Description", 100)
DesiredAccessRights = bitfield16("desired_access_rights", "Desired Access Rights", [
bf_boolean16(0x0001, "dsired_acc_rights_read_o", "Read Only"),
bf_boolean16(0x0002, "dsired_acc_rights_write_o", "Write Only"),
bf_boolean16(0x0004, "dsired_acc_rights_deny_r", "Deny Read"),
bf_boolean16(0x0008, "dsired_acc_rights_deny_w", "Deny Write"),
bf_boolean16(0x0010, "dsired_acc_rights_compat", "Compatibility"),
bf_boolean16(0x0040, "dsired_acc_rights_w_thru", "File Write Through"),
bf_boolean16(0x0400, "dsired_acc_rights_del_file_cls", "Delete File Close"),
])
DesiredResponseCount = uint16("desired_response_count", "Desired Response Count")
DestDirHandle = uint8("dest_dir_handle", "Destination Directory Handle")
DestNameSpace = val_string8("dest_name_space", "Destination Name Space", [
[ 0x00, "DOS Name Space" ],
[ 0x01, "MAC Name Space" ],
[ 0x02, "NFS Name Space" ],
[ 0x04, "Long Name Space" ],
])
DestPathComponentCount = uint8("dest_component_count", "Destination Path Component Count")
DestPath = nstring8("dest_path", "Destination Path")
DestPath16 = nstring16("dest_path_16", "Destination Path")
DetachDuringProcessing = uint16("detach_during_processing", "Detach During Processing")
DetachForBadConnectionNumber = uint16("detach_for_bad_connection_number", "Detach For Bad Connection Number")
DirHandle = uint8("dir_handle", "Directory Handle")
DirHandleName = uint8("dir_handle_name", "Handle Name")
DirHandleLong = uint32("dir_handle_long", "Directory Handle")
DirHandle64 = uint64("dir_handle64", "Directory Handle")
DirectoryAccessRights = uint8("directory_access_rights", "Directory Access Rights")
#
# XXX - what do the bits mean here?
#
DirectoryAttributes = uint8("directory_attributes", "Directory Attributes")
DirectoryBase = uint32("dir_base", "Directory Base")
DirectoryBase.Display("BASE_HEX")
DirectoryCount = uint16("dir_count", "Directory Count")
DirectoryEntryNumber = uint32("directory_entry_number", "Directory Entry Number")
DirectoryEntryNumber.Display("BASE_HEX")
DirectoryEntryNumberWord = uint16("directory_entry_number_word", "Directory Entry Number")
DirectoryID = uint16("directory_id", "Directory ID", ENC_BIG_ENDIAN)
DirectoryID.Display("BASE_HEX")
DirectoryName = fw_string("directory_name", "Directory Name", 12)
DirectoryName14 = fw_string("directory_name_14", "Directory Name", 14)
DirectoryNameLen = uint8("directory_name_len", "Directory Name Length")
DirectoryNumber = uint32("directory_number", "Directory Number")
DirectoryNumber.Display("BASE_HEX")
DirectoryPath = fw_string("directory_path", "Directory Path", 16)
DirectoryServicesObjectID = uint32("directory_services_object_id", "Directory Services Object ID")
DirectoryServicesObjectID.Display("BASE_HEX")
DirectoryStamp = uint16("directory_stamp", "Directory Stamp (0xD1D1)")
DirtyCacheBuffers = uint16("dirty_cache_buffers", "Dirty Cache Buffers")
DiskChannelNumber = uint8("disk_channel_number", "Disk Channel Number")
DiskChannelTable = val_string8("disk_channel_table", "Disk Channel Table", [
[ 0x01, "XT" ],
[ 0x02, "AT" ],
[ 0x03, "SCSI" ],
[ 0x04, "Disk Coprocessor" ],
])
DiskSpaceLimit = uint32("disk_space_limit", "Disk Space Limit")
DiskSpaceLimit64 = uint64("disk_space_limit_64", "Disk Space Limit")
DMAChannelsUsed = uint32("dma_channels_used", "DMA Channels Used")
DMInfoEntries = uint32("dm_info_entries", "DM Info Entries")
DMInfoLevel = val_string8("dm_info_level", "DM Info Level", [
[ 0x00, "Return Detailed DM Support Module Information" ],
[ 0x01, "Return Number of DM Support Modules" ],
[ 0x02, "Return DM Support Modules Names" ],
])
DMFlags = val_string8("dm_flags", "DM Flags", [
[ 0x00, "OnLine Media" ],
[ 0x01, "OffLine Media" ],
])
DMmajorVersion = uint32("dm_major_version", "DM Major Version")
DMminorVersion = uint32("dm_minor_version", "DM Minor Version")
DMPresentFlag = val_string8("dm_present_flag", "Data Migration Present Flag", [
[ 0x00, "Data Migration NLM is not loaded" ],
[ 0x01, "Data Migration NLM has been loaded and is running" ],
])
DOSDirectoryBase = uint32("dos_directory_base", "DOS Directory Base")
DOSDirectoryBase.Display("BASE_HEX")
DOSDirectoryEntry = uint32("dos_directory_entry", "DOS Directory Entry")
DOSDirectoryEntry.Display("BASE_HEX")
DOSDirectoryEntryNumber = uint32("dos_directory_entry_number", "DOS Directory Entry Number")
DOSDirectoryEntryNumber.Display("BASE_HEX")
DOSFileAttributes = uint8("dos_file_attributes", "DOS File Attributes")
DOSParentDirectoryEntry = uint32("dos_parent_directory_entry", "DOS Parent Directory Entry")
DOSParentDirectoryEntry.Display("BASE_HEX")
DOSSequence = uint32("dos_sequence", "DOS Sequence")
DriveCylinders = uint16("drive_cylinders", "Drive Cylinders")
DriveDefinitionString = fw_string("drive_definition_string", "Drive Definition", 64)
DriveHeads = uint8("drive_heads", "Drive Heads")
DriveMappingTable = bytes("drive_mapping_table", "Drive Mapping Table", 32)
DriveMirrorTable = bytes("drive_mirror_table", "Drive Mirror Table", 32)
DriverBoardName = stringz("driver_board_name", "Driver Board Name")
DriveRemovableFlag = val_string8("drive_removable_flag", "Drive Removable Flag", [
[ 0x00, "Nonremovable" ],
[ 0xff, "Removable" ],
])
DriverLogicalName = stringz("driver_log_name", "Driver Logical Name")
DriverShortName = stringz("driver_short_name", "Driver Short Name")
DriveSize = uint32("drive_size", "Drive Size")
DstEAFlags = val_string16("dst_ea_flags", "Destination EA Flags", [
[ 0x0000, "Return EAHandle,Information Level 0" ],
[ 0x0001, "Return NetWareHandle,Information Level 0" ],
[ 0x0002, "Return Volume/Directory Number,Information Level 0" ],
[ 0x0004, "Return EAHandle,Close Handle on Error,Information Level 0" ],
[ 0x0005, "Return NetWareHandle,Close Handle on Error,Information Level 0" ],
[ 0x0006, "Return Volume/Directory Number,Close Handle on Error,Information Level 0" ],
[ 0x0010, "Return EAHandle,Information Level 1" ],
[ 0x0011, "Return NetWareHandle,Information Level 1" ],
[ 0x0012, "Return Volume/Directory Number,Information Level 1" ],
[ 0x0014, "Return EAHandle,Close Handle on Error,Information Level 1" ],
[ 0x0015, "Return NetWareHandle,Close Handle on Error,Information Level 1" ],
[ 0x0016, "Return Volume/Directory Number,Close Handle on Error,Information Level 1" ],
[ 0x0020, "Return EAHandle,Information Level 2" ],
[ 0x0021, "Return NetWareHandle,Information Level 2" ],
[ 0x0022, "Return Volume/Directory Number,Information Level 2" ],
[ 0x0024, "Return EAHandle,Close Handle on Error,Information Level 2" ],
[ 0x0025, "Return NetWareHandle,Close Handle on Error,Information Level 2" ],
[ 0x0026, "Return Volume/Directory Number,Close Handle on Error,Information Level 2" ],
[ 0x0030, "Return EAHandle,Information Level 3" ],
[ 0x0031, "Return NetWareHandle,Information Level 3" ],
[ 0x0032, "Return Volume/Directory Number,Information Level 3" ],
[ 0x0034, "Return EAHandle,Close Handle on Error,Information Level 3" ],
[ 0x0035, "Return NetWareHandle,Close Handle on Error,Information Level 3" ],
[ 0x0036, "Return Volume/Directory Number,Close Handle on Error,Information Level 3" ],
[ 0x0040, "Return EAHandle,Information Level 4" ],
[ 0x0041, "Return NetWareHandle,Information Level 4" ],
[ 0x0042, "Return Volume/Directory Number,Information Level 4" ],
[ 0x0044, "Return EAHandle,Close Handle on Error,Information Level 4" ],
[ 0x0045, "Return NetWareHandle,Close Handle on Error,Information Level 4" ],
[ 0x0046, "Return Volume/Directory Number,Close Handle on Error,Information Level 4" ],
[ 0x0050, "Return EAHandle,Information Level 5" ],
[ 0x0051, "Return NetWareHandle,Information Level 5" ],
[ 0x0052, "Return Volume/Directory Number,Information Level 5" ],
[ 0x0054, "Return EAHandle,Close Handle on Error,Information Level 5" ],
[ 0x0055, "Return NetWareHandle,Close Handle on Error,Information Level 5" ],
[ 0x0056, "Return Volume/Directory Number,Close Handle on Error,Information Level 5" ],
[ 0x0060, "Return EAHandle,Information Level 6" ],
[ 0x0061, "Return NetWareHandle,Information Level 6" ],
[ 0x0062, "Return Volume/Directory Number,Information Level 6" ],
[ 0x0064, "Return EAHandle,Close Handle on Error,Information Level 6" ],
[ 0x0065, "Return NetWareHandle,Close Handle on Error,Information Level 6" ],
[ 0x0066, "Return Volume/Directory Number,Close Handle on Error,Information Level 6" ],
[ 0x0070, "Return EAHandle,Information Level 7" ],
[ 0x0071, "Return NetWareHandle,Information Level 7" ],
[ 0x0072, "Return Volume/Directory Number,Information Level 7" ],
[ 0x0074, "Return EAHandle,Close Handle on Error,Information Level 7" ],
[ 0x0075, "Return NetWareHandle,Close Handle on Error,Information Level 7" ],
[ 0x0076, "Return Volume/Directory Number,Close Handle on Error,Information Level 7" ],
[ 0x0080, "Return EAHandle,Information Level 0,Immediate Close Handle" ],
[ 0x0081, "Return NetWareHandle,Information Level 0,Immediate Close Handle" ],
[ 0x0082, "Return Volume/Directory Number,Information Level 0,Immediate Close Handle" ],
[ 0x0084, "Return EAHandle,Close Handle on Error,Information Level 0,Immediate Close Handle" ],
[ 0x0085, "Return NetWareHandle,Close Handle on Error,Information Level 0,Immediate Close Handle" ],
[ 0x0086, "Return Volume/Directory Number,Close Handle on Error,Information Level 0,Immediate Close Handle" ],
[ 0x0090, "Return EAHandle,Information Level 1,Immediate Close Handle" ],
[ 0x0091, "Return NetWareHandle,Information Level 1,Immediate Close Handle" ],
[ 0x0092, "Return Volume/Directory Number,Information Level 1,Immediate Close Handle" ],
[ 0x0094, "Return EAHandle,Close Handle on Error,Information Level 1,Immediate Close Handle" ],
[ 0x0095, "Return NetWareHandle,Close Handle on Error,Information Level 1,Immediate Close Handle" ],
[ 0x0096, "Return Volume/Directory Number,Close Handle on Error,Information Level 1,Immediate Close Handle" ],
[ 0x00a0, "Return EAHandle,Information Level 2,Immediate Close Handle" ],
[ 0x00a1, "Return NetWareHandle,Information Level 2,Immediate Close Handle" ],
[ 0x00a2, "Return Volume/Directory Number,Information Level 2,Immediate Close Handle" ],
[ 0x00a4, "Return EAHandle,Close Handle on Error,Information Level 2,Immediate Close Handle" ],
[ 0x00a5, "Return NetWareHandle,Close Handle on Error,Information Level 2,Immediate Close Handle" ],
[ 0x00a6, "Return Volume/Directory Number,Close Handle on Error,Information Level 2,Immediate Close Handle" ],
[ 0x00b0, "Return EAHandle,Information Level 3,Immediate Close Handle" ],
[ 0x00b1, "Return NetWareHandle,Information Level 3,Immediate Close Handle" ],
[ 0x00b2, "Return Volume/Directory Number,Information Level 3,Immediate Close Handle" ],
[ 0x00b4, "Return EAHandle,Close Handle on Error,Information Level 3,Immediate Close Handle" ],
[ 0x00b5, "Return NetWareHandle,Close Handle on Error,Information Level 3,Immediate Close Handle" ],
[ 0x00b6, "Return Volume/Directory Number,Close Handle on Error,Information Level 3,Immediate Close Handle" ],
[ 0x00c0, "Return EAHandle,Information Level 4,Immediate Close Handle" ],
[ 0x00c1, "Return NetWareHandle,Information Level 4,Immediate Close Handle" ],
[ 0x00c2, "Return Volume/Directory Number,Information Level 4,Immediate Close Handle" ],
[ 0x00c4, "Return EAHandle,Close Handle on Error,Information Level 4,Immediate Close Handle" ],
[ 0x00c5, "Return NetWareHandle,Close Handle on Error,Information Level 4,Immediate Close Handle" ],
[ 0x00c6, "Return Volume/Directory Number,Close Handle on Error,Information Level 4,Immediate Close Handle" ],
[ 0x00d0, "Return EAHandle,Information Level 5,Immediate Close Handle" ],
[ 0x00d1, "Return NetWareHandle,Information Level 5,Immediate Close Handle" ],
[ 0x00d2, "Return Volume/Directory Number,Information Level 5,Immediate Close Handle" ],
[ 0x00d4, "Return EAHandle,Close Handle on Error,Information Level 5,Immediate Close Handle" ],
[ 0x00d5, "Return NetWareHandle,Close Handle on Error,Information Level 5,Immediate Close Handle" ],
[ 0x00d6, "Return Volume/Directory Number,Close Handle on Error,Information Level 5,Immediate Close Handle" ],
[ 0x00e0, "Return EAHandle,Information Level 6,Immediate Close Handle" ],
[ 0x00e1, "Return NetWareHandle,Information Level 6,Immediate Close Handle" ],
[ 0x00e2, "Return Volume/Directory Number,Information Level 6,Immediate Close Handle" ],
[ 0x00e4, "Return EAHandle,Close Handle on Error,Information Level 6,Immediate Close Handle" ],
[ 0x00e5, "Return NetWareHandle,Close Handle on Error,Information Level 6,Immediate Close Handle" ],
[ 0x00e6, "Return Volume/Directory Number,Close Handle on Error,Information Level 6,Immediate Close Handle" ],
[ 0x00f0, "Return EAHandle,Information Level 7,Immediate Close Handle" ],
[ 0x00f1, "Return NetWareHandle,Information Level 7,Immediate Close Handle" ],
[ 0x00f2, "Return Volume/Directory Number,Information Level 7,Immediate Close Handle" ],
[ 0x00f4, "Return EAHandle,Close Handle on Error,Information Level 7,Immediate Close Handle" ],
[ 0x00f5, "Return NetWareHandle,Close Handle on Error,Information Level 7,Immediate Close Handle" ],
[ 0x00f6, "Return Volume/Directory Number,Close Handle on Error,Information Level 7,Immediate Close Handle" ],
])
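# The DstEAFlags values follow a pattern (read from the table above, not
# from separate NCP documentation): bits 0x0003 select what is returned
# (EAHandle, NetWareHandle, or Volume/Directory Number), 0x0004 requests
# Close Handle on Error, 0x0070 carries the information level (0-7), and
# 0x0080 requests Immediate Close Handle.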
dstNSIndicator = val_string16("dst_ns_indicator", "Destination Name Space Indicator", [
[ 0x0000, "Return Source Name Space Information" ],
[ 0x0001, "Return Destination Name Space Information" ],
])
DstQueueID = uint32("dst_queue_id", "Destination Queue ID")
DuplicateRepliesSent = uint16("duplicate_replies_sent", "Duplicate Replies Sent")
EAAccessFlag = bitfield16("ea_access_flag", "EA Access Flag", [
bf_boolean16(0x0001, "ea_permanent_memory", "Permanent Memory"),
bf_boolean16(0x0002, "ea_deep_freeze", "Deep Freeze"),
bf_boolean16(0x0004, "ea_in_progress", "In Progress"),
bf_boolean16(0x0008, "ea_header_being_enlarged", "Header Being Enlarged"),
bf_boolean16(0x0010, "ea_new_tally_used", "New Tally Used"),
bf_boolean16(0x0020, "ea_tally_need_update", "Tally Need Update"),
bf_boolean16(0x0040, "ea_score_card_present", "Score Card Present"),
bf_boolean16(0x0080, "ea_need_bit_flag", "EA Need Bit Flag"),
bf_boolean16(0x0100, "ea_write_privileges", "Write Privileges"),
bf_boolean16(0x0200, "ea_read_privileges", "Read Privileges"),
bf_boolean16(0x0400, "ea_delete_privileges", "Delete Privileges"),
bf_boolean16(0x0800, "ea_system_ea_only", "System EA Only"),
bf_boolean16(0x1000, "ea_write_in_progress", "Write In Progress"),
])
EABytesWritten = uint32("ea_bytes_written", "Bytes Written")
EACount = uint32("ea_count", "Count")
EADataSize = uint32("ea_data_size", "Data Size")
EADataSizeDuplicated = uint32("ea_data_size_duplicated", "Data Size Duplicated")
EADuplicateCount = uint32("ea_duplicate_count", "Duplicate Count")
EAErrorCodes = val_string16("ea_error_codes", "EA Error Codes", [
[ 0x0000, "SUCCESSFUL" ],
[ 0x00c8, "ERR_MISSING_EA_KEY" ],
[ 0x00c9, "ERR_EA_NOT_FOUND" ],
[ 0x00ca, "ERR_INVALID_EA_HANDLE_TYPE" ],
[ 0x00cb, "ERR_EA_NO_KEY_NO_DATA" ],
[ 0x00cc, "ERR_EA_NUMBER_MISMATCH" ],
[ 0x00cd, "ERR_EXTENT_NUMBER_OUT_OF_RANGE" ],
[ 0x00ce, "ERR_EA_BAD_DIR_NUM" ],
[ 0x00cf, "ERR_INVALID_EA_HANDLE" ],
[ 0x00d0, "ERR_EA_POSITION_OUT_OF_RANGE" ],
[ 0x00d1, "ERR_EA_ACCESS_DENIED" ],
[ 0x00d2, "ERR_DATA_PAGE_ODD_SIZE" ],
[ 0x00d3, "ERR_EA_VOLUME_NOT_MOUNTED" ],
[ 0x00d4, "ERR_BAD_PAGE_BOUNDARY" ],
[ 0x00d5, "ERR_INSPECT_FAILURE" ],
[ 0x00d6, "ERR_EA_ALREADY_CLAIMED" ],
[ 0x00d7, "ERR_ODD_BUFFER_SIZE" ],
[ 0x00d8, "ERR_NO_SCORECARDS" ],
[ 0x00d9, "ERR_BAD_EDS_SIGNATURE" ],
[ 0x00da, "ERR_EA_SPACE_LIMIT" ],
[ 0x00db, "ERR_EA_KEY_CORRUPT" ],
[ 0x00dc, "ERR_EA_KEY_LIMIT" ],
[ 0x00dd, "ERR_TALLY_CORRUPT" ],
])
EAFlags = val_string16("ea_flags", "EA Flags", [
[ 0x0000, "Return EAHandle,Information Level 0" ],
[ 0x0001, "Return NetWareHandle,Information Level 0" ],
[ 0x0002, "Return Volume/Directory Number,Information Level 0" ],
[ 0x0004, "Return EAHandle,Close Handle on Error,Information Level 0" ],
[ 0x0005, "Return NetWareHandle,Close Handle on Error,Information Level 0" ],
[ 0x0006, "Return Volume/Directory Number,Close Handle on Error,Information Level 0" ],
[ 0x0010, "Return EAHandle,Information Level 1" ],
[ 0x0011, "Return NetWareHandle,Information Level 1" ],
[ 0x0012, "Return Volume/Directory Number,Information Level 1" ],
[ 0x0014, "Return EAHandle,Close Handle on Error,Information Level 1" ],
[ 0x0015, "Return NetWareHandle,Close Handle on Error,Information Level 1" ],
[ 0x0016, "Return Volume/Directory Number,Close Handle on Error,Information Level 1" ],
[ 0x0020, "Return EAHandle,Information Level 2" ],
[ 0x0021, "Return NetWareHandle,Information Level 2" ],
[ 0x0022, "Return Volume/Directory Number,Information Level 2" ],
[ 0x0024, "Return EAHandle,Close Handle on Error,Information Level 2" ],
[ 0x0025, "Return NetWareHandle,Close Handle on Error,Information Level 2" ],
[ 0x0026, "Return Volume/Directory Number,Close Handle on Error,Information Level 2" ],
[ 0x0030, "Return EAHandle,Information Level 3" ],
[ 0x0031, "Return NetWareHandle,Information Level 3" ],
[ 0x0032, "Return Volume/Directory Number,Information Level 3" ],
[ 0x0034, "Return EAHandle,Close Handle on Error,Information Level 3" ],
[ 0x0035, "Return NetWareHandle,Close Handle on Error,Information Level 3" ],
[ 0x0036, "Return Volume/Directory Number,Close Handle on Error,Information Level 3" ],
[ 0x0040, "Return EAHandle,Information Level 4" ],
[ 0x0041, "Return NetWareHandle,Information Level 4" ],
[ 0x0042, "Return Volume/Directory Number,Information Level 4" ],
[ 0x0044, "Return EAHandle,Close Handle on Error,Information Level 4" ],
[ 0x0045, "Return NetWareHandle,Close Handle on Error,Information Level 4" ],
[ 0x0046, "Return Volume/Directory Number,Close Handle on Error,Information Level 4" ],
[ 0x0050, "Return EAHandle,Information Level 5" ],
[ 0x0051, "Return NetWareHandle,Information Level 5" ],
[ 0x0052, "Return Volume/Directory Number,Information Level 5" ],
[ 0x0054, "Return EAHandle,Close Handle on Error,Information Level 5" ],
[ 0x0055, "Return NetWareHandle,Close Handle on Error,Information Level 5" ],
[ 0x0056, "Return Volume/Directory Number,Close Handle on Error,Information Level 5" ],
[ 0x0060, "Return EAHandle,Information Level 6" ],
[ 0x0061, "Return NetWareHandle,Information Level 6" ],
[ 0x0062, "Return Volume/Directory Number,Information Level 6" ],
[ 0x0064, "Return EAHandle,Close Handle on Error,Information Level 6" ],
[ 0x0065, "Return NetWareHandle,Close Handle on Error,Information Level 6" ],
[ 0x0066, "Return Volume/Directory Number,Close Handle on Error,Information Level 6" ],
[ 0x0070, "Return EAHandle,Information Level 7" ],
[ 0x0071, "Return NetWareHandle,Information Level 7" ],
[ 0x0072, "Return Volume/Directory Number,Information Level 7" ],
[ 0x0074, "Return EAHandle,Close Handle on Error,Information Level 7" ],
[ 0x0075, "Return NetWareHandle,Close Handle on Error,Information Level 7" ],
[ 0x0076, "Return Volume/Directory Number,Close Handle on Error,Information Level 7" ],
[ 0x0080, "Return EAHandle,Information Level 0,Immediate Close Handle" ],
[ 0x0081, "Return NetWareHandle,Information Level 0,Immediate Close Handle" ],
[ 0x0082, "Return Volume/Directory Number,Information Level 0,Immediate Close Handle" ],
[ 0x0084, "Return EAHandle,Close Handle on Error,Information Level 0,Immediate Close Handle" ],
[ 0x0085, "Return NetWareHandle,Close Handle on Error,Information Level 0,Immediate Close Handle" ],
[ 0x0086, "Return Volume/Directory Number,Close Handle on Error,Information Level 0,Immediate Close Handle" ],
[ 0x0090, "Return EAHandle,Information Level 1,Immediate Close Handle" ],
[ 0x0091, "Return NetWareHandle,Information Level 1,Immediate Close Handle" ],
[ 0x0092, "Return Volume/Directory Number,Information Level 1,Immediate Close Handle" ],
[ 0x0094, "Return EAHandle,Close Handle on Error,Information Level 1,Immediate Close Handle" ],
[ 0x0095, "Return NetWareHandle,Close Handle on Error,Information Level 1,Immediate Close Handle" ],
[ 0x0096, "Return Volume/Directory Number,Close Handle on Error,Information Level 1,Immediate Close Handle" ],
[ 0x00a0, "Return EAHandle,Information Level 2,Immediate Close Handle" ],
[ 0x00a1, "Return NetWareHandle,Information Level 2,Immediate Close Handle" ],
[ 0x00a2, "Return Volume/Directory Number,Information Level 2,Immediate Close Handle" ],
[ 0x00a4, "Return EAHandle,Close Handle on Error,Information Level 2,Immediate Close Handle" ],
[ 0x00a5, "Return NetWareHandle,Close Handle on Error,Information Level 2,Immediate Close Handle" ],
[ 0x00a6, "Return Volume/Directory Number,Close Handle on Error,Information Level 2,Immediate Close Handle" ],
[ 0x00b0, "Return EAHandle,Information Level 3,Immediate Close Handle" ],
[ 0x00b1, "Return NetWareHandle,Information Level 3,Immediate Close Handle" ],
[ 0x00b2, "Return Volume/Directory Number,Information Level 3,Immediate Close Handle" ],
[ 0x00b4, "Return EAHandle,Close Handle on Error,Information Level 3,Immediate Close Handle" ],
[ 0x00b5, "Return NetWareHandle,Close Handle on Error,Information Level 3,Immediate Close Handle" ],
[ 0x00b6, "Return Volume/Directory Number,Close Handle on Error,Information Level 3,Immediate Close Handle" ],
[ 0x00c0, "Return EAHandle,Information Level 4,Immediate Close Handle" ],
[ 0x00c1, "Return NetWareHandle,Information Level 4,Immediate Close Handle" ],
[ 0x00c2, "Return Volume/Directory Number,Information Level 4,Immediate Close Handle" ],
[ 0x00c4, "Return EAHandle,Close Handle on Error,Information Level 4,Immediate Close Handle" ],
[ 0x00c5, "Return NetWareHandle,Close Handle on Error,Information Level 4,Immediate Close Handle" ],
[ 0x00c6, "Return Volume/Directory Number,Close Handle on Error,Information Level 4,Immediate Close Handle" ],
[ 0x00d0, "Return EAHandle,Information Level 5,Immediate Close Handle" ],
[ 0x00d1, "Return NetWareHandle,Information Level 5,Immediate Close Handle" ],
[ 0x00d2, "Return Volume/Directory Number,Information Level 5,Immediate Close Handle" ],
[ 0x00d4, "Return EAHandle,Close Handle on Error,Information Level 5,Immediate Close Handle" ],
[ 0x00d5, "Return NetWareHandle,Close Handle on Error,Information Level 5,Immediate Close Handle" ],
[ 0x00d6, "Return Volume/Directory Number,Close Handle on Error,Information Level 5,Immediate Close Handle" ],
[ 0x00e0, "Return EAHandle,Information Level 6,Immediate Close Handle" ],
[ 0x00e1, "Return NetWareHandle,Information Level 6,Immediate Close Handle" ],
[ 0x00e2, "Return Volume/Directory Number,Information Level 6,Immediate Close Handle" ],
[ 0x00e4, "Return EAHandle,Close Handle on Error,Information Level 6,Immediate Close Handle" ],
[ 0x00e5, "Return NetWareHandle,Close Handle on Error,Information Level 6,Immediate Close Handle" ],
[ 0x00e6, "Return Volume/Directory Number,Close Handle on Error,Information Level 6,Immediate Close Handle" ],
[ 0x00f0, "Return EAHandle,Information Level 7,Immediate Close Handle" ],
[ 0x00f1, "Return NetWareHandle,Information Level 7,Immediate Close Handle" ],
[ 0x00f2, "Return Volume/Directory Number,Information Level 7,Immediate Close Handle" ],
[ 0x00f4, "Return EAHandle,Close Handle on Error,Information Level 7,Immediate Close Handle" ],
[ 0x00f5, "Return NetWareHandle,Close Handle on Error,Information Level 7,Immediate Close Handle" ],
[ 0x00f6, "Return Volume/Directory Number,Close Handle on Error,Information Level 7,Immediate Close Handle" ],
])
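# EAFlags uses the same value encoding as DstEAFlags above; only the field
# name and abbreviation differ.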
EAHandle = uint32("ea_handle", "EA Handle")
EAHandle.Display("BASE_HEX")
EAHandleOrNetWareHandleOrVolume = uint32("ea_handle_or_netware_handle_or_volume", "EAHandle or NetWare Handle or Volume (see EAFlags)")
EAHandleOrNetWareHandleOrVolume.Display("BASE_HEX")
EAKey = nstring16("ea_key", "EA Key")
EAKeySize = uint32("ea_key_size", "Key Size")
EAKeySizeDuplicated = uint32("ea_key_size_duplicated", "Key Size Duplicated")
EAValue = nstring16("ea_value", "EA Value")
EAValueRep = fw_string("ea_value_rep", "EA Value", 1)
EAValueLength = uint16("ea_value_length", "Value Length")
EchoSocket = uint16("echo_socket", "Echo Socket")
EchoSocket.Display("BASE_HEX")
EffectiveRights = bitfield8("effective_rights", "Effective Rights", [
bf_boolean8(0x01, "effective_rights_read", "Read Rights"),
bf_boolean8(0x02, "effective_rights_write", "Write Rights"),
bf_boolean8(0x04, "effective_rights_open", "Open Rights"),
bf_boolean8(0x08, "effective_rights_create", "Create Rights"),
bf_boolean8(0x10, "effective_rights_delete", "Delete Rights"),
bf_boolean8(0x20, "effective_rights_parental", "Parental Rights"),
bf_boolean8(0x40, "effective_rights_search", "Search Rights"),
bf_boolean8(0x80, "effective_rights_modify", "Modify Rights"),
])
EnumInfoMask = bitfield8("enum_info_mask", "Return Information Mask", [
bf_boolean8(0x01, "enum_info_transport", "Transport Information"),
bf_boolean8(0x02, "enum_info_time", "Time Information"),
bf_boolean8(0x04, "enum_info_name", "Name Information"),
bf_boolean8(0x08, "enum_info_lock", "Lock Information"),
bf_boolean8(0x10, "enum_info_print", "Print Information"),
bf_boolean8(0x20, "enum_info_stats", "Statistical Information"),
bf_boolean8(0x40, "enum_info_account", "Accounting Information"),
bf_boolean8(0x80, "enum_info_auth", "Authentication Information"),
])
eventOffset = bytes("event_offset", "Event Offset", 8)
eventTime = uint32("event_time", "Event Time")
eventTime.Display("BASE_HEX")
ExpirationTime = uint32("expiration_time", "Expiration Time")
ExpirationTime.Display("BASE_HEX")
ExtAttrDataSize = uint32("ext_attr_data_size", "Extended Attributes Data Size")
ExtAttrCount = uint32("ext_attr_count", "Extended Attributes Count")
ExtAttrKeySize = uint32("ext_attr_key_size", "Extended Attributes Key Size")
ExtendedAttributesDefined = uint32("extended_attributes_defined", "Extended Attributes Defined")
ExtendedAttributeExtentsUsed = uint32("extended_attribute_extents_used", "Extended Attribute Extents Used")
ExtendedInfo = bitfield16("ext_info", "Extended Return Information", [
bf_boolean16(0x0001, "ext_info_update", "Last Update"),
bf_boolean16(0x0002, "ext_info_dos_name", "DOS Name"),
bf_boolean16(0x0004, "ext_info_flush", "Flush Time"),
bf_boolean16(0x0008, "ext_info_parental", "Parental"),
bf_boolean16(0x0010, "ext_info_mac_finder", "MAC Finder"),
bf_boolean16(0x0020, "ext_info_sibling", "Sibling"),
bf_boolean16(0x0040, "ext_info_effective", "Effective"),
bf_boolean16(0x0080, "ext_info_mac_date", "MAC Date"),
bf_boolean16(0x0100, "ext_info_access", "Last Access"),
bf_boolean16(0x0400, "ext_info_64_bit_fs", "64 Bit File Sizes"),
bf_boolean16(0x8000, "ext_info_newstyle", "New Style"),
])
ExtentListFormat = uint8("ext_lst_format", "Extent List Format")
RetExtentListCount = uint8("ret_ext_lst_count", "Extent List Count")
EndingOffset = bytes("end_offset", "Ending Offset", 8)
#ExtentLength = bytes("extent_length", "Length", 8),
ExtentList = bytes("ext_lst", "Extent List", 512)
ExtRouterActiveFlag = boolean8("ext_router_active_flag", "External Router Active Flag")
FailedAllocReqCnt = uint32("failed_alloc_req", "Failed Alloc Request Count")
FatalFATWriteErrors = uint16("fatal_fat_write_errors", "Fatal FAT Write Errors")
FATScanErrors = uint16("fat_scan_errors", "FAT Scan Errors")
FATWriteErrors = uint16("fat_write_errors", "FAT Write Errors")
FieldsLenTable = bytes("fields_len_table", "Fields Len Table", 32)
FileCount = uint16("file_count", "File Count")
FileDate = uint16("file_date", "File Date")
FileDate.NWDate()
FileDirWindow = uint16("file_dir_win", "File/Dir Window")
FileDirWindow.Display("BASE_HEX")
FileExecuteType = uint8("file_execute_type", "File Execute Type")
FileExtendedAttributes = val_string8("file_ext_attr", "File Extended Attributes", [
[ 0x00, "Search On All Read Only Opens" ],
[ 0x01, "Search On Read Only Opens With No Path" ],
[ 0x02, "Shell Default Search Mode" ],
[ 0x03, "Search On All Opens With No Path" ],
[ 0x04, "Do Not Search" ],
[ 0x05, "Reserved" ],
[ 0x06, "Search On All Opens" ],
[ 0x07, "Reserved" ],
[ 0x08, "Search On All Read Only Opens/Indexed" ],
[ 0x09, "Search On Read Only Opens With No Path/Indexed" ],
[ 0x0a, "Shell Default Search Mode/Indexed" ],
[ 0x0b, "Search On All Opens With No Path/Indexed" ],
[ 0x0c, "Do Not Search/Indexed" ],
[ 0x0d, "Indexed" ],
[ 0x0e, "Search On All Opens/Indexed" ],
[ 0x0f, "Indexed" ],
[ 0x10, "Search On All Read Only Opens/Transactional" ],
[ 0x11, "Search On Read Only Opens With No Path/Transactional" ],
[ 0x12, "Shell Default Search Mode/Transactional" ],
[ 0x13, "Search On All Opens With No Path/Transactional" ],
[ 0x14, "Do Not Search/Transactional" ],
[ 0x15, "Transactional" ],
[ 0x16, "Search On All Opens/Transactional" ],
[ 0x17, "Transactional" ],
[ 0x18, "Search On All Read Only Opens/Indexed/Transactional" ],
[ 0x19, "Search On Read Only Opens With No Path/Indexed/Transactional" ],
[ 0x1a, "Shell Default Search Mode/Indexed/Transactional" ],
[ 0x1b, "Search On All Opens With No Path/Indexed/Transactional" ],
[ 0x1c, "Do Not Search/Indexed/Transactional" ],
[ 0x1d, "Indexed/Transactional" ],
[ 0x1e, "Search On All Opens/Indexed/Transactional" ],
[ 0x1f, "Indexed/Transactional" ],
[ 0x40, "Search On All Read Only Opens/Read Audit" ],
[ 0x41, "Search On Read Only Opens With No Path/Read Audit" ],
[ 0x42, "Shell Default Search Mode/Read Audit" ],
[ 0x43, "Search On All Opens With No Path/Read Audit" ],
[ 0x44, "Do Not Search/Read Audit" ],
[ 0x45, "Read Audit" ],
[ 0x46, "Search On All Opens/Read Audit" ],
[ 0x47, "Read Audit" ],
[ 0x48, "Search On All Read Only Opens/Indexed/Read Audit" ],
[ 0x49, "Search On Read Only Opens With No Path/Indexed/Read Audit" ],
[ 0x4a, "Shell Default Search Mode/Indexed/Read Audit" ],
[ 0x4b, "Search On All Opens With No Path/Indexed/Read Audit" ],
[ 0x4c, "Do Not Search/Indexed/Read Audit" ],
[ 0x4d, "Indexed/Read Audit" ],
[ 0x4e, "Search On All Opens/Indexed/Read Audit" ],
[ 0x4f, "Indexed/Read Audit" ],
[ 0x50, "Search On All Read Only Opens/Transactional/Read Audit" ],
[ 0x51, "Search On Read Only Opens With No Path/Transactional/Read Audit" ],
[ 0x52, "Shell Default Search Mode/Transactional/Read Audit" ],
[ 0x53, "Search On All Opens With No Path/Transactional/Read Audit" ],
[ 0x54, "Do Not Search/Transactional/Read Audit" ],
[ 0x55, "Transactional/Read Audit" ],
[ 0x56, "Search On All Opens/Transactional/Read Audit" ],
[ 0x57, "Transactional/Read Audit" ],
[ 0x58, "Search On All Read Only Opens/Indexed/Transactional/Read Audit" ],
[ 0x59, "Search On Read Only Opens With No Path/Indexed/Transactional/Read Audit" ],
[ 0x5a, "Shell Default Search Mode/Indexed/Transactional/Read Audit" ],
[ 0x5b, "Search On All Opens With No Path/Indexed/Transactional/Read Audit" ],
[ 0x5c, "Do Not Search/Indexed/Transactional/Read Audit" ],
[ 0x5d, "Indexed/Transactional/Read Audit" ],
[ 0x5e, "Search On All Opens/Indexed/Transactional/Read Audit" ],
[ 0x5f, "Indexed/Transactional/Read Audit" ],
[ 0x80, "Search On All Read Only Opens/Write Audit" ],
[ 0x81, "Search On Read Only Opens With No Path/Write Audit" ],
[ 0x82, "Shell Default Search Mode/Write Audit" ],
[ 0x83, "Search On All Opens With No Path/Write Audit" ],
[ 0x84, "Do Not Search/Write Audit" ],
[ 0x85, "Write Audit" ],
[ 0x86, "Search On All Opens/Write Audit" ],
[ 0x87, "Write Audit" ],
[ 0x88, "Search On All Read Only Opens/Indexed/Write Audit" ],
[ 0x89, "Search On Read Only Opens With No Path/Indexed/Write Audit" ],
[ 0x8a, "Shell Default Search Mode/Indexed/Write Audit" ],
[ 0x8b, "Search On All Opens With No Path/Indexed/Write Audit" ],
[ 0x8c, "Do Not Search/Indexed/Write Audit" ],
[ 0x8d, "Indexed/Write Audit" ],
[ 0x8e, "Search On All Opens/Indexed/Write Audit" ],
[ 0x8f, "Indexed/Write Audit" ],
[ 0x90, "Search On All Read Only Opens/Transactional/Write Audit" ],
[ 0x91, "Search On Read Only Opens With No Path/Transactional/Write Audit" ],
[ 0x92, "Shell Default Search Mode/Transactional/Write Audit" ],
[ 0x93, "Search On All Opens With No Path/Transactional/Write Audit" ],
[ 0x94, "Do Not Search/Transactional/Write Audit" ],
[ 0x95, "Transactional/Write Audit" ],
[ 0x96, "Search On All Opens/Transactional/Write Audit" ],
[ 0x97, "Transactional/Write Audit" ],
[ 0x98, "Search On All Read Only Opens/Indexed/Transactional/Write Audit" ],
[ 0x99, "Search On Read Only Opens With No Path/Indexed/Transactional/Write Audit" ],
[ 0x9a, "Shell Default Search Mode/Indexed/Transactional/Write Audit" ],
[ 0x9b, "Search On All Opens With No Path/Indexed/Transactional/Write Audit" ],
[ 0x9c, "Do Not Search/Indexed/Transactional/Write Audit" ],
[ 0x9d, "Indexed/Transactional/Write Audit" ],
[ 0x9e, "Search On All Opens/Indexed/Transactional/Write Audit" ],
[ 0x9f, "Indexed/Transactional/Write Audit" ],
[ 0xa0, "Search On All Read Only Opens/Read Audit/Write Audit" ],
[ 0xa1, "Search On Read Only Opens With No Path/Read Audit/Write Audit" ],
[ 0xa2, "Shell Default Search Mode/Read Audit/Write Audit" ],
[ 0xa3, "Search On All Opens With No Path/Read Audit/Write Audit" ],
[ 0xa4, "Do Not Search/Read Audit/Write Audit" ],
[ 0xa5, "Read Audit/Write Audit" ],
[ 0xa6, "Search On All Opens/Read Audit/Write Audit" ],
[ 0xa7, "Read Audit/Write Audit" ],
[ 0xa8, "Search On All Read Only Opens/Indexed/Read Audit/Write Audit" ],
[ 0xa9, "Search On Read Only Opens With No Path/Indexed/Read Audit/Write Audit" ],
[ 0xaa, "Shell Default Search Mode/Indexed/Read Audit/Write Audit" ],
[ 0xab, "Search On All Opens With No Path/Indexed/Read Audit/Write Audit" ],
[ 0xac, "Do Not Search/Indexed/Read Audit/Write Audit" ],
[ 0xad, "Indexed/Read Audit/Write Audit" ],
[ 0xae, "Search On All Opens/Indexed/Read Audit/Write Audit" ],
[ 0xaf, "Indexed/Read Audit/Write Audit" ],
[ 0xb0, "Search On All Read Only Opens/Transactional/Read Audit/Write Audit" ],
[ 0xb1, "Search On Read Only Opens With No Path/Transactional/Read Audit/Write Audit" ],
[ 0xb2, "Shell Default Search Mode/Transactional/Read Audit/Write Audit" ],
[ 0xb3, "Search On All Opens With No Path/Transactional/Read Audit/Write Audit" ],
[ 0xb4, "Do Not Search/Transactional/Read Audit/Write Audit" ],
[ 0xb5, "Transactional/Read Audit/Write Audit" ],
[ 0xb6, "Search On All Opens/Transactional/Read Audit/Write Audit" ],
[ 0xb7, "Transactional/Read Audit/Write Audit" ],
[ 0xb8, "Search On All Read Only Opens/Indexed/Transactional/Read Audit/Write Audit" ],
[ 0xb9, "Search On Read Only Opens With No Path/Indexed/Transactional/Read Audit/Write Audit" ],
[ 0xba, "Shell Default Search Mode/Indexed/Transactional/Read Audit/Write Audit" ],
[ 0xbb, "Search On All Opens With No Path/Indexed/Transactional/Read Audit/Write Audit" ],
[ 0xbc, "Do Not Search/Indexed/Transactional/Read Audit/Write Audit" ],
[ 0xbd, "Indexed/Transactional/Read Audit/Write Audit" ],
[ 0xbe, "Search On All Opens/Indexed/Transactional/Read Audit/Write Audit" ],
[ 0xbf, "Indexed/Transactional/Read Audit/Write Audit" ],
])
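# FileExtendedAttributes is a packed byte spelled out combinatorially: the
# low three bits select the search mode (the same eight modes as the
# att_def32_search sub-field above), 0x08 adds Indexed, 0x10 Transactional,
# 0x40 Read Audit, and 0x80 Write Audit.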
fileFlags = uint32("file_flags", "File Flags")
FileHandle = bytes("file_handle", "File Handle", 6)
FileLimbo = uint32("file_limbo", "File Limbo")
FileListCount = uint32("file_list_count", "File List Count")
FileLock = val_string8("file_lock", "File Lock", [
[ 0x00, "Not Locked" ],
[ 0xfe, "Locked by file lock" ],
[ 0xff, "Unknown" ],
])
FileLockCount = uint16("file_lock_count", "File Lock Count")
FileMigrationState = val_string8("file_mig_state", "File Migration State", [
[ 0x00, "Mark file ineligible for file migration" ],
[ 0x01, "Mark file eligible for file migration" ],
[ 0x02, "Mark file as migrated and delete fat chains" ],
[ 0x03, "Reset file status back to normal" ],
[ 0x04, "Get file data back and reset file status back to normal" ],
])
FileMode = uint8("file_mode", "File Mode")
FileName = nstring8("file_name", "Filename")
FileName12 = fw_string("file_name_12", "Filename", 12)
FileName14 = fw_string("file_name_14", "Filename", 14)
FileName16 = nstring16("file_name_16", "Filename")
FileNameLen = uint8("file_name_len", "Filename Length")
FileOffset = uint32("file_offset", "File Offset")
FilePath = nstring8("file_path", "File Path")
FileSize = uint32("file_size", "File Size", ENC_BIG_ENDIAN)
FileSize64bit = uint64("f_size_64bit", "64-bit File Size")
FileSystemID = uint8("file_system_id", "File System ID")
FileTime = uint16("file_time", "File Time")
FileTime.NWTime()
FileUseCount = uint16("file_use_count", "File Use Count")
FileWriteFlags = val_string8("file_write_flags", "File Write Flags", [
[ 0x01, "Writing" ],
[ 0x02, "Write aborted" ],
])
FileWriteState = val_string8("file_write_state", "File Write State", [
[ 0x00, "Not Writing" ],
[ 0x01, "Write in Progress" ],
[ 0x02, "Write Being Stopped" ],
])
Filler = uint8("filler", "Filler")
FinderAttr = bitfield16("finder_attr", "Finder Info Attributes", [
bf_boolean16(0x0001, "finder_attr_desktop", "Object on Desktop"),
bf_boolean16(0x2000, "finder_attr_invisible", "Object is Invisible"),
bf_boolean16(0x4000, "finder_attr_bundle", "Object Has Bundle"),
])
FixedBitMask = uint32("fixed_bit_mask", "Fixed Bit Mask")
FixedBitsDefined = uint16("fixed_bits_defined", "Fixed Bits Defined")
FlagBits = uint8("flag_bits", "Flag Bits")
Flags = uint8("flags", "Flags")
FlagsDef = uint16("flags_def", "Flags")
FlushTime = uint32("flush_time", "Flush Time")
FolderFlag = val_string8("folder_flag", "Folder Flag", [
[ 0x00, "Not a Folder" ],
[ 0x01, "Folder" ],
])
ForkCount = uint8("fork_count", "Fork Count")
ForkIndicator = val_string8("fork_indicator", "Fork Indicator", [
[ 0x00, "Data Fork" ],
[ 0x01, "Resource Fork" ],
])
ForceFlag = val_string8("force_flag", "Force Server Down Flag", [
[ 0x00, "Down Server if No Files Are Open" ],
[ 0xff, "Down Server Immediately, Auto-Close Open Files" ],
])
ForgedDetachedRequests = uint16("forged_detached_requests", "Forged Detached Requests")
FormType = uint16("form_type", "Form Type")
FormTypeCnt = uint32("form_type_count", "Form Types Count")
FoundSomeMem = uint32("found_some_mem", "Found Some Memory")
FractionalSeconds = eptime("fractional_time", "Fractional Time in Seconds")
FraggerHandle = uint32("fragger_handle", "Fragment Handle")
FraggerHandle.Display('BASE_HEX')
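#
# .Display("BASE_HEX") overrides the default decimal display base for
# the generated header field, so handles and IDs such as FraggerHandle
# show up in hexadecimal in the protocol tree.
#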
FragmentWriteOccurred = uint16("fragment_write_occurred", "Fragment Write Occurred")
FragSize = uint32("frag_size", "Fragment Size")
FreeableLimboSectors = uint32("freeable_limbo_sectors", "Freeable Limbo Sectors")
FreeBlocks = uint32("free_blocks", "Free Blocks")
FreedClusters = uint32("freed_clusters", "Freed Clusters")
FreeDirectoryEntries = uint16("free_directory_entries", "Free Directory Entries")
FSEngineFlag = boolean8("fs_engine_flag", "FS Engine Flag")
FullName = fw_string("full_name", "Full Name", 39)
GetSetFlag = val_string8("get_set_flag", "Get Set Flag", [
[ 0x00, "Get the default support module ID" ],
[ 0x01, "Set the default support module ID" ],
])
GUID = bytes("guid", "GUID", 16)
HandleFlag = val_string8("handle_flag", "Handle Flag", [
[ 0x00, "Short Directory Handle" ],
[ 0x01, "Directory Base" ],
[ 0xFF, "No Handle Present" ],
])
HandleInfoLevel = val_string8("handle_info_level", "Handle Info Level", [
[ 0x00, "Get Limited Information from a File Handle" ],
[ 0x01, "Get Limited Information from a File Handle Using a Name Space" ],
[ 0x02, "Get Information from a File Handle" ],
[ 0x03, "Get Information from a Directory Handle" ],
[ 0x04, "Get Complete Information from a Directory Handle" ],
[ 0x05, "Get Complete Information from a File Handle" ],
])
HeldBytesRead = bytes("held_bytes_read", "Held Bytes Read", 6)
HeldBytesWritten = bytes("held_bytes_write", "Held Bytes Written", 6)
HeldConnectTimeInMinutes = uint32("held_conn_time", "Held Connect Time in Minutes")
HeldRequests = uint32("user_info_held_req", "Held Requests")
HoldAmount = uint32("hold_amount", "Hold Amount")
HoldCancelAmount = uint32("hold_cancel_amount", "Hold Cancel Amount")
HolderID = uint32("holder_id", "Holder ID")
HolderID.Display("BASE_HEX")
HoldTime = uint32("hold_time", "Hold Time")
HopsToNet = uint16("hops_to_net", "Hop Count")
HorizLocation = uint16("horiz_location", "Horizontal Location")
HostAddress = bytes("host_address", "Host Address", 6)
HotFixBlocksAvailable = uint16("hot_fix_blocks_available", "Hot Fix Blocks Available")
HotFixDisabled = val_string8("hot_fix_disabled", "Hot Fix Disabled", [
[ 0x00, "Enabled" ],
[ 0x01, "Disabled" ],
])
HotFixTableSize = uint16("hot_fix_table_size", "Hot Fix Table Size")
HotFixTableStart = uint32("hot_fix_table_start", "Hot Fix Table Start")
Hour = uint8("s_hour", "Hour")
HugeBitMask = uint32("huge_bit_mask", "Huge Bit Mask")
HugeBitsDefined = uint16("huge_bits_defined", "Huge Bits Defined")
HugeData = nstring8("huge_data", "Huge Data")
HugeDataUsed = uint32("huge_data_used", "Huge Data Used")
HugeStateInfo = bytes("huge_state_info", "Huge State Info", 16)
IdentificationNumber = uint32("identification_number", "Identification Number")
IgnoredRxPkts = uint32("ignored_rx_pkts", "Ignored Receive Packets")
IncomingPacketDiscardedNoDGroup = uint16("incoming_packet_discarded_no_dgroup", "Incoming Packet Discarded No DGroup")
IndexNumber = uint8("index_number", "Index Number")
InfoCount = uint16("info_count", "Info Count")
InfoFlags = bitfield32("info_flags", "Info Flags", [
bf_boolean32(0x10000000, "info_flags_security", "Return Object Security"),
bf_boolean32(0x20000000, "info_flags_flags", "Return Object Flags"),
bf_boolean32(0x40000000, "info_flags_type", "Return Object Type"),
bf_boolean32(0x80000000, "info_flags_name", "Return Object Name"),
])
InfoLevelNumber = val_string8("info_level_num", "Information Level Number", [
[ 0x0, "Single Directory Quota Information" ],
[ 0x1, "Multi-Level Directory Quota Information" ],
])
InfoMask = bitfield32("info_mask", "Information Mask", [
bf_boolean32(0x00000001, "info_flags_dos_time", "DOS Time"),
bf_boolean32(0x00000002, "info_flags_ref_count", "Reference Count"),
bf_boolean32(0x00000004, "info_flags_dos_attr", "DOS Attributes"),
bf_boolean32(0x00000008, "info_flags_ids", "ID's"),
bf_boolean32(0x00000010, "info_flags_ds_sizes", "Data Stream Sizes"),
bf_boolean32(0x00000020, "info_flags_ns_attr", "Name Space Attributes"),
bf_boolean32(0x00000040, "info_flags_ea_present", "EA Present Flag"),
bf_boolean32(0x00000080, "info_flags_all_attr", "All Attributes"),
bf_boolean32(0x00000100, "info_flags_all_dirbase_num", "All Directory Base Numbers"),
bf_boolean32(0x00000200, "info_flags_max_access_mask", "Maximum Access Mask"),
bf_boolean32(0x00000400, "info_flags_flush_time", "Flush Time"),
bf_boolean32(0x00000800, "info_flags_prnt_base_id", "Parent Base ID"),
bf_boolean32(0x00001000, "info_flags_mac_finder", "Mac Finder Information"),
bf_boolean32(0x00002000, "info_flags_sibling_cnt", "Sibling Count"),
bf_boolean32(0x00004000, "info_flags_effect_rights", "Effective Rights"),
bf_boolean32(0x00008000, "info_flags_mac_time", "Mac Time"),
bf_boolean32(0x20000000, "info_mask_dosname", "DOS Name"),
bf_boolean32(0x40000000, "info_mask_c_name_space", "Creator Name Space & Name"),
bf_boolean32(0x80000000, "info_mask_name", "Name"),
])
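#
# A bitfieldNN such as InfoMask above expands into one parent field
# plus a single-bit boolean subfield per bf_booleanNN entry. A minimal
# sketch of the pattern, with hypothetical names that are not part of
# the protocol:
#
#       ExampleMask = bitfield16("example_mask", "Example Mask", [
#               bf_boolean16(0x0001, "example_mask_low", "Low Bit"),
#               bf_boolean16(0x8000, "example_mask_high", "High Bit"),
#       ])
#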
InheritedRightsMask = bitfield16("inherited_rights_mask", "Inherited Rights Mask", [
bf_boolean16(0x0001, "inh_rights_read", "Read Rights"),
bf_boolean16(0x0002, "inh_rights_write", "Write Rights"),
bf_boolean16(0x0004, "inh_rights_open", "Open Rights"),
bf_boolean16(0x0008, "inh_rights_create", "Create Rights"),
bf_boolean16(0x0010, "inh_rights_delete", "Delete Rights"),
bf_boolean16(0x0020, "inh_rights_parent", "Change Access"),
bf_boolean16(0x0040, "inh_rights_search", "See Files Flag"),
bf_boolean16(0x0080, "inh_rights_modify", "Modify Rights"),
bf_boolean16(0x0100, "inh_rights_supervisor", "Supervisor"),
])
InheritanceRevokeMask = bitfield16("inheritance_revoke_mask", "Revoke Rights Mask", [
bf_boolean16(0x0001, "inh_revoke_read", "Read Rights"),
bf_boolean16(0x0002, "inh_revoke_write", "Write Rights"),
bf_boolean16(0x0004, "inh_revoke_open", "Open Rights"),
bf_boolean16(0x0008, "inh_revoke_create", "Create Rights"),
bf_boolean16(0x0010, "inh_revoke_delete", "Delete Rights"),
bf_boolean16(0x0020, "inh_revoke_parent", "Change Access"),
bf_boolean16(0x0040, "inh_revoke_search", "See Files Flag"),
bf_boolean16(0x0080, "inh_revoke_modify", "Modify Rights"),
bf_boolean16(0x0100, "inh_revoke_supervisor", "Supervisor"),
])
InitialSemaphoreValue = uint8("initial_semaphore_value", "Initial Semaphore Value")
InpInfotype = uint32("inp_infotype", "Information Type")
Inpld = uint32("inp_ld", "Volume Number or Directory Handle")
InspectSize = uint32("inspect_size", "Inspect Size")
InternetBridgeVersion = uint8("internet_bridge_version", "Internet Bridge Version")
InterruptNumbersUsed = uint32("interrupt_numbers_used", "Interrupt Numbers Used")
InUse = uint32("in_use", "Blocks in Use")
InUse64 = uint64("in_use64", "Blocks in Use")
IOAddressesUsed = bytes("io_addresses_used", "IO Addresses Used", 8)
IOErrorCount = uint16("io_error_count", "IO Error Count")
IOEngineFlag = boolean8("io_engine_flag", "IO Engine Flag")
IPXNotMyNetwork = uint16("ipx_not_my_network", "IPX Not My Network")
ItemsChanged = uint32("items_changed", "Items Changed")
ItemsChecked = uint32("items_checked", "Items Checked")
ItemsCount = uint32("items_count", "Items Count")
itemsInList = uint32("items_in_list", "Items in List")
ItemsInPacket = uint32("items_in_packet", "Items in Packet")
JobControlFlags = bitfield8("job_control_flags", "Job Control Flags", [
bf_boolean8(0x08, "job_control_job_recovery", "Job Recovery"),
bf_boolean8(0x10, "job_control_reservice", "ReService Job"),
bf_boolean8(0x20, "job_control_file_open", "File Open"),
bf_boolean8(0x40, "job_control_user_hold", "User Hold"),
bf_boolean8(0x80, "job_control_operator_hold", "Operator Hold"),
])
JobControlFlagsWord = bitfield16("job_control_flags_word", "Job Control Flags", [
bf_boolean16(0x0008, "job_control1_job_recovery", "Job Recovery"),
bf_boolean16(0x0010, "job_control1_reservice", "ReService Job"),
bf_boolean16(0x0020, "job_control1_file_open", "File Open"),
bf_boolean16(0x0040, "job_control1_user_hold", "User Hold"),
bf_boolean16(0x0080, "job_control1_operator_hold", "Operator Hold"),
])
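#
# JobControlFlagsWord repeats the bit assignments of the 8-bit
# JobControlFlags above in a 16-bit field, presumably for the queue
# management calls that return the flags as a word.
#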
JobCount = uint32("job_count", "Job Count")
JobFileHandle = bytes("job_file_handle", "Job File Handle", 6)
JobFileHandleLong = uint32("job_file_handle_long", "Job File Handle", ENC_BIG_ENDIAN)
JobFileHandleLong.Display("BASE_HEX")
JobFileName = fw_string("job_file_name", "Job File Name", 14)
JobPosition = uint8("job_position", "Job Position")
JobPositionWord = uint16("job_position_word", "Job Position")
JobNumber = uint16("job_number", "Job Number", ENC_BIG_ENDIAN )
JobNumberLong = uint32("job_number_long", "Job Number", ENC_BIG_ENDIAN )
JobNumberLong.Display("BASE_HEX")
JobType = uint16("job_type", "Job Type", ENC_BIG_ENDIAN )
LANCustomVariablesCount = uint32("lan_cust_var_count", "LAN Custom Variables Count")
LANdriverBoardInstance = uint16("lan_drv_bd_inst", "LAN Driver Board Instance")
LANdriverBoardNumber = uint16("lan_drv_bd_num", "LAN Driver Board Number")
LANdriverCardID = uint16("lan_drv_card_id", "LAN Driver Card ID")
LANdriverCardName = fw_string("lan_drv_card_name", "LAN Driver Card Name", 28)
LANdriverCFG_MajorVersion = uint8("lan_dvr_cfg_major_vrs", "LAN Driver Config - Major Version")
LANdriverCFG_MinorVersion = uint8("lan_dvr_cfg_minor_vrs", "LAN Driver Config - Minor Version")
LANdriverDMAUsage1 = uint8("lan_drv_dma_usage1", "Primary DMA Channel")
LANdriverDMAUsage2 = uint8("lan_drv_dma_usage2", "Secondary DMA Channel")
LANdriverFlags = uint16("lan_drv_flags", "LAN Driver Flags")
LANdriverFlags.Display("BASE_HEX")
LANdriverInterrupt1 = uint8("lan_drv_interrupt1", "Primary Interrupt Vector")
LANdriverInterrupt2 = uint8("lan_drv_interrupt2", "Secondary Interrupt Vector")
LANdriverIOPortsAndRanges1 = uint16("lan_drv_io_ports_and_ranges_1", "Primary Base I/O Port")
LANdriverIOPortsAndRanges2 = uint16("lan_drv_io_ports_and_ranges_2", "Number of I/O Ports")
LANdriverIOPortsAndRanges3 = uint16("lan_drv_io_ports_and_ranges_3", "Secondary Base I/O Port")
LANdriverIOPortsAndRanges4 = uint16("lan_drv_io_ports_and_ranges_4", "Number of I/O Ports")
LANdriverIOReserved = bytes("lan_drv_io_reserved", "LAN Driver IO Reserved", 14)
LANdriverLineSpeed = uint16("lan_drv_line_speed", "LAN Driver Line Speed")
LANdriverLink = uint32("lan_drv_link", "LAN Driver Link")
LANdriverLogicalName = bytes("lan_drv_log_name", "LAN Driver Logical Name", 18)
LANdriverMajorVersion = uint8("lan_drv_major_ver", "LAN Driver Major Version")
LANdriverMaximumSize = uint32("lan_drv_max_size", "LAN Driver Maximum Size")
LANdriverMaxRecvSize = uint32("lan_drv_max_rcv_size", "LAN Driver Maximum Receive Size")
LANdriverMediaID = uint16("lan_drv_media_id", "LAN Driver Media ID")
LANdriverMediaType = fw_string("lan_drv_media_type", "LAN Driver Media Type", 40)
LANdriverMemoryDecode0 = uint32("lan_drv_mem_decode_0", "LAN Driver Memory Decode 0")
LANdriverMemoryDecode1 = uint32("lan_drv_mem_decode_1", "LAN Driver Memory Decode 1")
LANdriverMemoryLength0 = uint16("lan_drv_mem_length_0", "LAN Driver Memory Length 0")
LANdriverMemoryLength1 = uint16("lan_drv_mem_length_1", "LAN Driver Memory Length 1")
LANdriverMinorVersion = uint8("lan_drv_minor_ver", "LAN Driver Minor Version")
# Values 0x80-0xbf indicate canonical addressing; values 0xc0-0xff
# indicate non-canonical addressing.
LANdriverModeFlags = val_string8("lan_dvr_mode_flags", "LAN Driver Mode Flags",
        [ [ x, "Canonical Address" ] for x in range(0x80, 0xc0) ] +
        [ [ x, "Non-Canonical Address" ] for x in range(0xc0, 0x100) ]
)
LANDriverNumber = uint8("lan_driver_number", "LAN Driver Number")
LANdriverNodeAddress = bytes("lan_dvr_node_addr", "LAN Driver Node Address", 6)
LANdriverRecvSize = uint32("lan_drv_rcv_size", "LAN Driver Receive Size")
LANdriverReserved = uint16("lan_drv_reserved", "LAN Driver Reserved")
LANdriverSendRetries = uint16("lan_drv_snd_retries", "LAN Driver Send Retries")
LANdriverSharingFlags = uint16("lan_drv_share", "LAN Driver Sharing Flags")
LANdriverShortName = fw_string("lan_drv_short_name", "LAN Driver Short Name", 40)
LANdriverSlot = uint16("lan_drv_slot", "LAN Driver Slot")
LANdriverSrcRouting = uint32("lan_drv_src_route", "LAN Driver Source Routing")
LANdriverTransportTime = uint16("lan_drv_trans_time", "LAN Driver Transport Time")
LastAccessedDate = uint16("last_access_date", "Last Accessed Date")
LastAccessedDate.NWDate()
LastAccessedTime = uint16("last_access_time", "Last Accessed Time")
LastAccessedTime.NWTime()
LastGarbCollect = uint32("last_garbage_collect", "Last Garbage Collection")
LastInstance = uint32("last_instance", "Last Instance")
LastRecordSeen = uint16("last_record_seen", "Last Record Seen")
LastSearchIndex = uint16("last_search_index", "Search Index")
LastSeen = uint32("last_seen", "Last Seen")
LastSequenceNumber = uint16("last_sequence_number", "Sequence Number")
Length64bit = bytes("length_64bit", "64bit Length", 64)
Level = uint8("level", "Level")
LFSCounters = uint32("lfs_counters", "LFS Counters")
LimboDataStreamsCount = uint32("limbo_data_streams_count", "Limbo Data Streams Count")
limbCount = uint32("limb_count", "Limb Count")
limbFlags = bitfield32("limb_flags", "Limb Flags", [
bf_boolean32(0x00000002, "scan_entire_folder", "Wild Search"),
bf_boolean32(0x00000004, "scan_files_only", "Scan Files Only"),
bf_boolean32(0x00000008, "scan_folders_only", "Scan Folders Only"),
bf_boolean32(0x00000010, "allow_system", "Allow System Files and Folders"),
bf_boolean32(0x00000020, "allow_hidden", "Allow Hidden Files and Folders"),
])
limbScanNum = uint32("limb_scan_num", "Limb Scan Number")
LimboUsed = uint32("limbo_used", "Limbo Used")
LoadedNameSpaces = uint8("loaded_name_spaces", "Loaded Name Spaces")
LocalConnectionID = uint32("local_connection_id", "Local Connection ID")
LocalConnectionID.Display("BASE_HEX")
LocalMaxPacketSize = uint32("local_max_packet_size", "Local Max Packet Size")
LocalMaxSendSize = uint32("local_max_send_size", "Local Max Send Size")
LocalMaxRecvSize = uint32("local_max_recv_size", "Local Max Recv Size")
LocalLoginInfoCcode = uint8("local_login_info_ccode", "Local Login Info Completion Code")
LocalTargetSocket = uint32("local_target_socket", "Local Target Socket")
LocalTargetSocket.Display("BASE_HEX")
LockAreaLen = uint32("lock_area_len", "Lock Area Length")
LockAreasStartOffset = uint32("lock_areas_start_offset", "Lock Areas Start Offset")
LockTimeout = uint16("lock_timeout", "Lock Timeout")
Locked = val_string8("locked", "Locked Flag", [
[ 0x00, "Not Locked Exclusively" ],
[ 0x01, "Locked Exclusively" ],
])
LockFlag = val_string8("lock_flag", "Lock Flag", [
[ 0x00, "Not Locked, Log for Future Exclusive Lock" ],
[ 0x01, "Exclusive Lock (Read/Write)" ],
[ 0x02, "Log for Future Shared Lock"],
[ 0x03, "Shareable Lock (Read-Only)" ],
[ 0xfe, "Locked by a File Lock" ],
[ 0xff, "Locked by Begin Share File Set" ],
])
LockName = nstring8("lock_name", "Lock Name")
LockStatus = val_string8("lock_status", "Lock Status", [
[ 0x00, "Locked Exclusive" ],
[ 0x01, "Locked Shareable" ],
[ 0x02, "Logged" ],
[ 0x06, "Lock is Held by TTS"],
])
ConnLockStatus = val_string8("conn_lock_status", "Lock Status", [
[ 0x00, "Normal (connection free to run)" ],
[ 0x01, "Waiting on physical record lock" ],
[ 0x02, "Waiting on a file lock" ],
[ 0x03, "Waiting on a logical record lock"],
[ 0x04, "Waiting on a semaphore"],
])
LockType = val_string8("lock_type", "Lock Type", [
[ 0x00, "Locked" ],
[ 0x01, "Open Shareable" ],
[ 0x02, "Logged" ],
[ 0x03, "Open Normal" ],
[ 0x06, "TTS Holding Lock" ],
[ 0x07, "Transaction Flag Set on This File" ],
])
LogFileFlagHigh = bitfield8("log_file_flag_high", "Log File Flag (byte 2)", [
bf_boolean8(0x80, "log_flag_call_back", "Call Back Requested" ),
])
LogFileFlagLow = bitfield8("log_file_flag_low", "Log File Flag", [
bf_boolean8(0x01, "log_flag_lock_file", "Lock File Immediately" ),
])
LoggedObjectID = uint32("logged_object_id", "Logged in Object ID")
LoggedObjectID.Display("BASE_HEX")
LoggedCount = uint16("logged_count", "Logged Count")
LogicalConnectionNumber = uint16("logical_connection_number", "Logical Connection Number", ENC_BIG_ENDIAN)
LogicalDriveCount = uint8("logical_drive_count", "Logical Drive Count")
LogicalDriveNumber = uint8("logical_drive_number", "Logical Drive Number")
LogicalLockThreshold = uint8("logical_lock_threshold", "Logical Lock Threshold")
LogicalRecordName = nstring8("logical_record_name", "Logical Record Name")
LoginKey = bytes("login_key", "Login Key", 8)
LogLockType = uint8("log_lock_type", "Log Lock Type")
LogTtlRxPkts = uint32("log_ttl_rx_pkts", "Total Received Packets")
LogTtlTxPkts = uint32("log_ttl_tx_pkts", "Total Transmitted Packets")
LongName = fw_string("long_name", "Long Name", 32)
LRUBlockWasDirty = uint16("lru_block_was_dirty", "LRU Block Was Dirty")
MacAttr = bitfield16("mac_attr", "Attributes", [
bf_boolean16(0x0001, "mac_attr_smode1", "Search Mode"),
bf_boolean16(0x0002, "mac_attr_smode2", "Search Mode"),
bf_boolean16(0x0004, "mac_attr_smode3", "Search Mode"),
bf_boolean16(0x0010, "mac_attr_transaction", "Transaction"),
bf_boolean16(0x0020, "mac_attr_index", "Index"),
bf_boolean16(0x0040, "mac_attr_r_audit", "Read Audit"),
bf_boolean16(0x0080, "mac_attr_w_audit", "Write Audit"),
bf_boolean16(0x0100, "mac_attr_r_only", "Read Only"),
bf_boolean16(0x0200, "mac_attr_hidden", "Hidden"),
bf_boolean16(0x0400, "mac_attr_system", "System"),
bf_boolean16(0x0800, "mac_attr_execute_only", "Execute Only"),
bf_boolean16(0x1000, "mac_attr_subdirectory", "Subdirectory"),
bf_boolean16(0x2000, "mac_attr_archive", "Archive"),
bf_boolean16(0x8000, "mac_attr_share", "Shareable File"),
])
MACBackupDate = uint16("mac_backup_date", "Mac Backup Date")
MACBackupDate.NWDate()
MACBackupTime = uint16("mac_backup_time", "Mac Backup Time")
MACBackupTime.NWTime()
MacBaseDirectoryID = uint32("mac_base_directory_id", "Mac Base Directory ID", ENC_BIG_ENDIAN)
MacBaseDirectoryID.Display("BASE_HEX")
MACCreateDate = uint16("mac_create_date", "Mac Create Date")
MACCreateDate.NWDate()
MACCreateTime = uint16("mac_create_time", "Mac Create Time")
MACCreateTime.NWTime()
MacDestinationBaseID = uint32("mac_destination_base_id", "Mac Destination Base ID")
MacDestinationBaseID.Display("BASE_HEX")
MacFinderInfo = bytes("mac_finder_info", "Mac Finder Information", 32)
MacLastSeenID = uint32("mac_last_seen_id", "Mac Last Seen ID")
MacLastSeenID.Display("BASE_HEX")
MacSourceBaseID = uint32("mac_source_base_id", "Mac Source Base ID")
MacSourceBaseID.Display("BASE_HEX")
MajorVersion = uint32("major_version", "Major Version")
MaxBytes = uint16("max_bytes", "Maximum Number of Bytes")
MaxDataStreams = uint32("max_data_streams", "Maximum Data Streams")
MaxDirDepth = uint32("max_dir_depth", "Maximum Directory Depth")
MaximumSpace = uint16("max_space", "Maximum Space")
MaxNumOfConn = uint32("max_num_of_conn", "Maximum Number of Connections")
MaxNumOfLANS = uint32("max_num_of_lans", "Maximum Number Of LANs")
MaxNumOfMedias = uint32("max_num_of_medias", "Maximum Number Of Media")
MaxNumOfNmeSps = uint32("max_num_of_nme_sps", "Maximum Number Of Name Spaces")
MaxNumOfSpoolPr = uint32("max_num_of_spool_pr", "Maximum Number Of Spool Printers")
MaxNumOfStacks = uint32("max_num_of_stacks", "Maximum Number Of Stacks")
MaxNumOfUsers = uint32("max_num_of_users", "Maximum Number Of Users")
MaxNumOfVol = uint32("max_num_of_vol", "Maximum Number of Volumes")
MaxReadDataReplySize = uint16("max_read_data_reply_size", "Max Read Data Reply Size")
MaxSpace = uint32("maxspace", "Maximum Space")
MaxSpace64 = uint64("maxspace64", "Maximum Space")
MaxUsedDynamicSpace = uint32("max_used_dynamic_space", "Max Used Dynamic Space")
MediaList = uint32("media_list", "Media List")
MediaListCount = uint32("media_list_count", "Media List Count")
MediaName = nstring8("media_name", "Media Name")
MediaNumber = uint32("media_number", "Media Number")
MaxReplyObjectIDCount = uint8("max_reply_obj_id_count", "Max Reply Object ID Count")
MediaObjectType = val_string8("media_object_type", "Object Type", [
[ 0x00, "Adapter" ],
[ 0x01, "Changer" ],
[ 0x02, "Removable Device" ],
[ 0x03, "Device" ],
[ 0x04, "Removable Media" ],
[ 0x05, "Partition" ],
[ 0x06, "Slot" ],
[ 0x07, "Hotfix" ],
[ 0x08, "Mirror" ],
[ 0x09, "Parity" ],
[ 0x0a, "Volume Segment" ],
[ 0x0b, "Volume" ],
[ 0x0c, "Clone" ],
[ 0x0d, "Fixed Media" ],
[ 0x0e, "Unknown" ],
])
MemberName = nstring8("member_name", "Member Name")
MemberType = val_string16("member_type", "Member Type", [
[ 0x0000, "Unknown" ],
[ 0x0001, "User" ],
[ 0x0002, "User group" ],
[ 0x0003, "Print queue" ],
[ 0x0004, "NetWare file server" ],
[ 0x0005, "Job server" ],
[ 0x0006, "Gateway" ],
[ 0x0007, "Print server" ],
[ 0x0008, "Archive queue" ],
[ 0x0009, "Archive server" ],
[ 0x000a, "Job queue" ],
[ 0x000b, "Administration" ],
[ 0x0021, "NAS SNA gateway" ],
[ 0x0026, "Remote bridge server" ],
[ 0x0027, "TCP/IP gateway" ],
])
MessageLanguage = uint32("message_language", "NLM Language")
MigratedFiles = uint32("migrated_files", "Migrated Files")
MigratedSectors = uint32("migrated_sectors", "Migrated Sectors")
MinorVersion = uint32("minor_version", "Minor Version")
MinSpaceLeft64 = uint64("min_space_left64", "Minimum Space Left")
Minute = uint8("s_minute", "Minutes")
MixedModePathFlag = val_string8("mixed_mode_path_flag", "Mixed Mode Path Flag", [
[ 0x00, "Mixed mode path handling is not available"],
[ 0x01, "Mixed mode path handling is available"],
])
ModifiedDate = uint16("modified_date", "Modified Date")
ModifiedDate.NWDate()
ModifiedTime = uint16("modified_time", "Modified Time")
ModifiedTime.NWTime()
ModifierID = uint32("modifier_id", "Modifier ID", ENC_BIG_ENDIAN)
ModifierID.Display("BASE_HEX")
ModifyDOSInfoMask = bitfield16("modify_dos_info_mask", "Modify DOS Info Mask", [
bf_boolean16(0x0002, "modify_dos_read", "Attributes"),
bf_boolean16(0x0004, "modify_dos_write", "Creation Date"),
bf_boolean16(0x0008, "modify_dos_open", "Creation Time"),
bf_boolean16(0x0010, "modify_dos_create", "Creator ID"),
bf_boolean16(0x0020, "modify_dos_delete", "Archive Date"),
bf_boolean16(0x0040, "modify_dos_parent", "Archive Time"),
bf_boolean16(0x0080, "modify_dos_search", "Archiver ID"),
bf_boolean16(0x0100, "modify_dos_mdate", "Modify Date"),
bf_boolean16(0x0200, "modify_dos_mtime", "Modify Time"),
bf_boolean16(0x0400, "modify_dos_mid", "Modifier ID"),
bf_boolean16(0x0800, "modify_dos_laccess", "Last Access"),
bf_boolean16(0x1000, "modify_dos_inheritance", "Inheritance"),
bf_boolean16(0x2000, "modify_dos_max_space", "Maximum Space"),
])
Month = val_string8("s_month", "Month", [
[ 0x01, "January"],
[ 0x02, "February"],
[ 0x03, "March"],
[ 0x04, "April"],
[ 0x05, "May"],
[ 0x06, "June"],
[ 0x07, "July"],
[ 0x08, "August"],
[ 0x09, "September"],
[ 0x0a, "October"],
[ 0x0b, "November"],
[ 0x0c, "December"],
])
MoreFlag = val_string8("more_flag", "More Flag", [
[ 0x00, "No More Segments/Entries Available" ],
[ 0x01, "More Segments/Entries Available" ],
[ 0xff, "More Segments/Entries Available" ],
])
MoreProperties = val_string8("more_properties", "More Properties", [
[ 0x00, "No More Properties Available" ],
[ 0x01, "No More Properties Available" ],
[ 0xff, "More Properties Available" ],
])
Name = nstring8("name", "Name")
Name12 = fw_string("name12", "Name", 12)
NameLen = uint8("name_len", "Name Space Length")
NameLength = uint8("name_length", "Name Length")
NameList = uint32("name_list", "Name List")
#
# XXX - should this value be used to interpret the characters in names,
# search patterns, and the like?
#
# We need to handle character sets better, e.g. translating strings
# from whatever character set they are in the packet (DOS/Windows code
# pages, ISO character sets, UNIX EUC character sets, UTF-8, UCS-2/Unicode,
# Mac character sets, etc.) into UCS-4 or UTF-8 and storing them as such
# in the protocol tree, and displaying them as best we can.
#
NameSpace = val_string8("name_space", "Name Space", [
[ 0x00, "DOS" ],
[ 0x01, "MAC" ],
[ 0x02, "NFS" ],
[ 0x03, "FTAM" ],
[ 0x04, "OS/2, Long" ],
])
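#
# Until the character-set handling described above exists, a working
# assumption is that the name space selects the encoding of the names
# that travel with it: DOS names in the server's OEM code page, MAC
# names in a Macintosh character set, NFS names in whatever encoding
# the UNIX client used, and OS/2 long names in the client's code page.
#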
NamesSpaceInfoMask = bitfield16("ns_info_mask", "Name Space Info Mask", [
bf_boolean16(0x0001, "ns_info_mask_modify", "Modify Name"),
bf_boolean16(0x0002, "ns_info_mask_fatt", "File Attributes"),
bf_boolean16(0x0004, "ns_info_mask_cdate", "Creation Date"),
bf_boolean16(0x0008, "ns_info_mask_ctime", "Creation Time"),
bf_boolean16(0x0010, "ns_info_mask_owner", "Owner ID"),
bf_boolean16(0x0020, "ns_info_mask_adate", "Archive Date"),
bf_boolean16(0x0040, "ns_info_mask_atime", "Archive Time"),
bf_boolean16(0x0080, "ns_info_mask_aid", "Archiver ID"),
bf_boolean16(0x0100, "ns_info_mask_udate", "Update Date"),
bf_boolean16(0x0200, "ns_info_mask_utime", "Update Time"),
bf_boolean16(0x0400, "ns_info_mask_uid", "Update ID"),
bf_boolean16(0x0800, "ns_info_mask_acc_date", "Access Date"),
bf_boolean16(0x1000, "ns_info_mask_max_acc_mask", "Inheritance"),
bf_boolean16(0x2000, "ns_info_mask_max_space", "Maximum Space"),
])
NameSpaceName = nstring8("name_space_name", "Name Space Name")
nameType = uint32("name_type", "Name Type")
NCPdataSize = uint32("ncp_data_size", "NCP Data Size")
NCPEncodedStringsBits = uint32("ncp_encoded_strings_bits", "NCP Encoded Strings Bits")
NCPextensionMajorVersion = uint8("ncp_extension_major_version", "NCP Extension Major Version")
NCPextensionMinorVersion = uint8("ncp_extension_minor_version", "NCP Extension Minor Version")
NCPextensionName = nstring8("ncp_extension_name", "NCP Extension Name")
NCPextensionNumber = uint32("ncp_extension_number", "NCP Extension Number")
NCPextensionNumber.Display("BASE_HEX")
NCPExtensionNumbers = uint32("ncp_extension_numbers", "NCP Extension Numbers")
NCPextensionRevisionNumber = uint8("ncp_extension_revision_number", "NCP Extension Revision Number")
NCPPeakStaInUse = uint32("ncp_peak_sta_in_use", "Peak Number of Connections since Server was brought up")
NCPStaInUseCnt = uint32("ncp_sta_in_use", "Number of Workstations Connected to Server")
NDSRequestFlags = bitfield16("nds_request_flags", "NDS Request Flags", [
bf_boolean16(0x0001, "nds_request_flags_output", "Output Fields"),
bf_boolean16(0x0002, "nds_request_flags_no_such_entry", "No Such Entry"),
bf_boolean16(0x0004, "nds_request_flags_local_entry", "Local Entry"),
bf_boolean16(0x0008, "nds_request_flags_type_ref", "Type Referral"),
bf_boolean16(0x0010, "nds_request_flags_alias_ref", "Alias Referral"),
bf_boolean16(0x0020, "nds_request_flags_req_cnt", "Request Count"),
bf_boolean16(0x0040, "nds_request_flags_req_data_size", "Request Data Size"),
bf_boolean16(0x0080, "nds_request_flags_reply_data_size", "Reply Data Size"),
bf_boolean16(0x0100, "nds_request_flags_trans_ref", "Transport Referral"),
bf_boolean16(0x0200, "nds_request_flags_trans_ref2", "Transport Referral"),
bf_boolean16(0x0400, "nds_request_flags_up_ref", "Up Referral"),
bf_boolean16(0x0800, "nds_request_flags_dn_ref", "Down Referral"),
])
NDSStatus = uint32("nds_status", "NDS Status")
NetBIOSBroadcastWasPropagated = uint32("netbios_broadcast_was_propagated", "NetBIOS Broadcast Was Propagated")
NetIDNumber = uint32("net_id_number", "Net ID Number")
NetIDNumber.Display("BASE_HEX")
NetAddress = nbytes32("address", "Address")
NetStatus = uint16("net_status", "Network Status")
NetWareAccessHandle = bytes("netware_access_handle", "NetWare Access Handle", 6)
NetworkAddress = uint32("network_address", "Network Address")
NetworkAddress.Display("BASE_HEX")
NetworkNodeAddress = bytes("network_node_address", "Network Node Address", 6)
NetworkNumber = uint32("network_number", "Network Number")
NetworkNumber.Display("BASE_HEX")
#
# XXX - this should have the "ipx_socket_vals" value_string table
# from "packet-ipx.c".
#
NetworkSocket = uint16("network_socket", "Network Socket")
NetworkSocket.Display("BASE_HEX")
NewAccessRights = bitfield16("new_access_rights_mask", "New Access Rights", [
bf_boolean16(0x0001, "new_access_rights_read", "Read"),
bf_boolean16(0x0002, "new_access_rights_write", "Write"),
bf_boolean16(0x0004, "new_access_rights_open", "Open"),
bf_boolean16(0x0008, "new_access_rights_create", "Create"),
bf_boolean16(0x0010, "new_access_rights_delete", "Delete"),
bf_boolean16(0x0020, "new_access_rights_parental", "Parental"),
bf_boolean16(0x0040, "new_access_rights_search", "Search"),
bf_boolean16(0x0080, "new_access_rights_modify", "Modify"),
bf_boolean16(0x0100, "new_access_rights_supervisor", "Supervisor"),
])
NewDirectoryID = uint32("new_directory_id", "New Directory ID", ENC_BIG_ENDIAN)
NewDirectoryID.Display("BASE_HEX")
NewEAHandle = uint32("new_ea_handle", "New EA Handle")
NewEAHandle.Display("BASE_HEX")
NewFileName = fw_string("new_file_name", "New File Name", 14)
NewFileNameLen = nstring8("new_file_name_len", "New File Name")
NewFileSize = uint32("new_file_size", "New File Size")
NewPassword = nstring8("new_password", "New Password")
NewPath = nstring8("new_path", "New Path")
NewPosition = uint8("new_position", "New Position")
NewObjectName = nstring8("new_object_name", "New Object Name")
NextCntBlock = uint32("next_cnt_block", "Next Count Block")
NextHugeStateInfo = bytes("next_huge_state_info", "Next Huge State Info", 16)
nextLimbScanNum = uint32("next_limb_scan_num", "Next Limb Scan Number")
NextObjectID = uint32("next_object_id", "Next Object ID", ENC_BIG_ENDIAN)
NextObjectID.Display("BASE_HEX")
NextRecord = uint32("next_record", "Next Record")
NextRequestRecord = uint16("next_request_record", "Next Request Record")
NextSearchIndex = uint16("next_search_index", "Next Search Index")
NextSearchNumber = uint16("next_search_number", "Next Search Number")
NextSearchNum = uint32("nxt_search_num", "Next Search Number")
nextStartingNumber = uint32("next_starting_number", "Next Starting Number")
NextTrusteeEntry = uint32("next_trustee_entry", "Next Trustee Entry")
NextVolumeNumber = uint32("next_volume_number", "Next Volume Number")
NLMBuffer = nstring8("nlm_buffer", "Buffer")
NLMcount = uint32("nlm_count", "NLM Count")
NLMFlags = bitfield8("nlm_flags", "Flags", [
bf_boolean8(0x01, "nlm_flags_reentrant", "ReEntrant"),
bf_boolean8(0x02, "nlm_flags_multiple", "Can Load Multiple Times"),
bf_boolean8(0x04, "nlm_flags_synchronize", "Synchronize Start"),
bf_boolean8(0x08, "nlm_flags_pseudo", "PseudoPreemption"),
])
NLMLoadOptions = uint32("nlm_load_options", "NLM Load Options")
NLMName = stringz("nlm_name_stringz", "NLM Name")
NLMNumber = uint32("nlm_number", "NLM Number")
NLMNumbers = uint32("nlm_numbers", "NLM Numbers")
NLMsInList = uint32("nlms_in_list", "NLMs in List")
NLMStartNumber = uint32("nlm_start_num", "NLM Start Number")
NLMType = val_string8("nlm_type", "NLM Type", [
[ 0x00, "Generic NLM (.NLM)" ],
[ 0x01, "LAN Driver (.LAN)" ],
[ 0x02, "Disk Driver (.DSK)" ],
[ 0x03, "Name Space Support Module (.NAM)" ],
[ 0x04, "Utility or Support Program (.NLM)" ],
[ 0x05, "Mirrored Server Link (.MSL)" ],
[ 0x06, "OS NLM (.NLM)" ],
[ 0x07, "Paged High OS NLM (.NLM)" ],
[ 0x08, "Host Adapter Module (.HAM)" ],
[ 0x09, "Custom Device Module (.CDM)" ],
[ 0x0a, "File System Engine (.NLM)" ],
[ 0x0b, "Real Mode NLM (.NLM)" ],
[ 0x0c, "Hidden NLM (.NLM)" ],
[ 0x15, "NICI Support (.NLM)" ],
[ 0x16, "NICI Support (.NLM)" ],
[ 0x17, "Cryptography (.NLM)" ],
[ 0x18, "Encryption (.NLM)" ],
[ 0x19, "NICI Support (.NLM)" ],
[ 0x1c, "NICI Support (.NLM)" ],
])
nodeFlags = uint32("node_flags", "Node Flags")
nodeFlags.Display("BASE_HEX")
NoMoreMemAvlCnt = uint32("no_more_mem_avail", "No More Memory Available Count")
NonDedFlag = boolean8("non_ded_flag", "Non Dedicated Flag")
NonFreeableAvailableSubAllocSectors = uint32("non_freeable_avail_sub_alloc_sectors", "Non Freeable Available Sub Alloc Sectors")
NonFreeableLimboSectors = uint32("non_freeable_limbo_sectors", "Non Freeable Limbo Sectors")
NotUsableSubAllocSectors = uint32("not_usable_sub_alloc_sectors", "Not Usable Sub Alloc Sectors")
NotYetPurgeableBlocks = uint32("not_yet_purgeable_blocks", "Not Yet Purgeable Blocks")
NSInfoBitMask = uint32("ns_info_bit_mask", "Name Space Info Bit Mask")
NSSOAllInFlags = bitfield32("nsso_all_in_flags", "SecretStore All Input Flags",[
bf_boolean32(0x00000010, "nsso_all_unicode", "Unicode Data"),
bf_boolean32(0x00000080, "nsso_set_tree", "Set Tree"),
bf_boolean32(0x00000200, "nsso_destroy_ctx", "Destroy Context"),
])
NSSOGetServiceInFlags = bitfield32("nsso_get_svc_in_flags", "SecretStore Get Service Flags",[
bf_boolean32(0x00000100, "nsso_get_ctx", "Get Context"),
])
NSSOReadInFlags = bitfield32("nsso_read_in_flags", "SecretStore Read Flags",[
bf_boolean32(0x00000001, "nsso_rw_enh_prot", "Read/Write Enhanced Protection"),
bf_boolean32(0x00000008, "nsso_repair", "Repair SecretStore"),
])
NSSOReadOrUnlockInFlags = bitfield32("nsso_read_or_unlock_in_flags", "SecretStore Read or Unlock Flags",[
bf_boolean32(0x00000004, "nsso_ep_master_pwd", "Master Password used instead of ENH Password"),
])
NSSOUnlockInFlags = bitfield32("nsso_unlock_in_flags", "SecretStore Unlock Flags",[
bf_boolean32(0x00000004, "nsso_rmv_lock", "Remove Lock from Store"),
])
NSSOWriteInFlags = bitfield32("nsso_write_in_flags", "SecretStore Write Flags",[
bf_boolean32(0x00000001, "nsso_enh_prot", "Enhanced Protection"),
bf_boolean32(0x00000002, "nsso_create_id", "Create ID"),
bf_boolean32(0x00000040, "nsso_ep_pwd_used", "Enhanced Protection Password Used"),
])
NSSOContextOutFlags = bitfield32("nsso_cts_out_flags", "Type of Context",[
bf_boolean32(0x00000001, "nsso_ds_ctx", "DSAPI Context"),
bf_boolean32(0x00000080, "nsso_ldap_ctx", "LDAP Context"),
bf_boolean32(0x00000200, "nsso_dc_ctx", "Reserved"),
])
NSSOGetServiceOutFlags = bitfield32("nsso_get_svc_out_flags", "SecretStore Status Flags",[
bf_boolean32(0x00400000, "nsso_mstr_pwd", "Master Password Present"),
])
NSSOGetServiceReadOutFlags = bitfield32("nsso_get_svc_read_out_flags", "SecretStore Status Flags",[
bf_boolean32(0x00800000, "nsso_mp_disabled", "Master Password Disabled"),
])
NSSOReadOutFlags = bitfield32("nsso_read_out_flags", "SecretStore Read Flags",[
bf_boolean32(0x00010000, "nsso_secret_locked", "Enhanced Protection Lock on Secret"),
bf_boolean32(0x00020000, "nsso_secret_not_init", "Secret Not Yet Initialized"),
bf_boolean32(0x00040000, "nsso_secret_marked", "Secret Marked for Enhanced Protection"),
bf_boolean32(0x00080000, "nsso_secret_not_sync", "Secret Not Yet Synchronized in NDS"),
bf_boolean32(0x00200000, "nsso_secret_enh_pwd", "Enhanced Protection Password on Secret"),
])
NSSOReadOutStatFlags = bitfield32("nsso_read_out_stat_flags", "SecretStore Read Status Flags",[
bf_boolean32(0x00100000, "nsso_admin_mod", "Admin Modified Secret Last"),
])
NSSOVerb = val_string8("nsso_verb", "SecretStore Verb", [
[ 0x00, "Query Server" ],
[ 0x01, "Read App Secrets" ],
[ 0x02, "Write App Secrets" ],
[ 0x03, "Add Secret ID" ],
[ 0x04, "Remove Secret ID" ],
[ 0x05, "Remove SecretStore" ],
[ 0x06, "Enumerate SecretID's" ],
[ 0x07, "Unlock Store" ],
[ 0x08, "Set Master Password" ],
[ 0x09, "Get Service Information" ],
])
NSSpecificInfo = fw_string("ns_specific_info", "Name Space Specific Info", 512)
NumberOfActiveTasks = uint8("num_of_active_tasks", "Number of Active Tasks")
NumberOfAllocs = uint32("num_of_allocs", "Number of Allocations")
NumberOfCPUs = uint32("number_of_cpus", "Number of CPUs")
NumberOfDataStreams = uint16("number_of_data_streams", "Number of Data Streams")
NumberOfDataStreamsLong = uint32("number_of_data_streams_long", "Number of Data Streams")
NumberOfDynamicMemoryAreas = uint16("number_of_dynamic_memory_areas", "Number Of Dynamic Memory Areas")
NumberOfEntries = uint8("number_of_entries", "Number of Entries")
NumberOfEntriesLong = uint32("number_of_entries_long", "Number of Entries")
NumberOfLocks = uint8("number_of_locks", "Number of Locks")
NumberOfMinutesToDelay = uint32("number_of_minutes_to_delay", "Number of Minutes to Delay")
NumberOfNCPExtensions = uint32("number_of_ncp_extensions", "Number Of NCP Extensions")
NumberOfNSLoaded = uint16("number_of_ns_loaded", "Number Of Name Spaces Loaded")
NumberOfProtocols = uint8("number_of_protocols", "Number of Protocols")
NumberOfRecords = uint16("number_of_records", "Number of Records")
NumberOfReferencedPublics = uint32("num_of_ref_publics", "Number of Referenced Public Symbols")
NumberOfSemaphores = uint16("number_of_semaphores", "Number Of Semaphores")
NumberOfServiceProcesses = uint8("number_of_service_processes", "Number Of Service Processes")
NumberOfSetCategories = uint32("number_of_set_categories", "Number Of Set Categories")
NumberOfSMs = uint32("number_of_sms", "Number Of Storage Media")
NumberOfStations = uint8("number_of_stations", "Number of Stations")
NumBytes = uint16("num_bytes", "Number of Bytes")
NumBytesLong = uint32("num_bytes_long", "Number of Bytes")
NumOfCCinPkt = uint32("num_of_cc_in_pkt", "Number of Custom Counters in Packet")
NumOfChecks = uint32("num_of_checks", "Number of Checks")
NumOfEntries = uint32("num_of_entries", "Number of Entries")
NumOfFilesMigrated = uint32("num_of_files_migrated", "Number Of Files Migrated")
NumOfGarbageColl = uint32("num_of_garb_coll", "Number of Garbage Collections")
NumOfNCPReqs = uint32("num_of_ncp_reqs", "Number of NCP Requests since Server was brought up")
NumOfSegments = uint32("num_of_segments", "Number of Segments")
ObjectCount = uint32("object_count", "Object Count")
ObjectFlags = val_string8("object_flags", "Object Flags", [
[ 0x00, "Dynamic object" ],
[ 0x01, "Static object" ],
])
ObjectHasProperties = val_string8("object_has_properties", "Object Has Properties", [
[ 0x00, "No properties" ],
[ 0xff, "One or more properties" ],
])
ObjectID = uint32("object_id", "Object ID", ENC_BIG_ENDIAN)
ObjectID.Display('BASE_HEX')
ObjectIDCount = uint16("object_id_count", "Object ID Count")
ObjectIDInfo = uint32("object_id_info", "Object Information")
ObjectInfoReturnCount = uint32("object_info_rtn_count", "Object Information Count")
ObjectName = nstring8("object_name", "Object Name")
ObjectNameLen = fw_string("object_name_len", "Object Name", 48)
ObjectNameStringz = stringz("object_name_stringz", "Object Name")
ObjectNumber = uint32("object_number", "Object Number")
ObjectSecurity = val_string8("object_security", "Object Security", [
[ 0x00, "Object Read (Anyone) / Object Write (Anyone)" ],
[ 0x01, "Object Read (Logged in) / Object Write (Anyone)" ],
[ 0x02, "Object Read (Logged in as Object) / Object Write (Anyone)" ],
[ 0x03, "Object Read (Supervisor) / Object Write (Anyone)" ],
[ 0x04, "Object Read (Operating System Only) / Object Write (Anyone)" ],
[ 0x10, "Object Read (Anyone) / Object Write (Logged in)" ],
[ 0x11, "Object Read (Logged in) / Object Write (Logged in)" ],
[ 0x12, "Object Read (Logged in as Object) / Object Write (Logged in)" ],
[ 0x13, "Object Read (Supervisor) / Object Write (Logged in)" ],
[ 0x14, "Object Read (Operating System Only) / Object Write (Logged in)" ],
[ 0x20, "Object Read (Anyone) / Object Write (Logged in as Object)" ],
[ 0x21, "Object Read (Logged in) / Object Write (Logged in as Object)" ],
[ 0x22, "Object Read (Logged in as Object) / Object Write (Logged in as Object)" ],
[ 0x23, "Object Read (Supervisor) / Object Write (Logged in as Object)" ],
[ 0x24, "Object Read (Operating System Only) / Object Write (Logged in as Object)" ],
[ 0x30, "Object Read (Anyone) / Object Write (Supervisor)" ],
[ 0x31, "Object Read (Logged in) / Object Write (Supervisor)" ],
[ 0x32, "Object Read (Logged in as Object) / Object Write (Supervisor)" ],
[ 0x33, "Object Read (Supervisor) / Object Write (Supervisor)" ],
[ 0x34, "Object Read (Operating System Only) / Object Write (Supervisor)" ],
[ 0x40, "Object Read (Anyone) / Object Write (Operating System Only)" ],
[ 0x41, "Object Read (Logged in) / Object Write (Operating System Only)" ],
[ 0x42, "Object Read (Logged in as Object) / Object Write (Operating System Only)" ],
[ 0x43, "Object Read (Supervisor) / Object Write (Operating System Only)" ],
[ 0x44, "Object Read (Operating System Only) / Object Write (Operating System Only)" ],
])
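#
# As the table above shows, the object security byte is nibble-encoded:
# the low nibble (0-4) is the read security level and the high nibble
# (0-4) is the write security level, so 0x31 is read "Logged in" (1)
# combined with write "Supervisor" (3).
#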
#
# XXX - should this use the "server_vals[]" value_string array from
# "packet-ipx.c"?
#
# XXX - should this list be merged with that list? There are some
# oddities, e.g. this list has 0x03f5 for "Microsoft SQL Server", but
# the list from "packet-ipx.c" has 0xf503 for that - is that just
# byte-order confusion?
#
ObjectType = val_string16("object_type", "Object Type", [
[ 0x0000, "Unknown" ],
[ 0x0001, "User" ],
[ 0x0002, "User group" ],
[ 0x0003, "Print queue" ],
[ 0x0004, "NetWare file server" ],
[ 0x0005, "Job server" ],
[ 0x0006, "Gateway" ],
[ 0x0007, "Print server" ],
[ 0x0008, "Archive queue" ],
[ 0x0009, "Archive server" ],
[ 0x000a, "Job queue" ],
[ 0x000b, "Administration" ],
[ 0x0021, "NAS SNA gateway" ],
[ 0x0026, "Remote bridge server" ],
[ 0x0027, "TCP/IP gateway" ],
[ 0x0047, "Novell Print Server" ],
[ 0x004b, "Btrieve Server" ],
[ 0x004c, "NetWare SQL Server" ],
[ 0x0064, "ARCserve" ],
[ 0x0066, "ARCserve 3.0" ],
[ 0x0076, "NetWare SQL" ],
[ 0x00a0, "Gupta SQL Base Server" ],
[ 0x00a1, "Powerchute" ],
[ 0x0107, "NetWare Remote Console" ],
[ 0x01cb, "Shiva NetModem/E" ],
[ 0x01cc, "Shiva LanRover/E" ],
[ 0x01cd, "Shiva LanRover/T" ],
[ 0x01d8, "Castelle FAXPress Server" ],
[ 0x01da, "Castelle Print Server" ],
[ 0x01dc, "Castelle Fax Server" ],
[ 0x0200, "Novell SQL Server" ],
[ 0x023a, "NetWare Lanalyzer Agent" ],
[ 0x023c, "DOS Target Service Agent" ],
[ 0x023f, "NetWare Server Target Service Agent" ],
[ 0x024f, "Appletalk Remote Access Service" ],
[ 0x0263, "NetWare Management Agent" ],
[ 0x0264, "Global MHS" ],
[ 0x0265, "SNMP" ],
[ 0x026a, "NetWare Management/NMS Console" ],
[ 0x026b, "NetWare Time Synchronization" ],
[ 0x0273, "Nest Device" ],
[ 0x0274, "GroupWise Message Multiple Servers" ],
[ 0x0278, "NDS Replica Server" ],
[ 0x0282, "NDPS Service Registry Service" ],
[ 0x028a, "MPR/IPX Address Mapping Gateway" ],
[ 0x028b, "ManageWise" ],
[ 0x0293, "NetWare 6" ],
[ 0x030c, "HP JetDirect" ],
[ 0x0328, "Watcom SQL Server" ],
[ 0x0355, "Backup Exec" ],
[ 0x039b, "Lotus Notes" ],
[ 0x03e1, "Univel Server" ],
[ 0x03f5, "Microsoft SQL Server" ],
[ 0x055e, "Lexmark Print Server" ],
[ 0x0640, "Microsoft Gateway Services for NetWare" ],
[ 0x064e, "Microsoft Internet Information Server" ],
[ 0x077b, "Advantage Database Server" ],
[ 0x07a7, "Backup Exec Job Queue" ],
[ 0x07a8, "Backup Exec Job Manager" ],
[ 0x07a9, "Backup Exec Job Service" ],
[ 0x5555, "Site Lock" ],
[ 0x8202, "NDPS Broker" ],
])
OCRetFlags = val_string8("o_c_ret_flags", "Open Create Return Flags", [
[ 0x00, "No CallBack has been registered (No Op-Lock)" ],
[ 0x01, "Request has been registered for CallBack (Op-Lock)" ],
])
OESServer = val_string8("oes_server", "Type of Novell Server", [
[ 0x00, "NetWare" ],
[ 0x01, "OES" ],
[ 0x02, "OES 64bit" ],
])
OESLinuxOrNetWare = val_string8("oeslinux_or_netware", "Kernel Type", [
[ 0x00, "NetWare" ],
[ 0x01, "Linux" ],
])
OldestDeletedFileAgeInTicks = uint32("oldest_deleted_file_age_in_ticks", "Oldest Deleted File Age in Ticks")
OldFileName = bytes("old_file_name", "Old File Name", 15)
OldFileSize = uint32("old_file_size", "Old File Size")
OpenCount = uint16("open_count", "Open Count")
OpenCreateAction = bitfield8("open_create_action", "Open Create Action", [
bf_boolean8(0x01, "open_create_action_opened", "Opened"),
bf_boolean8(0x02, "open_create_action_created", "Created"),
bf_boolean8(0x04, "open_create_action_replaced", "Replaced"),
bf_boolean8(0x08, "open_create_action_compressed", "Compressed"),
bf_boolean8(0x80, "open_create_action_read_only", "Read Only"),
])
OpenCreateMode = bitfield8("open_create_mode", "Open Create Mode", [
bf_boolean8(0x01, "open_create_mode_open", "Open existing file (file must exist)"),
bf_boolean8(0x02, "open_create_mode_replace", "Replace existing file"),
bf_boolean8(0x08, "open_create_mode_create", "Create new file or subdirectory (file or subdirectory cannot exist)"),
bf_boolean8(0x20, "open_create_mode_64bit", "Open 64-bit Access"),
bf_boolean8(0x40, "open_create_mode_ro", "Open with Read Only Access"),
bf_boolean8(0x80, "open_create_mode_oplock", "Open Callback (Op-Lock)"),
])
OpenForReadCount = uint16("open_for_read_count", "Open For Read Count")
OpenForWriteCount = uint16("open_for_write_count", "Open For Write Count")
OpenRights = bitfield8("open_rights", "Open Rights", [
bf_boolean8(0x01, "open_rights_read_only", "Read Only"),
bf_boolean8(0x02, "open_rights_write_only", "Write Only"),
bf_boolean8(0x04, "open_rights_deny_read", "Deny Read"),
bf_boolean8(0x08, "open_rights_deny_write", "Deny Write"),
bf_boolean8(0x10, "open_rights_compat", "Compatibility"),
bf_boolean8(0x40, "open_rights_write_thru", "File Write Through"),
])
OptionNumber = uint8("option_number", "Option Number")
originalSize = uint32("original_size", "Original Size")
OSLanguageID = uint8("os_language_id", "OS Language ID")
OSMajorVersion = uint8("os_major_version", "OS Major Version")
OSMinorVersion = uint8("os_minor_version", "OS Minor Version")
OSRevision = uint32("os_revision", "OS Revision")
OtherFileForkSize = uint32("other_file_fork_size", "Other File Fork Size")
OtherFileForkFAT = uint32("other_file_fork_fat", "Other File Fork FAT Entry")
OutgoingPacketDiscardedNoTurboBuffer = uint16("outgoing_packet_discarded_no_turbo_buffer", "Outgoing Packet Discarded No Turbo Buffer")
PacketsDiscardedByHopCount = uint16("packets_discarded_by_hop_count", "Packets Discarded By Hop Count")
PacketsDiscardedUnknownNet = uint16("packets_discarded_unknown_net", "Packets Discarded Unknown Net")
PacketsFromInvalidConnection = uint16("packets_from_invalid_connection", "Packets From Invalid Connection")
PacketsReceivedDuringProcessing = uint16("packets_received_during_processing", "Packets Received During Processing")
PacketsWithBadRequestType = uint16("packets_with_bad_request_type", "Packets With Bad Request Type")
PacketsWithBadSequenceNumber = uint16("packets_with_bad_sequence_number", "Packets With Bad Sequence Number")
PageTableOwnerFlag = uint32("page_table_owner_flag", "Page Table Owner")
ParentID = uint32("parent_id", "Parent ID")
ParentID.Display("BASE_HEX")
ParentBaseID = uint32("parent_base_id", "Parent Base ID")
ParentBaseID.Display("BASE_HEX")
ParentDirectoryBase = uint32("parent_directory_base", "Parent Directory Base")
ParentDOSDirectoryBase = uint32("parent_dos_directory_base", "Parent DOS Directory Base")
ParentObjectNumber = uint32("parent_object_number", "Parent Object Number")
ParentObjectNumber.Display("BASE_HEX")
Password = nstring8("password", "Password")
PathBase = uint8("path_base", "Path Base")
PathComponentCount = uint16("path_component_count", "Path Component Count")
PathComponentSize = uint16("path_component_size", "Path Component Size")
PathCookieFlags = val_string16("path_cookie_flags", "Path Cookie Flags", [
[ 0x0000, "Last component is Not a File Name" ],
[ 0x0001, "Last component is a File Name" ],
])
PathCount = uint8("path_count", "Path Count")
#
# XXX - in at least some File Search Continue requests, the string
# length value is longer than the string, and there's a NUL, followed
# by other non-zero cruft, in the string. Should this be an
# "nstringz8", with FT_UINT_STRINGZPAD added to support it? And
# does that apply to any other values?
#
Path = nstring8("path", "Path")
Path16 = nstring16("path16", "Path")
PathAndName = stringz("path_and_name", "Path and Name")
PendingIOCommands = uint16("pending_io_commands", "Pending IO Commands")
PhysicalDiskNumber = uint8("physical_disk_number", "Physical Disk Number")
PhysicalDriveCount = uint8("physical_drive_count", "Physical Drive Count")
PhysicalLockThreshold = uint8("physical_lock_threshold", "Physical Lock Threshold")
PingVersion = uint16("ping_version", "Ping Version")
PoolName = stringz("pool_name", "Pool Name")
PositiveAcknowledgesSent = uint16("positive_acknowledges_sent", "Positive Acknowledges Sent")
PreCompressedSectors = uint32("pre_compressed_sectors", "Precompressed Sectors")
PreviousRecord = uint32("previous_record", "Previous Record")
PrimaryEntry = uint32("primary_entry", "Primary Entry")
PrintFlags = bitfield8("print_flags", "Print Flags", [
bf_boolean8(0x08, "print_flags_ff", "Suppress Form Feeds"),
bf_boolean8(0x10, "print_flags_cr", "Create"),
bf_boolean8(0x20, "print_flags_del_spool", "Delete Spool File after Printing"),
bf_boolean8(0x40, "print_flags_exp_tabs", "Expand Tabs in the File"),
bf_boolean8(0x80, "print_flags_banner", "Print Banner Page"),
])
PrinterHalted = val_string8("printer_halted", "Printer Halted", [
[ 0x00, "Printer is not Halted" ],
[ 0xff, "Printer is Halted" ],
])
PrinterOffLine = val_string8( "printer_offline", "Printer Off-Line", [
[ 0x00, "Printer is On-Line" ],
[ 0xff, "Printer is Off-Line" ],
])
PrintServerVersion = uint8("print_server_version", "Print Server Version")
Priority = uint32("priority", "Priority")
Privileges = uint32("privileges", "Login Privileges")
ProcessorType = val_string8("processor_type", "Processor Type", [
[ 0x00, "Motorola 68000" ],
[ 0x01, "Intel 8088 or 8086" ],
[ 0x02, "Intel 80286" ],
])
ProDOSInfo = bytes("pro_dos_info", "Pro DOS Info", 6)
ProductMajorVersion = uint16("product_major_version", "Product Major Version")
ProductMinorVersion = uint16("product_minor_version", "Product Minor Version")
ProductRevisionVersion = uint8("product_revision_version", "Product Revision Version")
projectedCompSize = uint32("projected_comp_size", "Projected Compression Size")
PropertyHasMoreSegments = val_string8("property_has_more_segments",
"Property Has More Segments", [
[ 0x00, "Is last segment" ],
[ 0xff, "More segments are available" ],
])
PropertyName = nstring8("property_name", "Property Name")
PropertyName16 = fw_string("property_name_16", "Property Name", 16)
PropertyData = bytes("property_data", "Property Data", 128)
PropertySegment = uint8("property_segment", "Property Segment")
PropertyType = val_string8("property_type", "Property Type", [
[ 0x00, "Display Static property" ],
[ 0x01, "Display Dynamic property" ],
[ 0x02, "Set Static property" ],
[ 0x03, "Set Dynamic property" ],
])
PropertyValue = fw_string("property_value", "Property Value", 128)
ProposedMaxSize = uint16("proposed_max_size", "Proposed Max Size")
ProposedMaxSize64 = uint64("proposed_max_size64", "Proposed Max Size")
protocolFlags = uint32("protocol_flags", "Protocol Flags")
protocolFlags.Display("BASE_HEX")
PurgeableBlocks = uint32("purgeable_blocks", "Purgeable Blocks")
PurgeCcode = uint32("purge_c_code", "Purge Completion Code")
PurgeCount = uint32("purge_count", "Purge Count")
PurgeFlags = val_string16("purge_flags", "Purge Flags", [
[ 0x0000, "Do not Purge All" ],
[ 0x0001, "Purge All" ],
[ 0xffff, "Do not Purge All" ],
])
PurgeList = uint32("purge_list", "Purge List")
PhysicalDiskChannel = uint8("physical_disk_channel", "Physical Disk Channel")
PhysicalDriveType = val_string8("physical_drive_type", "Physical Drive Type", [
[ 0x01, "XT" ],
[ 0x02, "AT" ],
[ 0x03, "SCSI" ],
[ 0x04, "Disk Coprocessor" ],
[ 0x05, "PS/2 with MFM Controller" ],
[ 0x06, "PS/2 with ESDI Controller" ],
[ 0x07, "Convergent Technology SBIC" ],
])
PhysicalReadErrors = uint16("physical_read_errors", "Physical Read Errors")
PhysicalReadRequests = uint32("physical_read_requests", "Physical Read Requests")
PhysicalWriteErrors = uint16("physical_write_errors", "Physical Write Errors")
PhysicalWriteRequests = uint32("physical_write_requests", "Physical Write Requests")
PrintToFileFlag = boolean8("print_to_file_flag", "Print to File Flag")
QueueID = uint32("queue_id", "Queue ID")
QueueID.Display("BASE_HEX")
QueueName = nstring8("queue_name", "Queue Name")
QueueStartPosition = uint32("queue_start_position", "Queue Start Position")
QueueStatus = bitfield8("queue_status", "Queue Status", [
bf_boolean8(0x01, "queue_status_new_jobs", "Operator does not want to add jobs to the queue"),
bf_boolean8(0x02, "queue_status_pserver", "Operator does not want additional servers attaching"),
bf_boolean8(0x04, "queue_status_svc_jobs", "Operator does not want servers to service jobs"),
])
QueueType = uint16("queue_type", "Queue Type")
QueueingVersion = uint8("qms_version", "QMS Version")
ReadBeyondWrite = uint16("read_beyond_write", "Read Beyond Write")
RecordLockCount = uint16("rec_lock_count", "Record Lock Count")
RecordStart = uint32("record_start", "Record Start")
RecordEnd = uint32("record_end", "Record End")
RecordInUseFlag = val_string16("record_in_use", "Record in Use", [
[ 0x0000, "Record In Use" ],
[ 0xffff, "Record Not In Use" ],
])
RedirectedPrinter = uint8( "redirected_printer", "Redirected Printer" )
ReferenceCount = uint32("reference_count", "Reference Count")
RelationsCount = uint16("relations_count", "Relations Count")
ReMirrorCurrentOffset = uint32("re_mirror_current_offset", "ReMirror Current Offset")
ReMirrorDriveNumber = uint8("re_mirror_drive_number", "ReMirror Drive Number")
RemoteMaxPacketSize = uint32("remote_max_packet_size", "Remote Max Packet Size")
RemoteTargetID = uint32("remote_target_id", "Remote Target ID")
RemoteTargetID.Display("BASE_HEX")
RemovableFlag = uint16("removable_flag", "Removable Flag")
RemoveOpenRights = bitfield8("remove_open_rights", "Remove Open Rights", [
bf_boolean8(0x01, "remove_open_rights_ro", "Read Only"),
bf_boolean8(0x02, "remove_open_rights_wo", "Write Only"),
bf_boolean8(0x04, "remove_open_rights_dr", "Deny Read"),
bf_boolean8(0x08, "remove_open_rights_dw", "Deny Write"),
bf_boolean8(0x10, "remove_open_rights_comp", "Compatibility"),
bf_boolean8(0x40, "remove_open_rights_write_thru", "Write Through"),
])
RenameFlag = bitfield8("rename_flag", "Rename Flag", [
bf_boolean8(0x01, "rename_flag_ren", "Rename to Myself allows file to be renamed to its original name"),
bf_boolean8(0x02, "rename_flag_comp", "Compatibility allows files that are marked read only to be opened with read/write access"),
bf_boolean8(0x04, "rename_flag_no", "Name Only renames only the specified name space entry name"),
])
RepliesCancelled = uint16("replies_cancelled", "Replies Cancelled")
ReplyBuffer = nstring8("reply_buffer", "Reply Buffer")
ReplyBufferSize = uint32("reply_buffer_size", "Reply Buffer Size")
ReplyQueueJobNumbers = uint32("reply_queue_job_numbers", "Reply Queue Job Numbers")
RequestBitMap = bitfield16("request_bit_map", "Request Bit Map", [
bf_boolean16(0x0001, "request_bit_map_ret_afp_ent", "AFP Entry ID"),
bf_boolean16(0x0002, "request_bit_map_ret_data_fork", "Data Fork Length"),
bf_boolean16(0x0004, "request_bit_map_ret_res_fork", "Resource Fork Length"),
bf_boolean16(0x0008, "request_bit_map_ret_num_off", "Number of Offspring"),
bf_boolean16(0x0010, "request_bit_map_ret_owner", "Owner ID"),
bf_boolean16(0x0020, "request_bit_map_ret_short", "Short Name"),
bf_boolean16(0x0040, "request_bit_map_ret_acc_priv", "Access Privileges"),
bf_boolean16(0x0100, "request_bit_map_ratt", "Return Attributes"),
bf_boolean16(0x0200, "request_bit_map_ret_afp_parent", "AFP Parent Entry ID"),
bf_boolean16(0x0400, "request_bit_map_ret_cr_date", "Creation Date"),
bf_boolean16(0x0800, "request_bit_map_ret_acc_date", "Access Date"),
bf_boolean16(0x1000, "request_bit_map_ret_mod_date", "Modify Date&Time"),
bf_boolean16(0x2000, "request_bit_map_ret_bak_date", "Backup Date&Time"),
bf_boolean16(0x4000, "request_bit_map_ret_finder", "Finder Info"),
bf_boolean16(0x8000, "request_bit_map_ret_long_nm", "Long Name"),
])
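# A bitfield16, as in RequestBitMap above, decodes one 16-bit word into per-bit
# boolean subfields; each bf_boolean16() entry supplies the bit mask, the
# filter abbreviation, and the display string for one flag. A minimal
# hypothetical declaration (the names below are illustrative only, not real
# NCP fields):
#
#   ExampleBits = bitfield16("example_bits", "Example Bits", [
#       bf_boolean16(0x0001, "example_bits_a", "Flag A"),
#       bf_boolean16(0x0002, "example_bits_b", "Flag B"),
#   ])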
ResourceForkLen = uint32("resource_fork_len", "Resource Fork Len")
RequestCode = val_string8("request_code", "Request Code", [
[ 0x00, "Change Logged in to Temporary Authenticated" ],
[ 0x01, "Change Temporary Authenticated to Logged in" ],
])
RequestData = nstring8("request_data", "Request Data")
RequestsReprocessed = uint16("requests_reprocessed", "Requests Reprocessed")
Reserved = uint8( "reserved", "Reserved" )
Reserved2 = bytes("reserved2", "Reserved", 2)
Reserved3 = bytes("reserved3", "Reserved", 3)
Reserved4 = bytes("reserved4", "Reserved", 4)
Reserved5 = bytes("reserved5", "Reserved", 5)
Reserved6 = bytes("reserved6", "Reserved", 6)
Reserved8 = bytes("reserved8", "Reserved", 8)
Reserved10 = bytes("reserved10", "Reserved", 10)
Reserved12 = bytes("reserved12", "Reserved", 12)
Reserved16 = bytes("reserved16", "Reserved", 16)
Reserved20 = bytes("reserved20", "Reserved", 20)
Reserved28 = bytes("reserved28", "Reserved", 28)
Reserved36 = bytes("reserved36", "Reserved", 36)
Reserved44 = bytes("reserved44", "Reserved", 44)
Reserved48 = bytes("reserved48", "Reserved", 48)
Reserved50 = bytes("reserved50", "Reserved", 50)
Reserved56 = bytes("reserved56", "Reserved", 56)
Reserved64 = bytes("reserved64", "Reserved", 64)
Reserved120 = bytes("reserved120", "Reserved", 120)
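# The Reserved<N> fields above all follow the same pattern: an opaque,
# fixed-length run of N reserved bytes, declared as bytes(abbrev, "Reserved", N).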
ReservedOrDirectoryNumber = uint32("reserved_or_directory_number", "Reserved or Directory Number (see EAFlags)")
ReservedOrDirectoryNumber.Display("BASE_HEX")
ResourceCount = uint32("resource_count", "Resource Count")
ResourceForkSize = uint32("resource_fork_size", "Resource Fork Size")
ResourceName = stringz("resource_name", "Resource Name")
ResourceSignature = fw_string("resource_sig", "Resource Signature", 4)
RestoreTime = eptime("restore_time", "Restore Time")
Restriction = uint32("restriction", "Disk Space Restriction")
RestrictionQuad = uint64("restriction_quad", "Restriction")
RestrictionsEnforced = val_string8("restrictions_enforced", "Disk Restrictions Enforce Flag", [
[ 0x00, "Enforced" ],
[ 0xff, "Not Enforced" ],
])
ReturnInfoCount = uint32("return_info_count", "Return Information Count")
ReturnInfoMask = bitfield16("ret_info_mask", "Return Information", [
bf_boolean16(0x0001, "ret_info_mask_fname", "Return File Name Information"),
bf_boolean16(0x0002, "ret_info_mask_alloc", "Return Allocation Space Information"),
bf_boolean16(0x0004, "ret_info_mask_attr", "Return Attribute Information"),
bf_boolean16(0x0008, "ret_info_mask_size", "Return Size Information"),
bf_boolean16(0x0010, "ret_info_mask_tspace", "Return Total Space Information"),
bf_boolean16(0x0020, "ret_info_mask_eattr", "Return Extended Attributes Information"),
bf_boolean16(0x0040, "ret_info_mask_arch", "Return Archive Information"),
bf_boolean16(0x0080, "ret_info_mask_mod", "Return Modify Information"),
bf_boolean16(0x0100, "ret_info_mask_create", "Return Creation Information"),
bf_boolean16(0x0200, "ret_info_mask_ns", "Return Name Space Information"),
bf_boolean16(0x0400, "ret_info_mask_dir", "Return Directory Information"),
bf_boolean16(0x0800, "ret_info_mask_rights", "Return Rights Information"),
bf_boolean16(0x1000, "ret_info_mask_id", "Return ID Information"),
bf_boolean16(0x2000, "ret_info_mask_ns_attr", "Return Name Space Attributes Information"),
bf_boolean16(0x4000, "ret_info_mask_actual", "Return Actual Information"),
bf_boolean16(0x8000, "ret_info_mask_logical", "Return Logical Information"),
])
ReturnedListCount = uint32("returned_list_count", "Returned List Count")
Revision = uint32("revision", "Revision")
RevisionNumber = uint8("revision_number", "Revision")
RevQueryFlag = val_string8("rev_query_flag", "Revoke Rights Query Flag", [
[ 0x00, "Do not query the locks engine for access rights" ],
[ 0x01, "Query the locks engine and return the access rights" ],
])
RightsGrantMask = bitfield8("rights_grant_mask", "Grant Rights", [
bf_boolean8(0x01, "rights_grant_mask_read", "Read"),
bf_boolean8(0x02, "rights_grant_mask_write", "Write"),
bf_boolean8(0x04, "rights_grant_mask_open", "Open"),
bf_boolean8(0x08, "rights_grant_mask_create", "Create"),
bf_boolean8(0x10, "rights_grant_mask_del", "Delete"),
bf_boolean8(0x20, "rights_grant_mask_parent", "Parental"),
bf_boolean8(0x40, "rights_grant_mask_search", "Search"),
bf_boolean8(0x80, "rights_grant_mask_mod", "Modify"),
])
RightsRevokeMask = bitfield8("rights_revoke_mask", "Revoke Rights", [
bf_boolean8(0x01, "rights_revoke_mask_read", "Read"),
bf_boolean8(0x02, "rights_revoke_mask_write", "Write"),
bf_boolean8(0x04, "rights_revoke_mask_open", "Open"),
bf_boolean8(0x08, "rights_revoke_mask_create", "Create"),
bf_boolean8(0x10, "rights_revoke_mask_del", "Delete"),
bf_boolean8(0x20, "rights_revoke_mask_parent", "Parental"),
bf_boolean8(0x40, "rights_revoke_mask_search", "Search"),
bf_boolean8(0x80, "rights_revoke_mask_mod", "Modify"),
])
RIPSocketNumber = uint16("rip_socket_num", "RIP Socket Number")
RIPSocketNumber.Display("BASE_HEX")
RouterDownFlag = boolean8("router_dn_flag", "Router Down Flag")
RPCccode = val_string16("rpc_c_code", "RPC Completion Code", [
[ 0x0000, "Successful" ],
])
RTagNumber = uint32("r_tag_num", "Resource Tag Number")
RTagNumber.Display("BASE_HEX")
RpyNearestSrvFlag = boolean8("rpy_nearest_srv_flag", "Reply to Nearest Server Flag")
SalvageableFileEntryNumber = uint32("salvageable_file_entry_number", "Salvageable File Entry Number")
SalvageableFileEntryNumber.Display("BASE_HEX")
SAPSocketNumber = uint16("sap_socket_number", "SAP Socket Number")
SAPSocketNumber.Display("BASE_HEX")
ScanItems = uint32("scan_items", "Number of Items returned from Scan")
SearchAttributes = bitfield8("sattr", "Search Attributes", [
bf_boolean8(0x01, "sattr_ronly", "Read-Only Files Allowed"),
bf_boolean8(0x02, "sattr_hid", "Hidden Files Allowed"),
bf_boolean8(0x04, "sattr_sys", "System Files Allowed"),
bf_boolean8(0x08, "sattr_exonly", "Execute-Only Files Allowed"),
bf_boolean8(0x10, "sattr_sub", "Subdirectories Only"),
bf_boolean8(0x20, "sattr_archive", "Archive"),
bf_boolean8(0x40, "sattr_execute_confirm", "Execute Confirm"),
bf_boolean8(0x80, "sattr_shareable", "Shareable"),
])
SearchAttributesLow = bitfield16("search_att_low", "Search Attributes", [
bf_boolean16(0x0001, "search_att_read_only", "Read-Only"),
bf_boolean16(0x0002, "search_att_hidden", "Hidden Files Allowed"),
bf_boolean16(0x0004, "search_att_system", "System"),
bf_boolean16(0x0008, "search_att_execute_only", "Execute-Only"),
bf_boolean16(0x0010, "search_att_sub", "Subdirectories Only"),
bf_boolean16(0x0020, "search_att_archive", "Archive"),
bf_boolean16(0x0040, "search_att_execute_confirm", "Execute Confirm"),
bf_boolean16(0x0080, "search_att_shareable", "Shareable"),
bf_boolean16(0x8000, "search_attr_all_files", "All Files and Directories"),
])
SearchBitMap = bitfield8("search_bit_map", "Search Bit Map", [
bf_boolean8(0x01, "search_bit_map_hidden", "Hidden"),
bf_boolean8(0x02, "search_bit_map_sys", "System"),
bf_boolean8(0x04, "search_bit_map_sub", "Subdirectory"),
bf_boolean8(0x08, "search_bit_map_files", "Files"),
])
SearchConnNumber = uint32("search_conn_number", "Search Connection Number")
SearchInstance = uint32("search_instance", "Search Instance")
SearchNumber = uint32("search_number", "Search Number")
SearchPattern = nstring8("search_pattern", "Search Pattern")
SearchPattern16 = nstring16("search_pattern_16", "Search Pattern")
SearchSequence = bytes("search_sequence", "Search Sequence", 9)
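# SearchSequenceWord below passes an explicit ENC_BIG_ENDIAN third argument;
# integer fields in this file otherwise take the generator's default byte
# order, so big-endian fields must say so at declaration time.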
SearchSequenceWord = uint16("search_sequence_word", "Search Sequence", ENC_BIG_ENDIAN)
Second = uint8("s_second", "Seconds")
SecondsRelativeToTheYear2000 = uint32("sec_rel_to_y2k", "Seconds Relative to the Year 2000")
SecretStoreVerb = val_string8("ss_verb", "Secret Store Verb", [
[ 0x00, "Query Server" ],
[ 0x01, "Read App Secrets" ],
[ 0x02, "Write App Secrets" ],
[ 0x03, "Add Secret ID" ],
[ 0x04, "Remove Secret ID" ],
[ 0x05, "Remove SecretStore" ],
[ 0x06, "Enumerate Secret IDs" ],
[ 0x07, "Unlock Store" ],
[ 0x08, "Set Master Password" ],
[ 0x09, "Get Service Information" ],
])
SecurityEquivalentList = fw_string("security_equiv_list", "Security Equivalent List", 128)
SecurityFlag = bitfield8("security_flag", "Security Flag", [
bf_boolean8(0x01, "checksumming", "Checksumming"),
bf_boolean8(0x02, "signature", "Signature"),
bf_boolean8(0x04, "complete_signatures", "Complete Signatures"),
bf_boolean8(0x08, "encryption", "Encryption"),
bf_boolean8(0x80, "large_internet_packets", "Large Internet Packets (LIP) Disabled"),
])
SecurityRestrictionVersion = uint8("security_restriction_version", "Security Restriction Version")
SectorsPerBlock = uint8("sectors_per_block", "Sectors Per Block")
SectorsPerBlockLong = uint32("sectors_per_block_long", "Sectors Per Block")
SectorsPerCluster = uint16("sectors_per_cluster", "Sectors Per Cluster" )
SectorsPerClusterLong = uint32("sectors_per_cluster_long", "Sectors Per Cluster" )
SectorsPerTrack = uint8("sectors_per_track", "Sectors Per Track")
SectorSize = uint32("sector_size", "Sector Size")
SemaphoreHandle = uint32("semaphore_handle", "Semaphore Handle")
SemaphoreName = nstring8("semaphore_name", "Semaphore Name")
SemaphoreOpenCount = uint8("semaphore_open_count", "Semaphore Open Count")
SemaphoreShareCount = uint8("semaphore_share_count", "Semaphore Share Count")
SemaphoreTimeOut = uint16("semaphore_time_out", "Semaphore Time Out")
SemaphoreValue = uint16("semaphore_value", "Semaphore Value")
SendStatus = val_string8("send_status", "Send Status", [
[ 0x00, "Successful" ],
[ 0x01, "Illegal Station Number" ],
[ 0x02, "Client Not Logged In" ],
[ 0x03, "Client Not Accepting Messages" ],
[ 0x04, "Client Already has a Message" ],
[ 0x96, "No Alloc Space for the Message" ],
[ 0xfd, "Bad Station Number" ],
[ 0xff, "Failure" ],
])
SequenceByte = uint8("sequence_byte", "Sequence")
SequenceNumber = uint32("sequence_number", "Sequence Number")
SequenceNumber.Display("BASE_HEX")
SequenceNumberLong = uint64("sequence_number64", "Sequence Number")
SequenceNumberLong.Display("BASE_HEX")
ServerAddress = bytes("server_address", "Server Address", 12)
ServerAppNumber = uint16("server_app_num", "Server App Number")
ServerID = uint32("server_id_number", "Server ID", ENC_BIG_ENDIAN )
ServerID.Display("BASE_HEX")
ServerInfoFlags = val_string16("server_info_flags", "Server Information Flags", [
[ 0x0000, "This server is not a member of a Cluster" ],
[ 0x0001, "This server is a member of a Cluster" ],
])
serverListFlags = uint32("server_list_flags", "Server List Flags")
ServerName = fw_string("server_name", "Server Name", 48)
serverName50 = fw_string("server_name50", "Server Name", 50)
ServerNameLen = nstring8("server_name_len", "Server Name")
ServerNameStringz = stringz("server_name_stringz", "Server Name")
ServerNetworkAddress = bytes("server_network_address", "Server Network Address", 10)
ServerNode = bytes("server_node", "Server Node", 6)
ServerSerialNumber = uint32("server_serial_number", "Server Serial Number")
ServerStation = uint8("server_station", "Server Station")
ServerStationLong = uint32("server_station_long", "Server Station")
ServerStationList = uint8("server_station_list", "Server Station List")
ServerStatusRecord = fw_string("server_status_record", "Server Status Record", 64)
ServerTaskNumber = uint8("server_task_number", "Server Task Number")
ServerTaskNumberLong = uint32("server_task_number_long", "Server Task Number")
ServerType = uint16("server_type", "Server Type")
ServerType.Display("BASE_HEX")
ServerUtilization = uint32("server_utilization", "Server Utilization")
ServerUtilizationPercentage = uint8("server_utilization_percentage", "Server Utilization Percentage")
ServiceType = val_string16("Service_type", "Service Type", [
[ 0x0000, "Unknown" ],
[ 0x0001, "User" ],
[ 0x0002, "User group" ],
[ 0x0003, "Print queue" ],
[ 0x0004, "NetWare file server" ],
[ 0x0005, "Job server" ],
[ 0x0006, "Gateway" ],
[ 0x0007, "Print server" ],
[ 0x0008, "Archive queue" ],
[ 0x0009, "Archive server" ],
[ 0x000a, "Job queue" ],
[ 0x000b, "Administration" ],
[ 0x0021, "NAS SNA gateway" ],
[ 0x0026, "Remote bridge server" ],
[ 0x0027, "TCP/IP gateway" ],
[ 0xffff, "All Types" ],
])
SetCmdCategory = val_string8("set_cmd_category", "Set Command Category", [
[ 0x00, "Communications" ],
[ 0x01, "Memory" ],
[ 0x02, "File Cache" ],
[ 0x03, "Directory Cache" ],
[ 0x04, "File System" ],
[ 0x05, "Locks" ],
[ 0x06, "Transaction Tracking" ],
[ 0x07, "Disk" ],
[ 0x08, "Time" ],
[ 0x09, "NCP" ],
[ 0x0a, "Miscellaneous" ],
[ 0x0b, "Error Handling" ],
[ 0x0c, "Directory Services" ],
[ 0x0d, "MultiProcessor" ],
[ 0x0e, "Service Location Protocol" ],
[ 0x0f, "Licensing Services" ],
])
SetCmdFlags = bitfield8("set_cmd_flags", "Set Command Flags", [
bf_boolean8(0x01, "cmd_flags_startup_only", "Startup.ncf Only"),
bf_boolean8(0x02, "cmd_flags_hidden", "Hidden"),
bf_boolean8(0x04, "cmd_flags_advanced", "Advanced"),
bf_boolean8(0x08, "cmd_flags_later", "Restart Server Required to Take Effect"),
bf_boolean8(0x80, "cmd_flags_secure", "Console Secured"),
])
SetCmdName = stringz("set_cmd_name", "Set Command Name")
SetCmdType = val_string8("set_cmd_type", "Set Command Type", [
[ 0x00, "Numeric Value" ],
[ 0x01, "Boolean Value" ],
[ 0x02, "Ticks Value" ],
[ 0x04, "Time Value" ],
[ 0x05, "String Value" ],
[ 0x06, "Trigger Value" ],
[ 0x07, "Numeric Value" ],
])
SetCmdValueNum = uint32("set_cmd_value_num", "Set Command Value")
SetCmdValueString = stringz("set_cmd_value_string", "Set Command Value")
SetMask = bitfield32("set_mask", "Set Mask", [
bf_boolean32(0x00000001, "ncp_encoded_strings", "NCP Encoded Strings"),
bf_boolean32(0x00000002, "connection_code_page", "Connection Code Page"),
])
SetParmName = stringz("set_parm_name", "Set Parameter Name")
SFTErrorTable = bytes("sft_error_table", "SFT Error Table", 60)
SFTSupportLevel = val_string8("sft_support_level", "SFT Support Level", [
[ 0x01, "Server Offers Hot Disk Error Fixing" ],
[ 0x02, "Server Offers Disk Mirroring and Transaction Tracking" ],
[ 0x03, "Server Offers Physical Server Mirroring" ],
])
ShareableLockCount = uint16("shareable_lock_count", "Shareable Lock Count")
SharedMemoryAddresses = bytes("shared_memory_addresses", "Shared Memory Addresses", 10)
ShortName = fw_string("short_name", "Short Name", 12)
ShortStkName = fw_string("short_stack_name", "Short Stack Name", 16)
SiblingCount = uint32("sibling_count", "Sibling Count")
SixtyFourBitOffsetsSupportedFlag = val_string8("64_bit_flag", "64 Bit Support", [
[ 0x00, "No support for 64 bit offsets" ],
[ 0x01, "64 bit offsets supported" ],
[ 0x02, "Use 64 bit file transfer NCPs" ],
])
SMIDs = uint32("smids", "Storage Media IDs")
SoftwareDescription = fw_string("software_description", "Software Description", 65)
SoftwareDriverType = uint8("software_driver_type", "Software Driver Type")
SoftwareMajorVersionNumber = uint8("software_major_version_number", "Software Major Version Number")
SoftwareMinorVersionNumber = uint8("software_minor_version_number", "Software Minor Version Number")
SourceDirHandle = uint8("source_dir_handle", "Source Directory Handle")
SourceFileHandle = bytes("s_fhandle_64bit", "Source File Handle", 6)
SourceFileOffset = bytes("s_foffset", "Source File Offset", 8)
sourceOriginateTime = bytes("source_originate_time", "Source Originate Time", 8)
SourcePath = nstring8("source_path", "Source Path")
SourcePathComponentCount = uint8("source_component_count", "Source Path Component Count")
sourceReturnTime = bytes("source_return_time", "Source Return Time", 8)
SpaceUsed = uint32("space_used", "Space Used")
SpaceMigrated = uint32("space_migrated", "Space Migrated")
SrcNameSpace = val_string8("src_name_space", "Source Name Space", [
[ 0x00, "DOS Name Space" ],
[ 0x01, "MAC Name Space" ],
[ 0x02, "NFS Name Space" ],
[ 0x04, "Long Name Space" ],
])
SubFuncStrucLen = uint16("sub_func_struc_len", "Structure Length")
SupModID = uint32("sup_mod_id", "Sup Mod ID")
StackCount = uint32("stack_count", "Stack Count")
StackFullNameStr = nstring8("stack_full_name_str", "Stack Full Name")
StackMajorVN = uint8("stack_major_vn", "Stack Major Version Number")
StackMinorVN = uint8("stack_minor_vn", "Stack Minor Version Number")
StackNumber = uint32("stack_number", "Stack Number")
StartConnNumber = uint32("start_conn_num", "Starting Connection Number")
StartingBlock = uint16("starting_block", "Starting Block")
StartingNumber = uint32("starting_number", "Starting Number")
StartingSearchNumber = uint16("start_search_number", "Start Search Number")
StartNumber = uint32("start_number", "Start Number")
startNumberFlag = uint16("start_number_flag", "Start Number Flag")
StartOffset64bit = bytes("s_offset_64bit", "64bit Starting Offset", 64)
StartVolumeNumber = uint32("start_volume_number", "Starting Volume Number")
StationList = uint32("station_list", "Station List")
StationNumber = bytes("station_number", "Station Number", 3)
StatMajorVersion = uint8("stat_major_version", "Statistics Table Major Version")
StatMinorVersion = uint8("stat_minor_version", "Statistics Table Minor Version")
Status = bitfield16("status", "Status", [
bf_boolean16(0x0001, "user_info_logged_in", "Logged In"),
bf_boolean16(0x0002, "user_info_being_abort", "Being Aborted"),
bf_boolean16(0x0004, "user_info_audited", "Audited"),
bf_boolean16(0x0008, "user_info_need_sec", "Needs Security Change"),
bf_boolean16(0x0010, "user_info_mac_station", "MAC Station"),
bf_boolean16(0x0020, "user_info_temp_authen", "Temporary Authenticated"),
bf_boolean16(0x0040, "user_info_audit_conn", "Audit Connection Recorded"),
bf_boolean16(0x0080, "user_info_dsaudit_conn", "DS Audit Connection Recorded"),
bf_boolean16(0x0100, "user_info_logout", "Logout in Progress"),
bf_boolean16(0x0200, "user_info_int_login", "Internal Login"),
bf_boolean16(0x0400, "user_info_bindery", "Bindery Connection"),
])
StatusFlagBits = bitfield32("status_flag_bits", "Status Flag", [
bf_boolean32(0x00000001, "status_flag_bits_suballoc", "Sub Allocation"),
bf_boolean32(0x00000002, "status_flag_bits_comp", "Compression"),
bf_boolean32(0x00000004, "status_flag_bits_migrate", "Migration"),
bf_boolean32(0x00000008, "status_flag_bits_audit", "Audit"),
bf_boolean32(0x00000010, "status_flag_bits_ro", "Read Only"),
bf_boolean32(0x00000020, "status_flag_bits_im_purge", "Immediate Purge"),
bf_boolean32(0x00000040, "status_flag_bits_64bit", "64Bit File Offsets"),
bf_boolean32(0x00000080, "status_flag_bits_utf8", "UTF8 NCP Strings"),
bf_boolean32(0x80000000, "status_flag_bits_nss", "NSS Volume"),
])
SubAllocClusters = uint32("sub_alloc_clusters", "Sub Alloc Clusters")
SubAllocFreeableClusters = uint32("sub_alloc_freeable_clusters", "Sub Alloc Freeable Clusters")
Subdirectory = uint32("sub_directory", "Subdirectory")
Subdirectory.Display("BASE_HEX")
SuggestedFileSize = uint32("suggested_file_size", "Suggested File Size")
SupportModuleID = uint32("support_module_id", "Support Module ID")
SynchName = nstring8("synch_name", "Synch Name")
SystemIntervalMarker = uint32("system_interval_marker", "System Interval Marker")
TabSize = uint8( "tab_size", "Tab Size" )
TargetClientList = uint8("target_client_list", "Target Client List")
TargetConnectionNumber = uint16("target_connection_number", "Target Connection Number")
TargetDirectoryBase = uint32("target_directory_base", "Target Directory Base")
TargetDirHandle = uint8("target_dir_handle", "Target Directory Handle")
TargetEntryID = uint32("target_entry_id", "Target Entry ID")
TargetEntryID.Display("BASE_HEX")
TargetExecutionTime = bytes("target_execution_time", "Target Execution Time", 6)
TargetFileHandle = bytes("target_file_handle", "Target File Handle", 6)
TargetFileOffset = uint32("target_file_offset", "Target File Offset")
TargetFileOffset64bit = bytes("t_foffset", "Target File Offset", 8)
TargetMessage = nstring8("target_message", "Message")
TargetPrinter = uint8( "target_ptr", "Target Printer" )
targetReceiveTime = bytes("target_receive_time", "Target Receive Time", 8)
TargetServerIDNumber = uint32("target_server_id_number", "Target Server ID Number", ENC_BIG_ENDIAN )
TargetServerIDNumber.Display("BASE_HEX")
targetTransmitTime = bytes("target_transmit_time", "Target Transmit Time", 8)
TaskNumByte = uint8("task_num_byte", "Task Number")
TaskNumber = uint32("task_number", "Task Number")
TaskNumberWord = uint16("task_number_word", "Task Number")
TaskState = val_string8("task_state", "Task State", [
[ 0x00, "Normal" ],
[ 0x01, "TTS explicit transaction in progress" ],
[ 0x02, "TTS implicit transaction in progress" ],
[ 0x04, "Shared file set lock in progress" ],
])
TextJobDescription = fw_string("text_job_description", "Text Job Description", 50)
ThrashingCount = uint16("thrashing_count", "Thrashing Count")
TimeoutLimit = uint16("timeout_limit", "Timeout Limit")
TimesyncStatus = bitfield32("timesync_status_flags", "Timesync Status", [
bf_boolean32(0x00000001, "timesync_status_sync", "Time is Synchronized"),
bf_boolean32(0x00000002, "timesync_status_net_sync", "Time is Synchronized to the Network"),
bf_boolean32(0x00000004, "timesync_status_active", "Time Synchronization is Active"),
bf_boolean32(0x00000008, "timesync_status_external", "External Time Synchronization Active"),
bf_val_str32(0x00000700, "timesync_status_server_type", "Time Server Type", [
[ 0x01, "Client Time Server" ],
[ 0x02, "Secondary Time Server" ],
[ 0x03, "Primary Time Server" ],
[ 0x04, "Reference Time Server" ],
[ 0x05, "Single Reference Time Server" ],
]),
bf_boolean32(0x000f0000, "timesync_status_ext_sync", "External Clock Status"),
])
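# bf_val_str32() differs from bf_boolean32(): it carves a multi-bit subfield
# (here the three bits under mask 0x00000700) out of the 32-bit word and
# decodes it through its own value/string table, while the boolean entries
# each cover a single bit.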
TimeToNet = uint16("time_to_net", "Time To Net")
TotalBlocks = uint32("total_blocks", "Total Blocks")
TotalBlocks64 = uint64("total_blocks64", "Total Blocks")
TotalBlocksToDecompress = uint32("total_blks_to_dcompress", "Total Blocks To Decompress")
TotalBytesRead = bytes("user_info_ttl_bytes_rd", "Total Bytes Read", 6)
TotalBytesWritten = bytes("user_info_ttl_bytes_wrt", "Total Bytes Written", 6)
TotalCacheWrites = uint32("total_cache_writes", "Total Cache Writes")
TotalChangedFATs = uint32("total_changed_fats", "Total Changed FAT Entries")
TotalCommonCnts = uint32("total_common_cnts", "Total Common Counts")
TotalCntBlocks = uint32("total_cnt_blocks", "Total Count Blocks")
TotalDataStreamDiskSpaceAlloc = uint32("ttl_data_str_size_space_alloc", "Total Data Stream Disk Space Alloc")
TotalDirectorySlots = uint16("total_directory_slots", "Total Directory Slots")
TotalDirectoryEntries = uint32("total_dir_entries", "Total Directory Entries")
TotalDirEntries64 = uint64("total_dir_entries64", "Total Directory Entries")
TotalDynamicSpace = uint32("total_dynamic_space", "Total Dynamic Space")
TotalExtendedDirectoryExtents = uint32("total_extended_directory_extents", "Total Extended Directory Extents")
TotalFileServicePackets = uint32("total_file_service_packets", "Total File Service Packets")
TotalFilesOpened = uint32("total_files_opened", "Total Files Opened")
TotalLFSCounters = uint32("total_lfs_counters", "Total LFS Counters")
TotalOffspring = uint16("total_offspring", "Total Offspring")
TotalOtherPackets = uint32("total_other_packets", "Total Other Packets")
TotalQueueJobs = uint32("total_queue_jobs", "Total Queue Jobs")
TotalReadRequests = uint32("total_read_requests", "Total Read Requests")
TotalRequest = uint32("total_request", "Total Requests")
TotalRequestPackets = uint32("total_request_packets", "Total Request Packets")
TotalRoutedPackets = uint32("total_routed_packets", "Total Routed Packets")
TotalRxPkts = uint32("total_rx_pkts", "Total Receive Packets")
TotalServerMemory = uint16("total_server_memory", "Total Server Memory", ENC_BIG_ENDIAN)
TotalTransactionsBackedOut = uint32("total_trans_backed_out", "Total Transactions Backed Out")
TotalTransactionsPerformed = uint32("total_trans_performed", "Total Transactions Performed")
TotalTxPkts = uint32("total_tx_pkts", "Total Transmit Packets")
TotalUnfilledBackoutRequests = uint16("total_unfilled_backout_requests", "Total Unfilled Backout Requests")
TotalVolumeClusters = uint16("total_volume_clusters", "Total Volume Clusters")
TotalWriteRequests = uint32("total_write_requests", "Total Write Requests")
TotalWriteTransactionsPerformed = uint32("total_write_trans_performed", "Total Write Transactions Performed")
TrackOnFlag = boolean8("track_on_flag", "Track On Flag")
TransactionDiskSpace = uint16("transaction_disk_space", "Transaction Disk Space")
TransactionFATAllocations = uint32("transaction_fat_allocations", "Transaction FAT Allocations")
TransactionFileSizeChanges = uint32("transaction_file_size_changes", "Transaction File Size Changes")
TransactionFilesTruncated = uint32("transaction_files_truncated", "Transaction Files Truncated")
TransactionNumber = uint32("transaction_number", "Transaction Number")
TransactionTrackingEnabled = uint8("transaction_tracking_enabled", "Transaction Tracking Enabled")
TransactionTrackingFlag = uint16("tts_flag", "Transaction Tracking Flag")
TransactionTrackingSupported = uint8("transaction_tracking_supported", "Transaction Tracking Supported")
TransactionVolumeNumber = uint16("transaction_volume_number", "Transaction Volume Number")
TransportType = val_string8("transport_type", "Communications Type", [
[ 0x01, "Internet Packet Exchange (IPX)" ],
[ 0x05, "User Datagram Protocol (UDP)" ],
[ 0x06, "Transmission Control Protocol (TCP)" ],
])
TreeLength = uint32("tree_length", "Tree Length")
TreeName = nstring32("tree_name", "Tree Name")
TrusteeAccessMask = uint8("trustee_acc_mask", "Trustee Access Mask")
TrusteeRights = bitfield16("trustee_rights_low", "Trustee Rights", [
bf_boolean16(0x0001, "trustee_rights_read", "Read"),
bf_boolean16(0x0002, "trustee_rights_write", "Write"),
bf_boolean16(0x0004, "trustee_rights_open", "Open"),
bf_boolean16(0x0008, "trustee_rights_create", "Create"),
bf_boolean16(0x0010, "trustee_rights_del", "Delete"),
bf_boolean16(0x0020, "trustee_rights_parent", "Parental"),
bf_boolean16(0x0040, "trustee_rights_search", "Search"),
bf_boolean16(0x0080, "trustee_rights_modify", "Modify"),
bf_boolean16(0x0100, "trustee_rights_super", "Supervisor"),
])
TTSLevel = uint8("tts_level", "TTS Level")
TrusteeSetNumber = uint8("trustee_set_number", "Trustee Set Number")
TrusteeID = uint32("trustee_id_set", "Trustee ID")
TrusteeID.Display("BASE_HEX")
ttlCompBlks = uint32("ttl_comp_blks", "Total Compression Blocks")
TtlDSDskSpaceAlloc = uint32("ttl_ds_disk_space_alloc", "Total Streams Space Allocated")
TtlEAs = uint32("ttl_eas", "Total EAs")
TtlEAsDataSize = uint32("ttl_eas_data_size", "Total EAs Data Size")
TtlEAsKeySize = uint32("ttl_eas_key_size", "Total EAs Key Size")
ttlIntermediateBlks = uint32("ttl_inter_blks", "Total Intermediate Blocks")
TtlMigratedSize = uint32("ttl_migrated_size", "Total Migrated Size")
TtlNumOfRTags = uint32("ttl_num_of_r_tags", "Total Number of Resource Tags")
TtlNumOfSetCmds = uint32("ttl_num_of_set_cmds", "Total Number of Set Commands")
TtlValuesLength = uint32("ttl_values_length", "Total Values Length")
TtlWriteDataSize = uint32("ttl_write_data_size", "Total Write Data Size")
TurboUsedForFileService = uint16("turbo_used_for_file_service", "Turbo Used For File Service")
UnclaimedPkts = uint32("un_claimed_packets", "Unclaimed Packets")
UnCompressableDataStreamsCount = uint32("un_compressable_data_streams_count", "Uncompressable Data Streams Count")
Undefined8 = bytes("undefined_8", "Undefined", 8)
Undefined28 = bytes("undefined_28", "Undefined", 28)
UndefinedWord = uint16("undefined_word", "Undefined")
UniqueID = uint8("unique_id", "Unique ID")
UnknownByte = uint8("unknown_byte", "Unknown Byte")
Unused = uint8("un_used", "Unused")
UnusedBlocks = uint32("unused_blocks", "Unused Blocks")
UnUsedDirectoryEntries = uint32("un_used_directory_entries", "Unused Directory Entries")
UnusedDiskBlocks = uint32("unused_disk_blocks", "Unused Disk Blocks")
UnUsedExtendedDirectoryExtents = uint32("un_used_extended_directory_extents", "Unused Extended Directory Extents")
UpdateDate = uint16("update_date", "Update Date")
UpdateDate.NWDate()
UpdateID = uint32("update_id", "Update ID", ENC_BIG_ENDIAN)
UpdateID.Display("BASE_HEX")
UpdateTime = uint16("update_time", "Update Time")
UpdateTime.NWTime()
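# The .NWDate() and .NWTime() calls above mark these 16-bit fields for
# decoding as NetWare's packed date (year/month/day) and time
# (hour/minute/second) formats rather than as plain integers.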
UseCount = val_string16("user_info_use_count", "Use Count", [
[ 0x0000, "Connection is not in use" ],
[ 0x0001, "Connection is in use" ],
])
UsedBlocks = uint32("used_blocks", "Used Blocks")
UserID = uint32("user_id", "User ID", ENC_BIG_ENDIAN)
UserID.Display("BASE_HEX")
UserLoginAllowed = val_string8("user_login_allowed", "Login Status", [
[ 0x00, "Client Login Disabled" ],
[ 0x01, "Client Login Enabled" ],
])
UserName = nstring8("user_name", "User Name")
UserName16 = fw_string("user_name_16", "User Name", 16)
UserName48 = fw_string("user_name_48", "User Name", 48)
UserType = uint16("user_type", "User Type")
UTCTimeInSeconds = eptime("uts_time_in_seconds", "UTC Time in Seconds")
ValueAvailable = val_string8("value_available", "Value Available", [
[ 0x00, "Has No Value" ],
[ 0xff, "Has Value" ],
])
VAPVersion = uint8("vap_version", "VAP Version")
VariableBitMask = uint32("variable_bit_mask", "Variable Bit Mask")
VariableBitsDefined = uint16("variable_bits_defined", "Variable Bits Defined")
VConsoleRevision = uint8("vconsole_rev", "Console Revision")
VConsoleVersion = uint8("vconsole_ver", "Console Version")
Verb = uint32("verb", "Verb")
VerbData = uint8("verb_data", "Verb Data")
version = uint32("version", "Version")
VersionNumber = uint8("version_number", "Version")
VersionNumberLong = uint32("version_num_long", "Version")
VertLocation = uint16("vert_location", "Vertical Location")
VirtualConsoleVersion = uint8("virtual_console_version", "Virtual Console Version")
VolumeID = uint32("volume_id", "Volume ID")
VolumeID.Display("BASE_HEX")
VolInfoReplyLen = uint16("vol_info_reply_len", "Volume Information Reply Length")
VolInfoReturnInfoMask = bitfield32("vol_info_ret_info_mask", "Return Information Mask", [
bf_boolean32(0x00000001, "vinfo_info64", "Return 64 bit Volume Information"),
bf_boolean32(0x00000002, "vinfo_volname", "Return Volume Name Details"),
])
VolumeCapabilities = bitfield32("volume_capabilities", "Volume Capabilities", [
bf_boolean32(0x00000001, "vol_cap_user_space", "NetWare User Space Restrictions Supported"),
bf_boolean32(0x00000002, "vol_cap_dir_quota", "NetWare Directory Quotas Supported"),
bf_boolean32(0x00000004, "vol_cap_dfs", "DFS is Active on Volume"),
bf_boolean32(0x00000008, "vol_cap_sal_purge", "NetWare Salvage and Purge Operations Supported"),
bf_boolean32(0x00000010, "vol_cap_comp", "NetWare Compression Supported"),
bf_boolean32(0x00000020, "vol_cap_cluster", "Volume is a Cluster Resource"),
bf_boolean32(0x00000040, "vol_cap_nss_admin", "Volume is the NSS Admin Volume"),
bf_boolean32(0x00000080, "vol_cap_nss", "Volume is Mounted by NSS"),
bf_boolean32(0x00000100, "vol_cap_ea", "OS/2 style EAs Supported"),
bf_boolean32(0x00000200, "vol_cap_archive", "NetWare Archive bit Supported"),
bf_boolean32(0x00000400, "vol_cap_file_attr", "Full NetWare file Attributes Supported"),
])
VolumeCachedFlag = val_string8("volume_cached_flag", "Volume Cached Flag", [
[ 0x00, "Volume is Not Cached" ],
[ 0xff, "Volume is Cached" ],
])
VolumeDataStreams = uint8("volume_data_streams", "Volume Data Streams")
VolumeEpochTime = eptime("epoch_time", "Last Modified Timestamp")
VolumeGUID = stringz("volume_guid", "Volume GUID")
VolumeHashedFlag = val_string8("volume_hashed_flag", "Volume Hashed Flag", [
[ 0x00, "Volume is Not Hashed" ],
[ 0xff, "Volume is Hashed" ],
])
VolumeMountedFlag = val_string8("volume_mounted_flag", "Volume Mounted Flag", [
[ 0x00, "Volume is Not Mounted" ],
[ 0xff, "Volume is Mounted" ],
])
VolumeMountPoint = stringz("volume_mnt_point", "Volume Mount Point")
VolumeName = fw_string("volume_name", "Volume Name", 16)
VolumeNameLen = nstring8("volume_name_len", "Volume Name")
VolumeNameSpaces = uint8("volume_name_spaces", "Volume Name Spaces")
VolumeNameStringz = stringz("vol_name_stringz", "Volume Name")
VolumeNumber = uint8("volume_number", "Volume Number")
VolumeNumberLong = uint32("volume_number_long", "Volume Number")
VolumeRemovableFlag = val_string8("volume_removable_flag", "Volume Removable Flag", [
[ 0x00, "Disk Cannot be Removed from Server" ],
[ 0xff, "Disk Can be Removed from Server" ],
])
VolumeRequestFlags = val_string16("volume_request_flags", "Volume Request Flags", [
[ 0x0000, "Do not return name with volume number" ],
[ 0x0001, "Return name with volume number" ],
])
VolumeSizeInClusters = uint32("volume_size_in_clusters", "Volume Size in Clusters")
VolumesSupportedMax = uint16("volumes_supported_max", "Volumes Supported Max")
VolumeType = val_string16("volume_type", "Volume Type", [
[ 0x0000, "NetWare 386" ],
[ 0x0001, "NetWare 286" ],
[ 0x0002, "NetWare 386 Version 30" ],
[ 0x0003, "NetWare 386 Version 31" ],
])
VolumeTypeLong = val_string32("volume_type_long", "Volume Type", [
[ 0x00000000, "NetWare 386" ],
[ 0x00000001, "NetWare 286" ],
[ 0x00000002, "NetWare 386 Version 30" ],
[ 0x00000003, "NetWare 386 Version 31" ],
])
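# VolumeType and VolumeTypeLong above carry the same enumeration; the only
# difference is the width of the value on the wire (16 vs. 32 bits).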
WastedServerMemory = uint16("wasted_server_memory", "Wasted Server Memory", ENC_BIG_ENDIAN)
WaitTime = uint32("wait_time", "Wait Time")
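# The Year table below deliberately maps two ranges: 0x50-0x7f covers
# 1980-2027, and 0xc0-0xdf repeats 1984-2015, so either encoding of the year
# seen on the wire decodes to the same display string.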
Year = val_string8("year", "Year", [
[ 0x50, "1980" ],
[ 0x51, "1981" ],
[ 0x52, "1982" ],
[ 0x53, "1983" ],
[ 0x54, "1984" ],
[ 0x55, "1985" ],
[ 0x56, "1986" ],
[ 0x57, "1987" ],
[ 0x58, "1988" ],
[ 0x59, "1989" ],
[ 0x5a, "1990" ],
[ 0x5b, "1991" ],
[ 0x5c, "1992" ],
[ 0x5d, "1993" ],
[ 0x5e, "1994" ],
[ 0x5f, "1995" ],
[ 0x60, "1996" ],
[ 0x61, "1997" ],
[ 0x62, "1998" ],
[ 0x63, "1999" ],
[ 0x64, "2000" ],
[ 0x65, "2001" ],
[ 0x66, "2002" ],
[ 0x67, "2003" ],
[ 0x68, "2004" ],
[ 0x69, "2005" ],
[ 0x6a, "2006" ],
[ 0x6b, "2007" ],
[ 0x6c, "2008" ],
[ 0x6d, "2009" ],
[ 0x6e, "2010" ],
[ 0x6f, "2011" ],
[ 0x70, "2012" ],
[ 0x71, "2013" ],
[ 0x72, "2014" ],
[ 0x73, "2015" ],
[ 0x74, "2016" ],
[ 0x75, "2017" ],
[ 0x76, "2018" ],
[ 0x77, "2019" ],
[ 0x78, "2020" ],
[ 0x79, "2021" ],
[ 0x7a, "2022" ],
[ 0x7b, "2023" ],
[ 0x7c, "2024" ],
[ 0x7d, "2025" ],
[ 0x7e, "2026" ],
[ 0x7f, "2027" ],
[ 0xc0, "1984" ],
[ 0xc1, "1985" ],
[ 0xc2, "1986" ],
[ 0xc3, "1987" ],
[ 0xc4, "1988" ],
[ 0xc5, "1989" ],
[ 0xc6, "1990" ],
[ 0xc7, "1991" ],
[ 0xc8, "1992" ],
[ 0xc9, "1993" ],
[ 0xca, "1994" ],
[ 0xcb, "1995" ],
[ 0xcc, "1996" ],
[ 0xcd, "1997" ],
[ 0xce, "1998" ],
[ 0xcf, "1999" ],
[ 0xd0, "2000" ],
[ 0xd1, "2001" ],
[ 0xd2, "2002" ],
[ 0xd3, "2003" ],
[ 0xd4, "2004" ],
[ 0xd5, "2005" ],
[ 0xd6, "2006" ],
[ 0xd7, "2007" ],
[ 0xd8, "2008" ],
[ 0xd9, "2009" ],
[ 0xda, "2010" ],
[ 0xdb, "2011" ],
[ 0xdc, "2012" ],
[ 0xdd, "2013" ],
[ 0xde, "2014" ],
[ 0xdf, "2015" ],
])
##############################################################################
# Structs
##############################################################################
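# A struct() groups previously declared fields (or inline field constructors)
# into an ordered record; the optional trailing string becomes the record's
# description in the protocol tree. A minimal hypothetical example (the name
# and field choice are illustrative only):
#
#   ExampleStruct = struct("example_struct", [
#       VolumeNumber,
#       Reserved2,
#   ], "Example Information")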
acctngInfo = struct("acctng_info_struct", [
HoldTime,
HoldAmount,
ChargeAmount,
HeldConnectTimeInMinutes,
HeldRequests,
HeldBytesRead,
HeldBytesWritten,
],"Accounting Information")
AFP10Struct = struct("afp_10_struct", [
AFPEntryID,
ParentID,
AttributesDef16,
DataForkLen,
ResourceForkLen,
TotalOffspring,
CreationDate,
LastAccessedDate,
ModifiedDate,
ModifiedTime,
ArchivedDate,
ArchivedTime,
CreatorID,
Reserved4,
FinderAttr,
HorizLocation,
VertLocation,
FileDirWindow,
Reserved16,
LongName,
CreatorID,
ShortName,
AccessPrivileges,
], "AFP Information" )
AFP20Struct = struct("afp_20_struct", [
AFPEntryID,
ParentID,
AttributesDef16,
DataForkLen,
ResourceForkLen,
TotalOffspring,
CreationDate,
LastAccessedDate,
ModifiedDate,
ModifiedTime,
ArchivedDate,
ArchivedTime,
CreatorID,
Reserved4,
FinderAttr,
HorizLocation,
VertLocation,
FileDirWindow,
Reserved16,
LongName,
CreatorID,
ShortName,
AccessPrivileges,
Reserved,
ProDOSInfo,
], "AFP Information" )
ArchiveDateStruct = struct("archive_date_struct", [
ArchivedDate,
])
ArchiveIdStruct = struct("archive_id_struct", [
ArchiverID,
])
ArchiveInfoStruct = struct("archive_info_struct", [
ArchivedTime,
ArchivedDate,
ArchiverID,
], "Archive Information")
ArchiveTimeStruct = struct("archive_time_struct", [
ArchivedTime,
])
AttributesStruct = struct("attributes_struct", [
AttributesDef32,
FlagsDef,
], "Attributes")
authInfo = struct("auth_info_struct", [
Status,
Reserved2,
Privileges,
])
BoardNameStruct = struct("board_name_struct", [
DriverBoardName,
DriverShortName,
DriverLogicalName,
], "Board Name")
CacheInfo = struct("cache_info", [
uint32("max_byte_cnt", "Maximum Byte Count"),
uint32("min_num_of_cache_buff", "Minimum Number Of Cache Buffers"),
uint32("min_cache_report_thresh", "Minimum Cache Report Threshold"),
uint32("alloc_waiting", "Allocate Waiting Count"),
uint32("ndirty_blocks", "Number of Dirty Blocks"),
uint32("cache_dirty_wait_time", "Cache Dirty Wait Time"),
uint32("cache_max_concur_writes", "Cache Maximum Concurrent Writes"),
uint32("max_dirty_time", "Maximum Dirty Time"),
uint32("num_dir_cache_buff", "Number Of Directory Cache Buffers"),
uint32("cache_byte_to_block", "Cache Byte To Block Shift Factor"),
], "Cache Information")
CommonLanStruc = struct("common_lan_struct", [
boolean8("not_supported_mask", "Bit Counter Supported"),
Reserved3,
uint32("total_tx_packet_count", "Total Transmit Packet Count"),
uint32("total_rx_packet_count", "Total Receive Packet Count"),
uint32("no_ecb_available_count", "No ECB Available Count"),
uint32("packet_tx_too_big_count", "Transmit Packet Too Big Count"),
uint32("packet_tx_too_small_count", "Transmit Packet Too Small Count"),
uint32("packet_rx_overflow_count", "Receive Packet Overflow Count"),
uint32("packet_rx_too_big_count", "Receive Packet Too Big Count"),
uint32("packet_rs_too_small_count", "Receive Packet Too Small Count"),
uint32("packet_tx_misc_error_count", "Transmit Packet Misc Error Count"),
uint32("packet_rx_misc_error_count", "Receive Packet Misc Error Count"),
uint32("retry_tx_count", "Transmit Retry Count"),
uint32("checksum_error_count", "Checksum Error Count"),
uint32("hardware_rx_mismatch_count", "Hardware Receive Mismatch Count"),
], "Common LAN Information")
CompDeCompStat = struct("comp_d_comp_stat", [
uint32("cmphitickhigh", "Compress High Tick"),
uint32("cmphitickcnt", "Compress High Tick Count"),
uint32("cmpbyteincount", "Compress Byte In Count"),
uint32("cmpbyteoutcnt", "Compress Byte Out Count"),
uint32("cmphibyteincnt", "Compress High Byte In Count"),
uint32("cmphibyteoutcnt", "Compress High Byte Out Count"),
uint32("decphitickhigh", "DeCompress High Tick"),
uint32("decphitickcnt", "DeCompress High Tick Count"),
uint32("decpbyteincount", "DeCompress Byte In Count"),
uint32("decpbyteoutcnt", "DeCompress Byte Out Count"),
uint32("decphibyteincnt", "DeCompress High Byte In Count"),
uint32("decphibyteoutcnt", "DeCompress High Byte Out Count"),
], "Compression/Decompression Information")
ConnFileStruct = struct("conn_file_struct", [
ConnectionNumberWord,
TaskNumberWord,
LockType,
AccessControl,
LockFlag,
], "File Connection Information")
ConnStruct = struct("conn_struct", [
TaskNumByte,
LockType,
AccessControl,
LockFlag,
VolumeNumber,
DirectoryEntryNumberWord,
FileName14,
], "Connection Information")
ConnTaskStruct = struct("conn_task_struct", [
ConnectionNumberByte,
TaskNumByte,
], "Task Information")
Counters = struct("counters_struct", [
uint32("read_exist_blck", "Read Existing Block Count"),
uint32("read_exist_write_wait", "Read Existing Write Wait Count"),
uint32("read_exist_part_read", "Read Existing Partial Read Count"),
uint32("read_exist_read_err", "Read Existing Read Error Count"),
uint32("wrt_blck_cnt", "Write Block Count"),
uint32("wrt_entire_blck", "Write Entire Block Count"),
uint32("internl_dsk_get", "Internal Disk Get Count"),
uint32("internl_dsk_get_need_to_alloc", "Internal Disk Get Need To Allocate Count"),
uint32("internl_dsk_get_someone_beat", "Internal Disk Get Someone Beat My Count"),
uint32("internl_dsk_get_part_read", "Internal Disk Get Partial Read Count"),
uint32("internl_dsk_get_read_err", "Internal Disk Get Read Error Count"),
uint32("async_internl_dsk_get", "Async Internal Disk Get Count"),
uint32("async_internl_dsk_get_need_to_alloc", "Async Internal Disk Get Need To Alloc"),
uint32("async_internl_dsk_get_someone_beat", "Async Internal Disk Get Someone Beat Me"),
uint32("err_doing_async_read", "Error Doing Async Read Count"),
uint32("internl_dsk_get_no_read", "Internal Disk Get No Read Count"),
uint32("internl_dsk_get_no_read_alloc", "Internal Disk Get No Read Allocate Count"),
uint32("internl_dsk_get_no_read_someone_beat", "Internal Disk Get No Read Someone Beat Me Count"),
uint32("internl_dsk_write", "Internal Disk Write Count"),
uint32("internl_dsk_write_alloc", "Internal Disk Write Allocate Count"),
uint32("internl_dsk_write_someone_beat", "Internal Disk Write Someone Beat Me Count"),
uint32("write_err", "Write Error Count"),
uint32("wait_on_sema", "Wait On Semaphore Count"),
uint32("alloc_blck_i_had_to_wait_for", "Allocate Block I Had To Wait For Someone Count"),
uint32("alloc_blck", "Allocate Block Count"),
uint32("alloc_blck_i_had_to_wait", "Allocate Block I Had To Wait Count"),
], "Disk Counter Information")
CPUInformation = struct("cpu_information", [
PageTableOwnerFlag,
CPUType,
Reserved3,
CoprocessorFlag,
BusType,
Reserved3,
IOEngineFlag,
Reserved3,
FSEngineFlag,
Reserved3,
NonDedFlag,
Reserved3,
CPUString,
CoProcessorString,
BusString,
], "CPU Information")
CreationDateStruct = struct("creation_date_struct", [
CreationDate,
])
CreationInfoStruct = struct("creation_info_struct", [
CreationTime,
CreationDate,
endian(CreatorID, ENC_LITTLE_ENDIAN),
], "Creation Information")
CreationTimeStruct = struct("creation_time_struct", [
CreationTime,
])
CustomCntsInfo = struct("custom_cnts_info", [
CustomVariableValue,
CustomString,
], "Custom Counters" )
DataStreamInfo = struct("data_stream_info", [
AssociatedNameSpace,
DataStreamName,
])
DataStreamSizeStruct = struct("data_stream_size_struct", [
DataStreamSize,
])
DirCacheInfo = struct("dir_cache_info", [
uint32("min_time_since_file_delete", "Minimum Time Since File Delete"),
uint32("abs_min_time_since_file_delete", "Absolute Minimum Time Since File Delete"),
uint32("min_num_of_dir_cache_buff", "Minimum Number Of Directory Cache Buffers"),
uint32("max_num_of_dir_cache_buff", "Maximum Number Of Directory Cache Buffers"),
uint32("num_of_dir_cache_buff", "Number Of Directory Cache Buffers"),
uint32("dc_min_non_ref_time", "DC Minimum Non-Referenced Time"),
uint32("dc_wait_time_before_new_buff", "DC Wait Time Before New Buffer"),
uint32("dc_max_concurrent_writes", "DC Maximum Concurrent Writes"),
uint32("dc_dirty_wait_time", "DC Dirty Wait Time"),
uint32("dc_double_read_flag", "DC Double Read Flag"),
uint32("map_hash_node_count", "Map Hash Node Count"),
uint32("space_restriction_node_count", "Space Restriction Node Count"),
uint32("trustee_list_node_count", "Trustee List Node Count"),
uint32("percent_of_vol_used_by_dirs", "Percent Of Volume Used By Directories"),
], "Directory Cache Information")
DirDiskSpaceRest64bit = struct("dir_disk_space_rest_64bit", [
Level,
MaxSpace64,
MinSpaceLeft64
], "Directory Disk Space Restriction 64 bit")
DirEntryStruct = struct("dir_entry_struct", [
DirectoryEntryNumber,
DOSDirectoryEntryNumber,
VolumeNumberLong,
], "Directory Entry Information")
DirectoryInstance = struct("directory_instance", [
SearchSequenceWord,
DirectoryID,
DirectoryName14,
DirectoryAttributes,
DirectoryAccessRights,
endian(CreationDate, ENC_BIG_ENDIAN),
endian(AccessDate, ENC_BIG_ENDIAN),
CreatorID,
Reserved2,
DirectoryStamp,
], "Directory Information")
DMInfoLevel0 = struct("dm_info_level_0", [
uint32("io_flag", "IO Flag"),
uint32("sm_info_size", "Storage Module Information Size"),
uint32("avail_space", "Available Space"),
uint32("used_space", "Used Space"),
stringz("s_module_name", "Storage Module Name"),
uint8("s_m_info", "Storage Media Information"),
])
DMInfoLevel1 = struct("dm_info_level_1", [
NumberOfSMs,
SMIDs,
])
DMInfoLevel2 = struct("dm_info_level_2", [
Name,
])
DOSDirectoryEntryStruct = struct("dos_directory_entry_struct", [
AttributesDef32,
UniqueID,
PurgeFlags,
DestNameSpace,
DirectoryNameLen,
DirectoryName,
CreationTime,
CreationDate,
CreatorID,
ArchivedTime,
ArchivedDate,
ArchiverID,
UpdateTime,
UpdateDate,
NextTrusteeEntry,
Reserved48,
InheritedRightsMask,
], "DOS Directory Information")
DOSFileEntryStruct = struct("dos_file_entry_struct", [
AttributesDef32,
UniqueID,
PurgeFlags,
DestNameSpace,
NameLen,
Name12,
CreationTime,
CreationDate,
CreatorID,
ArchivedTime,
ArchivedDate,
ArchiverID,
UpdateTime,
UpdateDate,
UpdateID,
FileSize,
DataForkFirstFAT,
NextTrusteeEntry,
Reserved36,
InheritedRightsMask,
LastAccessedDate,
Reserved20,
PrimaryEntry,
NameList,
], "DOS File Information")
DSSpaceAllocateStruct = struct("ds_space_alloc_struct", [
DataStreamSpaceAlloc,
])
DynMemStruct = struct("dyn_mem_struct", [
uint32("dyn_mem_struct_total", "Total Dynamic Space" ),
uint32("dyn_mem_struct_max", "Max Used Dynamic Space" ),
uint32("dyn_mem_struct_cur", "Current Used Dynamic Space" ),
], "Dynamic Memory Information")
EAInfoStruct = struct("ea_info_struct", [
EADataSize,
EACount,
EAKeySize,
], "Extended Attribute Information")
ExtraCacheCntrs = struct("extra_cache_cntrs", [
uint32("internl_dsk_get_no_wait", "Internal Disk Get No Wait Count"),
uint32("internl_dsk_get_no_wait_need", "Internal Disk Get No Wait Need To Allocate Count"),
uint32("internl_dsk_get_no_wait_no_blk", "Internal Disk Get No Wait No Block Count"),
uint32("id_get_no_read_no_wait", "ID Get No Read No Wait Count"),
uint32("id_get_no_read_no_wait_sema", "ID Get No Read No Wait Semaphored Count"),
uint32("id_get_no_read_no_wait_buffer", "ID Get No Read No Wait No Buffer Count"),
uint32("id_get_no_read_no_wait_alloc", "ID Get No Read No Wait Allocate Count"),
uint32("id_get_no_read_no_wait_no_alloc", "ID Get No Read No Wait No Alloc Count"),
uint32("id_get_no_read_no_wait_no_alloc_sema", "ID Get No Read No Wait No Alloc Semaphored Count"),
uint32("id_get_no_read_no_wait_no_alloc_alloc", "ID Get No Read No Wait No Alloc Allocate Count"),
], "Extra Cache Counters Information")
FileSize64bitStruct = struct("file_sz_64bit_struct", [
FileSize64bit,
])
ReferenceIDStruct = struct("ref_id_struct", [
CurrentReferenceID,
])
NSAttributeStruct = struct("ns_attrib_struct", [
AttributesDef32,
])
DStreamActual = struct("d_stream_actual", [
DataStreamNumberLong,
DataStreamFATBlocks,
], "Actual Stream")
DStreamLogical = struct("d_stream_logical", [
DataStreamNumberLong,
DataStreamSize,
], "Logical Stream")
LastUpdatedInSecondsStruct = struct("last_update_in_seconds_struct", [
SecondsRelativeToTheYear2000,
])
DOSNameStruct = struct("dos_name_struct", [
FileName,
], "DOS File Name")
DOSName16Struct = struct("dos_name_16_struct", [
FileName16,
], "DOS File Name")
FlushTimeStruct = struct("flush_time_struct", [
FlushTime,
])
ParentBaseIDStruct = struct("parent_base_id_struct", [
ParentBaseID,
])
MacFinderInfoStruct = struct("mac_finder_info_struct", [
MacFinderInfo,
])
SiblingCountStruct = struct("sibling_count_struct", [
SiblingCount,
])
EffectiveRightsStruct = struct("eff_rights_struct", [
EffectiveRights,
Reserved3,
])
MacTimeStruct = struct("mac_time_struct", [
MACCreateDate,
MACCreateTime,
MACBackupDate,
MACBackupTime,
])
LastAccessedTimeStruct = struct("last_access_time_struct", [
LastAccessedTime,
])
FileAttributesStruct = struct("file_attributes_struct", [
AttributesDef32,
])
FileInfoStruct = struct("file_info_struct", [
ParentID,
DirectoryEntryNumber,
TotalBlocksToDecompress,
#CurrentBlockBeingDecompressed,
], "File Information")
FileInstance = struct("file_instance", [
SearchSequenceWord,
DirectoryID,
FileName14,
AttributesDef,
FileMode,
FileSize,
endian(CreationDate, ENC_BIG_ENDIAN),
endian(AccessDate, ENC_BIG_ENDIAN),
endian(UpdateDate, ENC_BIG_ENDIAN),
endian(UpdateTime, ENC_BIG_ENDIAN),
], "File Instance")
FileNameStruct = struct("file_name_struct", [
FileName,
], "File Name")
FileName16Struct = struct("file_name16_struct", [
FileName16,
], "File Name")
FileServerCounters = struct("file_server_counters", [
uint16("too_many_hops", "Too Many Hops"),
uint16("unknown_network", "Unknown Network"),
uint16("no_space_for_service", "No Space For Service"),
uint16("no_receive_buff", "No Receive Buffers"),
uint16("not_my_network", "Not My Network"),
uint32("netbios_progated", "NetBIOS Propagated Count"),
uint32("ttl_pckts_srvcd", "Total Packets Serviced"),
uint32("ttl_pckts_routed", "Total Packets Routed"),
], "File Server Counters")
FileSystemInfo = struct("file_system_info", [
uint32("fat_moved", "Number of times the OS has move the location of FAT"),
uint32("fat_write_err", "Number of write errors in both original and mirrored copies of FAT"),
uint32("someone_else_did_it_0", "Someone Else Did It Count 0"),
uint32("someone_else_did_it_1", "Someone Else Did It Count 1"),
uint32("someone_else_did_it_2", "Someone Else Did It Count 2"),
uint32("i_ran_out_someone_else_did_it_0", "I Ran Out Someone Else Did It Count 0"),
uint32("i_ran_out_someone_else_did_it_1", "I Ran Out Someone Else Did It Count 1"),
uint32("i_ran_out_someone_else_did_it_2", "I Ran Out Someone Else Did It Count 2"),
uint32("turbo_fat_build_failed", "Turbo FAT Build Failed Count"),
uint32("extra_use_count_node_count", "Errors allocating a use count node for TTS"),
uint32("extra_extra_use_count_node_count", "Errors allocating an additional use count node for TTS"),
uint32("error_read_last_fat", "Error Reading Last FAT Count"),
uint32("someone_else_using_this_file", "Someone Else Using This File Count"),
], "File System Information")
GenericInfoDef = struct("generic_info_def", [
fw_string("generic_label", "Label", 64),
uint32("generic_ident_type", "Identification Type"),
uint32("generic_ident_time", "Identification Time"),
uint32("generic_media_type", "Media Type"),
uint32("generic_cartridge_type", "Cartridge Type"),
uint32("generic_unit_size", "Unit Size"),
uint32("generic_block_size", "Block Size"),
uint32("generic_capacity", "Capacity"),
uint32("generic_pref_unit_size", "Preferred Unit Size"),
fw_string("generic_name", "Name",64),
uint32("generic_type", "Type"),
uint32("generic_status", "Status"),
uint32("generic_func_mask", "Function Mask"),
uint32("generic_ctl_mask", "Control Mask"),
uint32("generic_parent_count", "Parent Count"),
uint32("generic_sib_count", "Sibling Count"),
uint32("generic_child_count", "Child Count"),
uint32("generic_spec_info_sz", "Specific Information Size"),
uint32("generic_object_uniq_id", "Unique Object ID"),
uint32("generic_media_slot", "Media Slot"),
], "Generic Information")
HandleInfoLevel0 = struct("handle_info_level_0", [
# DataStream,
])
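# Handle info level 0 carries no fields: its single DataStream member is left
# commented out in the definition above.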
HandleInfoLevel1 = struct("handle_info_level_1", [
DataStream,
])
HandleInfoLevel2 = struct("handle_info_level_2", [
DOSDirectoryBase,
NameSpace,
DataStream,
])
HandleInfoLevel3 = struct("handle_info_level_3", [
DOSDirectoryBase,
NameSpace,
])
HandleInfoLevel4 = struct("handle_info_level_4", [
DOSDirectoryBase,
NameSpace,
ParentDirectoryBase,
ParentDOSDirectoryBase,
])
HandleInfoLevel5 = struct("handle_info_level_5", [
DOSDirectoryBase,
NameSpace,
DataStream,
ParentDirectoryBase,
ParentDOSDirectoryBase,
])
IPXInformation = struct("ipx_information", [
uint32("ipx_send_pkt", "IPX Send Packet Count"),
uint16("ipx_malform_pkt", "IPX Malformed Packet Count"),
uint32("ipx_get_ecb_req", "IPX Get ECB Request Count"),
uint32("ipx_get_ecb_fail", "IPX Get ECB Fail Count"),
uint32("ipx_aes_event", "IPX AES Event Count"),
uint16("ipx_postponed_aes", "IPX Postponed AES Count"),
uint16("ipx_max_conf_sock", "IPX Max Configured Socket Count"),
uint16("ipx_max_open_sock", "IPX Max Open Socket Count"),
uint16("ipx_open_sock_fail", "IPX Open Socket Fail Count"),
uint32("ipx_listen_ecb", "IPX Listen ECB Count"),
uint16("ipx_ecb_cancel_fail", "IPX ECB Cancel Fail Count"),
uint16("ipx_get_lcl_targ_fail", "IPX Get Local Target Fail Count"),
], "IPX Information")
JobEntryTime = struct("job_entry_time", [
Year,
Month,
Day,
Hour,
Minute,
Second,
], "Job Entry Time")
JobStruct3x = struct("job_struct_3x", [
RecordInUseFlag,
PreviousRecord,
NextRecord,
ClientStationLong,
ClientTaskNumberLong,
ClientIDNumber,
TargetServerIDNumber,
TargetExecutionTime,
JobEntryTime,
JobNumberLong,
JobType,
JobPositionWord,
JobControlFlagsWord,
JobFileName,
JobFileHandleLong,
ServerStationLong,
ServerTaskNumberLong,
ServerID,
TextJobDescription,
ClientRecordArea,
], "Job Information")
JobStruct = struct("job_struct", [
ClientStation,
ClientTaskNumber,
ClientIDNumber,
TargetServerIDNumber,
TargetExecutionTime,
JobEntryTime,
JobNumber,
JobType,
JobPosition,
JobControlFlags,
JobFileName,
JobFileHandle,
ServerStation,
ServerTaskNumber,
ServerID,
TextJobDescription,
ClientRecordArea,
], "Job Information")
JobStructNew = struct("job_struct_new", [
RecordInUseFlag,
PreviousRecord,
NextRecord,
ClientStationLong,
ClientTaskNumberLong,
ClientIDNumber,
TargetServerIDNumber,
TargetExecutionTime,
JobEntryTime,
JobNumberLong,
JobType,
JobPositionWord,
JobControlFlagsWord,
JobFileName,
JobFileHandleLong,
ServerStationLong,
ServerTaskNumberLong,
ServerID,
], "Job Information")
KnownRoutes = struct("known_routes", [
NetIDNumber,
HopsToNet,
NetStatus,
TimeToNet,
], "Known Routes")
SrcEnhNWHandlePathS1 = struct("source_nwhandle", [
DirectoryBase,
VolumeNumber,
HandleFlag,
DataTypeFlag,
Reserved5,
], "Source Information")
DstEnhNWHandlePathS1 = struct("destination_nwhandle", [
DirectoryBase,
VolumeNumber,
HandleFlag,
DataTypeFlag,
Reserved5,
], "Destination Information")
KnownServStruc = struct("known_server_struct", [
ServerAddress,
HopsToNet,
ServerNameStringz,
], "Known Servers")
LANConfigInfo = struct("lan_cfg_info", [
LANdriverCFG_MajorVersion,
LANdriverCFG_MinorVersion,
LANdriverNodeAddress,
Reserved,
LANdriverModeFlags,
LANdriverBoardNumber,
LANdriverBoardInstance,
LANdriverMaximumSize,
LANdriverMaxRecvSize,
LANdriverRecvSize,
LANdriverCardID,
LANdriverMediaID,
LANdriverTransportTime,
LANdriverSrcRouting,
LANdriverLineSpeed,
LANdriverReserved,
LANdriverMajorVersion,
LANdriverMinorVersion,
LANdriverFlags,
LANdriverSendRetries,
LANdriverLink,
LANdriverSharingFlags,
LANdriverSlot,
LANdriverIOPortsAndRanges1,
LANdriverIOPortsAndRanges2,
LANdriverIOPortsAndRanges3,
LANdriverIOPortsAndRanges4,
LANdriverMemoryDecode0,
LANdriverMemoryLength0,
LANdriverMemoryDecode1,
LANdriverMemoryLength1,
LANdriverInterrupt1,
LANdriverInterrupt2,
LANdriverDMAUsage1,
LANdriverDMAUsage2,
LANdriverLogicalName,
LANdriverIOReserved,
LANdriverCardName,
], "LAN Configuration Information")
LastAccessStruct = struct("last_access_struct", [
LastAccessedDate,
])
lockInfo = struct("lock_info_struct", [
LogicalLockThreshold,
PhysicalLockThreshold,
FileLockCount,
RecordLockCount,
], "Lock Information")
LockStruct = struct("lock_struct", [
TaskNumByte,
LockType,
RecordStart,
RecordEnd,
], "Locks")
LoginTime = struct("login_time", [
Year,
Month,
Day,
Hour,
Minute,
Second,
DayOfWeek,
], "Login Time")
LogLockStruct = struct("log_lock_struct", [
TaskNumberWord,
LockStatus,
LockName,
], "Logical Locks")
LogRecStruct = struct("log_rec_struct", [
ConnectionNumberWord,
TaskNumByte,
LockStatus,
], "Logical Record Locks")
LSLInformation = struct("lsl_information", [
uint32("rx_buffers", "Receive Buffers"),
uint32("rx_buffers_75", "Receive Buffers Warning Level"),
uint32("rx_buffers_checked_out", "Receive Buffers Checked Out Count"),
uint32("rx_buffer_size", "Receive Buffer Size"),
uint32("max_phy_packet_size", "Maximum Physical Packet Size"),
uint32("last_time_rx_buff_was_alloc", "Last Time a Receive Buffer was Allocated"),
uint32("max_num_of_protocols", "Maximum Number of Protocols"),
uint32("max_num_of_media_types", "Maximum Number of Media Types"),
uint32("total_tx_packets", "Total Transmit Packets"),
uint32("get_ecb_buf", "Get ECB Buffers"),
uint32("get_ecb_fails", "Get ECB Failures"),
uint32("aes_event_count", "AES Event Count"),
uint32("post_poned_events", "Postponed Events"),
uint32("ecb_cxl_fails", "ECB Cancel Failures"),
uint32("valid_bfrs_reused", "Valid Buffers Reused"),
uint32("enqueued_send_cnt", "Enqueued Send Count"),
uint32("total_rx_packets", "Total Receive Packets"),
uint32("unclaimed_packets", "Unclaimed Packets"),
uint8("stat_table_major_version", "Statistics Table Major Version"),
uint8("stat_table_minor_version", "Statistics Table Minor Version"),
], "LSL Information")
MaximumSpaceStruct = struct("max_space_struct", [
MaxSpace,
])
MemoryCounters = struct("memory_counters", [
uint32("orig_num_cache_buff", "Original Number Of Cache Buffers"),
uint32("curr_num_cache_buff", "Current Number Of Cache Buffers"),
uint32("cache_dirty_block_thresh", "Cache Dirty Block Threshold"),
uint32("wait_node", "Wait Node Count"),
uint32("wait_node_alloc_fail", "Wait Node Alloc Failure Count"),
uint32("move_cache_node", "Move Cache Node Count"),
uint32("move_cache_node_from_avai", "Move Cache Node From Avail Count"),
uint32("accel_cache_node_write", "Accelerate Cache Node Write Count"),
uint32("rem_cache_node", "Remove Cache Node Count"),
uint32("rem_cache_node_from_avail", "Remove Cache Node From Avail Count"),
], "Memory Counters")
MLIDBoardInfo = struct("mlid_board_info", [
uint32("protocol_board_num", "Protocol Board Number"),
uint16("protocol_number", "Protocol Number"),
bytes("protocol_id", "Protocol ID", 6),
nstring8("protocol_name", "Protocol Name"),
], "MLID Board Information")
ModifyInfoStruct = struct("modify_info_struct", [
ModifiedTime,
ModifiedDate,
endian(ModifierID, ENC_LITTLE_ENDIAN),
LastAccessedDate,
], "Modification Information")
nameInfo = struct("name_info_struct", [
ObjectType,
nstring8("login_name", "Login Name"),
], "Name Information")
NCPNetworkAddress = struct("ncp_network_address_struct", [
TransportType,
Reserved3,
NetAddress,
], "Network Address")
netAddr = struct("net_addr_struct", [
TransportType,
nbytes32("transport_addr", "Transport Address"),
], "Network Address")
NetWareInformationStruct = struct("netware_information_struct", [
DataStreamSpaceAlloc, # (Data Stream Alloc Bit)
AttributesDef32, # (Attributes Bit)
FlagsDef,
DataStreamSize, # (Data Stream Size Bit)
TotalDataStreamDiskSpaceAlloc, # (Total Stream Size Bit)
NumberOfDataStreams,
CreationTime, # (Creation Bit)
CreationDate,
CreatorID,
ModifiedTime, # (Modify Bit)
ModifiedDate,
ModifierID,
LastAccessedDate,
ArchivedTime, # (Archive Bit)
ArchivedDate,
ArchiverID,
InheritedRightsMask, # (Rights Bit)
DirectoryEntryNumber, # (Directory Entry Bit)
DOSDirectoryEntryNumber,
VolumeNumberLong,
EADataSize, # (Extended Attribute Bit)
EACount,
EAKeySize,
CreatorNameSpaceNumber, # (Name Space Bit)
Reserved3,
], "NetWare Information")
NLMInformation = struct("nlm_information", [
IdentificationNumber,
NLMFlags,
Reserved3,
NLMType,
Reserved3,
ParentID,
MajorVersion,
MinorVersion,
Revision,
Year,
Reserved3,
Month,
Reserved3,
Day,
Reserved3,
AllocAvailByte,
AllocFreeCount,
LastGarbCollect,
MessageLanguage,
NumberOfReferencedPublics,
], "NLM Information")
NSInfoStruct = struct("ns_info_struct", [
CreatorNameSpaceNumber,
Reserved3,
])
NWAuditStatus = struct("nw_audit_status", [
AuditVersionDate,
AuditFileVersionDate,
val_string16("audit_enable_flag", "Auditing Enabled Flag", [
[ 0x0000, "Auditing Disabled" ],
[ 0x0001, "Auditing Enabled" ],
]),
Reserved2,
uint32("audit_file_size", "Audit File Size"),
uint32("modified_counter", "Modified Counter"),
uint32("audit_file_max_size", "Audit File Maximum Size"),
uint32("audit_file_size_threshold", "Audit File Size Threshold"),
uint32("audit_record_count", "Audit Record Count"),
uint32("auditing_flags", "Auditing Flags"),
], "NetWare Audit Status")
ObjectSecurityStruct = struct("object_security_struct", [
ObjectSecurity,
])
ObjectFlagsStruct = struct("object_flags_struct", [
ObjectFlags,
])
ObjectTypeStruct = struct("object_type_struct", [
endian(ObjectType, ENC_BIG_ENDIAN),
Reserved2,
])
ObjectNameStruct = struct("object_name_struct", [
ObjectNameStringz,
])
ObjectIDStruct = struct("object_id_struct", [
ObjectID,
Restriction,
])
ObjectIDStruct64 = struct("object_id_struct64", [
endian(ObjectID, ENC_LITTLE_ENDIAN),
endian(RestrictionQuad, ENC_LITTLE_ENDIAN),
])
OpnFilesStruct = struct("opn_files_struct", [
TaskNumberWord,
LockType,
AccessControl,
LockFlag,
VolumeNumber,
DOSParentDirectoryEntry,
DOSDirectoryEntry,
ForkCount,
NameSpace,
FileName,
], "Open Files Information")
OwnerIDStruct = struct("owner_id_struct", [
CreatorID,
])
PacketBurstInformation = struct("packet_burst_information", [
uint32("big_invalid_slot", "Big Invalid Slot Count"),
uint32("big_forged_packet", "Big Forged Packet Count"),
uint32("big_invalid_packet", "Big Invalid Packet Count"),
uint32("big_still_transmitting", "Big Still Transmitting Count"),
uint32("still_doing_the_last_req", "Still Doing The Last Request Count"),
uint32("invalid_control_req", "Invalid Control Request Count"),
uint32("control_invalid_message_number", "Control Invalid Message Number Count"),
uint32("control_being_torn_down", "Control Being Torn Down Count"),
uint32("big_repeat_the_file_read", "Big Repeat the File Read Count"),
uint32("big_send_extra_cc_count", "Big Send Extra CC Count"),
uint32("big_return_abort_mess", "Big Return Abort Message Count"),
uint32("big_read_invalid_mess", "Big Read Invalid Message Number Count"),
uint32("big_read_do_it_over", "Big Read Do It Over Count"),
uint32("big_read_being_torn_down", "Big Read Being Torn Down Count"),
uint32("previous_control_packet", "Previous Control Packet Count"),
uint32("send_hold_off_message", "Send Hold Off Message Count"),
uint32("big_read_no_data_avail", "Big Read No Data Available Count"),
uint32("big_read_trying_to_read", "Big Read Trying To Read Too Much Count"),
uint32("async_read_error", "Async Read Error Count"),
uint32("big_read_phy_read_err", "Big Read Physical Read Error Count"),
uint32("ctl_bad_ack_frag_list", "Control Bad ACK Fragment List Count"),
uint32("ctl_no_data_read", "Control No Data Read Count"),
uint32("write_dup_req", "Write Duplicate Request Count"),
uint32("shouldnt_be_ack_here", "Shouldn't Be ACKing Here Count"),
uint32("write_incon_packet_len", "Write Inconsistent Packet Lengths Count"),
uint32("first_packet_isnt_a_write", "First Packet Isn't A Write Count"),
uint32("write_trash_dup_req", "Write Trashed Duplicate Request Count"),
uint32("big_write_inv_message_num", "Big Write Invalid Message Number Count"),
uint32("big_write_being_torn_down", "Big Write Being Torn Down Count"),
uint32("big_write_being_abort", "Big Write Being Aborted Count"),
uint32("zero_ack_frag", "Zero ACK Fragment Count"),
uint32("write_curr_trans", "Write Currently Transmitting Count"),
uint32("try_to_write_too_much", "Trying To Write Too Much Count"),
uint32("write_out_of_mem_for_ctl_nodes", "Write Out Of Memory For Control Nodes Count"),
uint32("write_didnt_need_this_frag", "Write Didn't Need This Fragment Count"),
uint32("write_too_many_buf_check", "Write Too Many Buffers Checked Out Count"),
uint32("write_timeout", "Write Time Out Count"),
uint32("write_got_an_ack0", "Write Got An ACK Count 0"),
uint32("write_got_an_ack1", "Write Got An ACK Count 1"),
uint32("poll_abort_conn", "Poller Aborted The Connection Count"),
uint32("may_had_out_of_order", "Maybe Had Out Of Order Writes Count"),
uint32("had_an_out_of_order", "Had An Out Of Order Write Count"),
uint32("moved_the_ack_bit_dn", "Moved The ACK Bit Down Count"),
uint32("bumped_out_of_order", "Bumped Out Of Order Write Count"),
uint32("poll_rem_old_out_of_order", "Poller Removed Old Out Of Order Count"),
uint32("write_didnt_need_but_req_ack", "Write Didn't Need But Requested ACK Count"),
uint32("write_trash_packet", "Write Trashed Packet Count"),
uint32("too_many_ack_frag", "Too Many ACK Fragments Count"),
uint32("saved_an_out_of_order_packet", "Saved An Out Of Order Packet Count"),
uint32("conn_being_aborted", "Connection Being Aborted Count"),
], "Packet Burst Information")
PadDSSpaceAllocate = struct("pad_ds_space_alloc", [
Reserved4,
])
PadAttributes = struct("pad_attributes", [
Reserved6,
])
PadDataStreamSize = struct("pad_data_stream_size", [
Reserved4,
])
PadTotalStreamSize = struct("pad_total_stream_size", [
Reserved6,
])
PadCreationInfo = struct("pad_creation_info", [
Reserved8,
])
PadModifyInfo = struct("pad_modify_info", [
Reserved10,
])
PadArchiveInfo = struct("pad_archive_info", [
Reserved8,
])
PadRightsInfo = struct("pad_rights_info", [
Reserved2,
])
PadDirEntry = struct("pad_dir_entry", [
Reserved12,
])
PadEAInfo = struct("pad_ea_info", [
Reserved12,
])
PadNSInfo = struct("pad_ns_info", [
Reserved4,
])
PhyLockStruct = struct("phy_lock_struct", [
LoggedCount,
ShareableLockCount,
RecordStart,
RecordEnd,
LogicalConnectionNumber,
TaskNumByte,
LockType,
], "Physical Locks")
printInfo = struct("print_info_struct", [
PrintFlags,
TabSize,
Copies,
PrintToFileFlag,
BannerName,
TargetPrinter,
FormType,
], "Print Information")
ReplyLevel1Struct = struct("reply_lvl_1_struct", [
DirHandle,
VolumeNumber,
Reserved4,
], "Reply Level 1")
ReplyLevel2Struct = struct("reply_lvl_2_struct", [
VolumeNumberLong,
DirectoryBase,
DOSDirectoryBase,
NameSpace,
DirHandle,
], "Reply Level 2")
RightsInfoStruct = struct("rights_info_struct", [
InheritedRightsMask,
])
RoutersInfo = struct("routers_info", [
bytes("node", "Node", 6),
ConnectedLAN,
uint16("route_hops", "Hop Count"),
uint16("route_time", "Route Time"),
], "Router Information")
RTagStructure = struct("r_tag_struct", [
RTagNumber,
ResourceSignature,
ResourceCount,
ResourceName,
], "Resource Tag")
ScanInfoFileName = struct("scan_info_file_name", [
SalvageableFileEntryNumber,
FileName,
])
ScanInfoFileNoName = struct("scan_info_file_no_name", [
SalvageableFileEntryNumber,
])
SeachSequenceStruct = struct("search_seq", [
VolumeNumber,
DirectoryEntryNumber,
SequenceNumber,
], "Search Sequence")
Segments = struct("segments", [
uint32("volume_segment_dev_num", "Volume Segment Device Number"),
uint32("volume_segment_offset", "Volume Segment Offset"),
uint32("volume_segment_size", "Volume Segment Size"),
], "Volume Segment Information")
SemaInfoStruct = struct("sema_info_struct", [
LogicalConnectionNumber,
TaskNumByte,
])
SemaStruct = struct("sema_struct", [
OpenCount,
SemaphoreValue,
TaskNumberWord,
SemaphoreName,
], "Semaphore Information")
ServerInfo = struct("server_info", [
uint32("reply_canceled", "Reply Canceled Count"),
uint32("write_held_off", "Write Held Off Count"),
uint32("write_held_off_with_dup", "Write Held Off With Duplicate Request"),
uint32("invalid_req_type", "Invalid Request Type Count"),
uint32("being_aborted", "Being Aborted Count"),
uint32("already_doing_realloc", "Already Doing Re-Allocate Count"),
uint32("dealloc_invalid_slot", "De-Allocate Invalid Slot Count"),
uint32("dealloc_being_proc", "De-Allocate Being Processed Count"),
uint32("dealloc_forged_packet", "De-Allocate Forged Packet Count"),
uint32("dealloc_still_transmit", "De-Allocate Still Transmitting Count"),
uint32("start_station_error", "Start Station Error Count"),
uint32("invalid_slot", "Invalid Slot Count"),
uint32("being_processed", "Being Processed Count"),
uint32("forged_packet", "Forged Packet Count"),
uint32("still_transmitting", "Still Transmitting Count"),
uint32("reexecute_request", "Re-Execute Request Count"),
uint32("invalid_sequence_number", "Invalid Sequence Number Count"),
uint32("dup_is_being_sent", "Duplicate Is Being Sent Already Count"),
uint32("sent_pos_ack", "Sent Positive Acknowledge Count"),
uint32("sent_a_dup_reply", "Sent A Duplicate Reply Count"),
uint32("no_mem_for_station", "No Memory For Station Control Count"),
uint32("no_avail_conns", "No Available Connections Count"),
uint32("realloc_slot", "Re-Allocate Slot Count"),
uint32("realloc_slot_came_too_soon", "Re-Allocate Slot Came Too Soon Count"),
], "Server Information")
ServersSrcInfo = struct("servers_src_info", [
ServerNode,
ConnectedLAN,
HopsToNet,
], "Source Server Information")
SpaceStruct = struct("space_struct", [
Level,
MaxSpace,
CurrentSpace,
], "Space Information")
SPXInformation = struct("spx_information", [
uint16("spx_max_conn", "SPX Max Connections Count"),
uint16("spx_max_used_conn", "SPX Max Used Connections"),
uint16("spx_est_conn_req", "SPX Establish Connection Requests"),
uint16("spx_est_conn_fail", "SPX Establish Connection Fail"),
uint16("spx_listen_con_req", "SPX Listen Connect Request"),
uint16("spx_listen_con_fail", "SPX Listen Connect Fail"),
uint32("spx_send", "SPX Send Count"),
uint32("spx_window_choke", "SPX Window Choke Count"),
uint16("spx_bad_send", "SPX Bad Send Count"),
uint16("spx_send_fail", "SPX Send Fail Count"),
uint16("spx_abort_conn", "SPX Aborted Connection"),
uint32("spx_listen_pkt", "SPX Listen Packet Count"),
uint16("spx_bad_listen", "SPX Bad Listen Count"),
uint32("spx_incoming_pkt", "SPX Incoming Packet Count"),
uint16("spx_bad_in_pkt", "SPX Bad In Packet Count"),
uint16("spx_supp_pkt", "SPX Suppressed Packet Count"),
uint16("spx_no_ses_listen", "SPX No Session Listen ECB Count"),
uint16("spx_watch_dog", "SPX Watch Dog Destination Session Count"),
], "SPX Information")
StackInfo = struct("stack_info", [
StackNumber,
fw_string("stack_short_name", "Stack Short Name", 16),
], "Stack Information")
statsInfo = struct("stats_info_struct", [
TotalBytesRead,
TotalBytesWritten,
TotalRequest,
], "Statistics")
TaskStruct = struct("task_struct", [
TaskNumberWord,
TaskState,
], "Task Information")
theTimeStruct = struct("the_time_struct", [
UTCTimeInSeconds,
FractionalSeconds,
TimesyncStatus,
])
timeInfo = struct("time_info", [
Year,
Month,
Day,
Hour,
Minute,
Second,
DayOfWeek,
uint32("login_expiration_time", "Login Expiration Time"),
])
TotalStreamSizeStruct = struct("total_stream_size_struct", [
TtlDSDskSpaceAlloc,
NumberOfDataStreams,
])
TrendCounters = struct("trend_counters", [
uint32("num_of_cache_checks", "Number Of Cache Checks"),
uint32("num_of_cache_hits", "Number Of Cache Hits"),
uint32("num_of_dirty_cache_checks", "Number Of Dirty Cache Checks"),
uint32("num_of_cache_dirty_checks", "Number Of Cache Dirty Checks"),
uint32("cache_used_while_check", "Cache Used While Checking"),
uint32("wait_till_dirty_blcks_dec", "Wait Till Dirty Blocks Decrease Count"),
uint32("alloc_blck_frm_avail", "Allocate Block From Available Count"),
uint32("alloc_blck_frm_lru", "Allocate Block From LRU Count"),
uint32("alloc_blck_already_wait", "Allocate Block Already Waiting"),
uint32("lru_sit_time", "LRU Sitting Time"),
uint32("num_of_cache_check_no_wait", "Number Of Cache Check No Wait"),
uint32("num_of_cache_hits_no_wait", "Number Of Cache Hits No Wait"),
], "Trend Counters")
TrusteeStruct = struct("trustee_struct", [
endian(ObjectID, ENC_LITTLE_ENDIAN),
AccessRightsMaskWord,
])
UpdateDateStruct = struct("update_date_struct", [
UpdateDate,
])
UpdateIDStruct = struct("update_id_struct", [
UpdateID,
])
UpdateTimeStruct = struct("update_time_struct", [
UpdateTime,
])
UserInformation = struct("user_info", [
endian(ConnectionNumber, ENC_LITTLE_ENDIAN),
UseCount,
Reserved2,
ConnectionServiceType,
Year,
Month,
Day,
Hour,
Minute,
Second,
DayOfWeek,
Status,
Reserved2,
ExpirationTime,
ObjectType,
Reserved2,
TransactionTrackingFlag,
LogicalLockThreshold,
FileWriteFlags,
FileWriteState,
Reserved,
FileLockCount,
RecordLockCount,
TotalBytesRead,
TotalBytesWritten,
TotalRequest,
HeldRequests,
HeldBytesRead,
HeldBytesWritten,
], "User Information")
VolInfoStructure = struct("vol_info_struct", [
VolumeType,
Reserved2,
StatusFlagBits,
SectorSize,
SectorsPerClusterLong,
VolumeSizeInClusters,
FreedClusters,
SubAllocFreeableClusters,
FreeableLimboSectors,
NonFreeableLimboSectors,
NonFreeableAvailableSubAllocSectors,
NotUsableSubAllocSectors,
SubAllocClusters,
DataStreamsCount,
LimboDataStreamsCount,
OldestDeletedFileAgeInTicks,
CompressedDataStreamsCount,
CompressedLimboDataStreamsCount,
UnCompressableDataStreamsCount,
PreCompressedSectors,
CompressedSectors,
MigratedFiles,
MigratedSectors,
ClustersUsedByFAT,
ClustersUsedByDirectories,
ClustersUsedByExtendedDirectories,
TotalDirectoryEntries,
UnUsedDirectoryEntries,
TotalExtendedDirectoryExtents,
UnUsedExtendedDirectoryExtents,
ExtendedAttributesDefined,
ExtendedAttributeExtentsUsed,
DirectoryServicesObjectID,
VolumeEpochTime,
], "Volume Information")
VolInfoStructure64 = struct("vol_info_struct64", [
VolumeTypeLong,
StatusFlagBits,
uint64("sectoresize64", "Sector Size"),
uint64("sectorspercluster64", "Sectors Per Cluster"),
uint64("volumesizeinclusters64", "Volume Size in Clusters"),
uint64("freedclusters64", "Freed Clusters"),
uint64("suballocfreeableclusters64", "Sub Alloc Freeable Clusters"),
uint64("freeablelimbosectors64", "Freeable Limbo Sectors"),
uint64("nonfreeablelimbosectors64", "Non-Freeable Limbo Sectors"),
uint64("nonfreeableavailalesuballocsectors64", "Non-Freeable Available Sub Alloc Sectors"),
uint64("notusablesuballocsectors64", "Not Usable Sub Alloc Sectors"),
uint64("suballocclusters64", "Sub Alloc Clusters"),
uint64("datastreamscount64", "Data Streams Count"),
uint64("limbodatastreamscount64", "Limbo Data Streams Count"),
uint64("oldestdeletedfileageinticks64", "Oldest Deleted File Age in Ticks"),
uint64("compressdatastreamscount64", "Compressed Data Streams Count"),
uint64("compressedlimbodatastreamscount64", "Compressed Limbo Data Streams Count"),
uint64("uncompressabledatastreamscount64", "Uncompressable Data Streams Count"),
uint64("precompressedsectors64", "Precompressed Sectors"),
uint64("compressedsectors64", "Compressed Sectors"),
uint64("migratedfiles64", "Migrated Files"),
uint64("migratedsectors64", "Migrated Sectors"),
uint64("clustersusedbyfat64", "Clusters Used by FAT"),
uint64("clustersusedbydirectories64", "Clusters Used by Directories"),
uint64("clustersusedbyextendeddirectories64", "Clusters Used by Extended Directories"),
uint64("totaldirectoryentries64", "Total Directory Entries"),
uint64("unuseddirectoryentries64", "Unused Directory Entries"),
uint64("totalextendeddirectoryextents64", "Total Extended Directory Extents"),
uint64("unusedextendeddirectoryextents64", "Unused Total Extended Directory Extents"),
uint64("extendedattributesdefined64", "Extended Attributes Defined"),
uint64("extendedattributeextentsused64", "Extended Attribute Extents Used"),
uint64("directoryservicesobjectid64", "Directory Services Object ID"),
VolumeEpochTime,
], "Volume Information")
VolInfo2Struct = struct("vol_info_struct_2", [
uint32("volume_active_count", "Volume Active Count"),
uint32("volume_use_count", "Volume Use Count"),
uint32("mac_root_ids", "MAC Root IDs"),
VolumeEpochTime,
uint32("volume_reference_count", "Volume Reference Count"),
uint32("compression_lower_limit", "Compression Lower Limit"),
uint32("outstanding_ios", "Outstanding IOs"),
uint32("outstanding_compression_ios", "Outstanding Compression IOs"),
uint32("compression_ios_limit", "Compression IOs Limit"),
], "Extended Volume Information")
VolumeWithNameStruct = struct("volume_with_name_struct", [
VolumeNumberLong,
VolumeNameLen,
])
VolumeStruct = struct("volume_struct", [
VolumeNumberLong,
])
zFileMap_Allocation = struct("zfilemap_allocation_struct", [
uint64("extent_byte_offset", "Byte Offset"),
endian(uint64("extent_length_alloc", "Length"), ENC_LITTLE_ENDIAN),
#ExtentLength,
], "File Map Allocation")
zFileMap_Logical = struct("zfilemap_logical_struct", [
uint64("extent_block_number", "Block Number"),
uint64("extent_number_of_blocks", "Number of Blocks"),
], "File Map Logical")
zFileMap_Physical = struct("zfilemap_physical_struct", [
uint64("extent_length_physical", "Length"),
uint64("extent_logical_offset", "Logical Offset"),
uint64("extent_pool_offset", "Pool Offset"),
uint64("extent_physical_offset", "Physical Offset"),
fw_string("extent_device_id", "Device ID", 8),
], "File Map Physical")
##############################################################################
# NCP Groups
##############################################################################
def define_groups():
groups['accounting'] = "Accounting"
groups['afp'] = "AFP"
groups['auditing'] = "Auditing"
groups['bindery'] = "Bindery"
groups['connection'] = "Connection"
groups['enhanced'] = "Enhanced File System"
groups['extended'] = "Extended Attribute"
groups['extension'] = "NCP Extension"
groups['file'] = "File System"
groups['fileserver'] = "File Server Environment"
groups['message'] = "Message"
groups['migration'] = "Data Migration"
groups['nds'] = "Novell Directory Services"
groups['pburst'] = "Packet Burst"
groups['print'] = "Print"
groups['remote'] = "Remote"
groups['sync'] = "Synchronization"
groups['tsync'] = "Time Synchronization"
groups['tts'] = "Transaction Tracking"
groups['qms'] = "Queue Management System (QMS)"
groups['stats'] = "Server Statistics"
groups['nmas'] = "Novell Modular Authentication Service"
groups['sss'] = "SecretStore Services"
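# Illustrative note: each packet definition elsewhere in this script
# names one of the keys above as its group, and produce_code() emits
# only the groups actually referenced (see the pkt.Group() culling
# below). A packet in group 'qms', for example, is presented as
# "Queue Management System (QMS)" in the generated dissector.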
##############################################################################
# NCP Errors
##############################################################################
def define_errors():
errors[0x0000] = "Ok"
errors[0x0001] = "Transaction tracking is available"
errors[0x0002] = "Ok. The data has been written"
errors[0x0003] = "Calling Station is a Manager"
errors[0x0100] = "One or more of the Connection Numbers in the send list are invalid"
errors[0x0101] = "Invalid space limit"
errors[0x0102] = "Insufficient disk space"
errors[0x0103] = "Queue server cannot add jobs"
errors[0x0104] = "Out of disk space"
errors[0x0105] = "Semaphore overflow"
errors[0x0106] = "Invalid Parameter"
errors[0x0107] = "Invalid Number of Minutes to Delay"
errors[0x0108] = "Invalid Start or Network Number"
errors[0x0109] = "Cannot Obtain License"
errors[0x010a] = "No Purgeable Files Available"
errors[0x0200] = "One or more clients in the send list are not logged in"
errors[0x0201] = "Queue server cannot attach"
errors[0x0300] = "One or more clients in the send list are not accepting messages"
errors[0x0400] = "Client already has message"
errors[0x0401] = "Queue server cannot service job"
errors[0x7300] = "Revoke Handle Rights Not Found"
errors[0x7700] = "Buffer Too Small"
errors[0x7900] = "Invalid Parameter in Request Packet"
errors[0x7901] = "Nothing being Compressed"
errors[0x7902] = "No Items Found"
errors[0x7a00] = "Connection Already Temporary"
errors[0x7b00] = "Connection Already Logged in"
errors[0x7c00] = "Connection Not Authenticated"
errors[0x7d00] = "Connection Not Logged In"
errors[0x7e00] = "NCP failed boundary check"
errors[0x7e01] = "Invalid Length"
errors[0x7f00] = "Lock Waiting"
errors[0x8000] = "Lock fail"
errors[0x8001] = "File in Use"
errors[0x8100] = "A file handle could not be allocated by the file server"
errors[0x8101] = "Out of File Handles"
errors[0x8200] = "Unauthorized to open the file"
errors[0x8300] = "Unable to read/write the volume. Possible bad sector on the file server"
errors[0x8301] = "Hard I/O Error"
errors[0x8400] = "Unauthorized to create the directory"
errors[0x8401] = "Unauthorized to create the file"
errors[0x8500] = "Unauthorized to delete the specified file"
errors[0x8501] = "Unauthorized to overwrite an existing file in this directory"
errors[0x8700] = "An unexpected character was encountered in the filename"
errors[0x8701] = "Create Filename Error"
errors[0x8800] = "Invalid file handle"
errors[0x8900] = "Unauthorized to search this file/directory"
errors[0x8a00] = "Unauthorized to delete this file/directory"
errors[0x8b00] = "Unauthorized to rename a file in this directory"
errors[0x8c00] = "No set privileges"
errors[0x8c01] = "Unauthorized to modify a file in this directory"
errors[0x8c02] = "Unauthorized to change the restriction on this volume"
errors[0x8d00] = "Some of the affected files are in use by another client"
errors[0x8d01] = "The affected file is in use"
errors[0x8e00] = "All of the affected files are in use by another client"
errors[0x8f00] = "Some of the affected files are read-only"
errors[0x9000] = "An attempt to modify a read-only volume occurred"
errors[0x9001] = "All of the affected files are read-only"
errors[0x9002] = "Read Only Access to Volume"
errors[0x9100] = "Some of the affected files already exist"
errors[0x9101] = "Some Names Exist"
errors[0x9200] = "Directory with the new name already exists"
errors[0x9201] = "All of the affected files already exist"
errors[0x9300] = "Unauthorized to read from this file"
errors[0x9400] = "Unauthorized to write to this file"
errors[0x9500] = "The affected file is detached"
errors[0x9600] = "The file server has run out of memory to service this request"
errors[0x9601] = "No alloc space for message"
errors[0x9602] = "Server Out of Space"
errors[0x9800] = "The affected volume is not mounted"
errors[0x9801] = "The volume associated with Volume Number is not mounted"
errors[0x9802] = "The resulting volume does not exist"
errors[0x9803] = "The destination volume is not mounted"
errors[0x9804] = "Disk Map Error"
errors[0x9900] = "The file server has run out of directory space on the affected volume"
errors[0x9a00] = "Invalid request to rename the affected file to another volume"
errors[0x9b00] = "DirHandle is not associated with a valid directory path"
errors[0x9b01] = "A resulting directory handle is not associated with a valid directory path"
errors[0x9b02] = "The directory associated with DirHandle does not exist"
errors[0x9b03] = "Bad directory handle"
errors[0x9c00] = "The resulting path is not valid"
errors[0x9c01] = "The resulting file path is not valid"
errors[0x9c02] = "The resulting directory path is not valid"
errors[0x9c03] = "Invalid path"
errors[0x9c04] = "No more trustees found, based on requested search sequence number"
errors[0x9d00] = "A directory handle was not available for allocation"
errors[0x9e00] = "The name of the directory does not conform to a legal name for this name space"
errors[0x9e01] = "The new directory name does not conform to a legal name for this name space"
errors[0x9e02] = "Bad File Name"
errors[0x9f00] = "The request attempted to delete a directory that is in use by another client"
errors[0xa000] = "The request attempted to delete a directory that is not empty"
errors[0xa100] = "An unrecoverable error occurred on the affected directory"
errors[0xa200] = "The request attempted to read from a file region that is physically locked"
errors[0xa201] = "I/O Lock Error"
errors[0xa400] = "Invalid directory rename attempted"
errors[0xa500] = "Invalid open create mode"
errors[0xa600] = "Auditor Access has been Removed"
errors[0xa700] = "Error Auditing Version"
errors[0xa800] = "Invalid Support Module ID"
errors[0xa801] = "No Auditing Access Rights"
errors[0xa802] = "No Access Rights"
errors[0xa900] = "Error Link in Path"
errors[0xa901] = "Invalid Path With Junction Present"
errors[0xaa00] = "Invalid Data Type Flag"
errors[0xac00] = "Packet Signature Required"
errors[0xbe00] = "Invalid Data Stream"
errors[0xbf00] = "Requests for this name space are not valid on this volume"
errors[0xc000] = "Unauthorized to retrieve accounting data"
errors[0xc100] = "The ACCOUNT_BALANCE property does not exist"
errors[0xc101] = "No Account Balance"
errors[0xc200] = "The object has exceeded its credit limit"
errors[0xc300] = "Too many holds have been placed against this account"
errors[0xc400] = "The client account has been disabled"
errors[0xc500] = "Access to the account has been denied because of intruder detection"
errors[0xc501] = "Login lockout"
errors[0xc502] = "Server Login Locked"
errors[0xc600] = "The caller does not have operator privileges"
errors[0xc601] = "The client does not have operator privileges"
errors[0xc800] = "Missing EA Key"
errors[0xc900] = "EA Not Found"
errors[0xca00] = "Invalid EA Handle Type"
errors[0xcb00] = "EA No Key No Data"
errors[0xcc00] = "EA Number Mismatch"
errors[0xcd00] = "Extent Number Out of Range"
errors[0xce00] = "EA Bad Directory Number"
errors[0xcf00] = "Invalid EA Handle"
errors[0xd000] = "Queue error"
errors[0xd001] = "EA Position Out of Range"
errors[0xd100] = "The queue does not exist"
errors[0xd101] = "EA Access Denied"
errors[0xd200] = "A queue server is not associated with this queue"
errors[0xd201] = "A queue server is not associated with the selected queue"
errors[0xd202] = "No queue server"
errors[0xd203] = "Data Page Odd Size"
errors[0xd300] = "No queue rights"
errors[0xd301] = "EA Volume Not Mounted"
errors[0xd400] = "The queue is full and cannot accept another request"
errors[0xd401] = "The queue associated with ObjectID is full and cannot accept another request"
errors[0xd402] = "Bad Page Boundary"
errors[0xd500] = "A job does not exist in this queue"
errors[0xd501] = "No queue job"
errors[0xd502] = "The job associated with JobNumber does not exist in this queue"
errors[0xd503] = "Inspect Failure"
errors[0xd504] = "Unknown NCP Extension Number"
errors[0xd600] = "The file server does not allow unencrypted passwords"
errors[0xd601] = "No job right"
errors[0xd602] = "EA Already Claimed"
errors[0xd700] = "Bad account"
errors[0xd701] = "The old and new password strings are identical"
errors[0xd702] = "The job is currently being serviced"
errors[0xd703] = "The queue is currently servicing a job"
errors[0xd704] = "Queue servicing"
errors[0xd705] = "Odd Buffer Size"
errors[0xd800] = "Queue not active"
errors[0xd801] = "No Scorecards"
errors[0xd900] = "The file server cannot accept another connection as it has reached its limit"
errors[0xd901] = "The client is not security equivalent to one of the objects in the Q_SERVERS group property of the target queue"
errors[0xd902] = "Queue Station is not a server"
errors[0xd903] = "Bad EDS Signature"
errors[0xd904] = "Attempt to log in using an account which has limits on the number of concurrent connections and that number has been reached"
errors[0xda00] = "Attempted to login to the file server during a restricted time period"
errors[0xda01] = "Queue halted"
errors[0xda02] = "EA Space Limit"
errors[0xdb00] = "Attempted to login to the file server from an unauthorized workstation or network"
errors[0xdb01] = "The queue cannot attach another queue server"
errors[0xdb02] = "Maximum queue servers"
errors[0xdb03] = "EA Key Corrupt"
errors[0xdc00] = "Account Expired"
errors[0xdc01] = "EA Key Limit"
errors[0xdd00] = "Tally Corrupt"
errors[0xde00] = "Attempted to login to the file server with an incorrect password"
errors[0xdf00] = "Attempted to login to the file server with a password that has expired"
errors[0xe000] = "No Login Connections Available"
errors[0xe700] = "No disk track"
errors[0xe800] = "Write to group"
errors[0xe900] = "The object is already a member of the group property"
errors[0xea00] = "No such member"
errors[0xea01] = "The bindery object is not a member of the set"
errors[0xea02] = "Non-existent member"
errors[0xeb00] = "The property is not a set property"
errors[0xec00] = "No such set"
errors[0xec01] = "The set property does not exist"
errors[0xed00] = "Property exists"
errors[0xed01] = "The property already exists"
errors[0xed02] = "An attempt was made to create a bindery object property that already exists"
errors[0xee00] = "The object already exists"
errors[0xee01] = "The bindery object already exists"
errors[0xef00] = "Illegal name"
errors[0xef01] = "Illegal characters in ObjectName field"
errors[0xef02] = "Invalid name"
errors[0xf000] = "A wildcard was detected in a field that does not support wildcards"
errors[0xf001] = "An illegal wildcard was detected in ObjectName"
errors[0xf100] = "The client does not have the rights to access this bindery object"
errors[0xf101] = "Bindery security"
errors[0xf102] = "Invalid bindery security"
errors[0xf200] = "Unauthorized to read from this object"
errors[0xf300] = "Unauthorized to rename this object"
errors[0xf400] = "Unauthorized to delete this object"
errors[0xf401] = "No object delete privileges"
errors[0xf402] = "Unauthorized to delete this queue"
errors[0xf500] = "Unauthorized to create this object"
errors[0xf501] = "No object create"
errors[0xf600] = "No property delete"
errors[0xf601] = "Unauthorized to delete the property of this object"
errors[0xf602] = "Unauthorized to delete this property"
errors[0xf700] = "Unauthorized to create this property"
errors[0xf701] = "No property create privilege"
errors[0xf800] = "Unauthorized to write to this property"
errors[0xf900] = "Unauthorized to read this property"
errors[0xfa00] = "Temporary remap error"
errors[0xfb00] = "No such property"
errors[0xfb01] = "The file server does not support this request"
errors[0xfb02] = "The specified property does not exist"
errors[0xfb03] = "The PASSWORD property does not exist for this bindery object"
errors[0xfb04] = "NDS NCP not available"
errors[0xfb05] = "Bad Directory Handle"
errors[0xfb06] = "Unknown Request"
errors[0xfb07] = "Invalid Subfunction Request"
errors[0xfb08] = "Attempt to use an invalid parameter (drive number, path, or flag value) during a set drive path call"
errors[0xfb09] = "NMAS not running on this server, NCP NOT Supported"
errors[0xfb0a] = "Station Not Logged In"
errors[0xfb0b] = "Secret Store not running on this server, NCP Not supported"
errors[0xfc00] = "The message queue cannot accept another message"
errors[0xfc01] = "The trustee associated with ObjectID does not exist"
errors[0xfc02] = "The specified bindery object does not exist"
errors[0xfc03] = "The bindery object associated with ObjectID does not exist"
errors[0xfc04] = "A bindery object does not exist that matches"
errors[0xfc05] = "The specified queue does not exist"
errors[0xfc06] = "No such object"
errors[0xfc07] = "The queue associated with ObjectID does not exist"
errors[0xfd00] = "Bad station number"
errors[0xfd01] = "The connection associated with ConnectionNumber is not active"
errors[0xfd02] = "Lock collision"
errors[0xfd03] = "Transaction tracking is disabled"
errors[0xfe00] = "I/O failure"
errors[0xfe01] = "The files containing the bindery on the file server are locked"
errors[0xfe02] = "A file with the specified name already exists in this directory"
errors[0xfe03] = "No more restrictions were found"
errors[0xfe04] = "The file server was unable to lock the file within the specified time limit"
errors[0xfe05] = "The file server was unable to lock all files within the specified time limit"
errors[0xfe06] = "The bindery object associated with ObjectID is not a valid trustee"
errors[0xfe07] = "Directory locked"
errors[0xfe08] = "Bindery locked"
errors[0xfe09] = "Invalid semaphore name length"
errors[0xfe0a] = "The file server was unable to complete the operation within the specified time limit"
errors[0xfe0b] = "Transaction restart"
errors[0xfe0c] = "Bad packet"
errors[0xfe0d] = "Timeout"
errors[0xfe0e] = "User Not Found"
errors[0xfe0f] = "Trustee Not Found"
errors[0xff00] = "Failure"
errors[0xff01] = "Lock error"
errors[0xff02] = "File not found"
errors[0xff03] = "The file not found or cannot be unlocked"
errors[0xff04] = "Record not found"
errors[0xff05] = "The logical record was not found"
errors[0xff06] = "The printer associated with Printer Number does not exist"
errors[0xff07] = "No such printer"
errors[0xff08] = "Unable to complete the request"
errors[0xff09] = "Unauthorized to change privileges of this trustee"
errors[0xff0a] = "No files matching the search criteria were found"
errors[0xff0b] = "A file matching the search criteria was not found"
errors[0xff0c] = "Verification failed"
errors[0xff0d] = "Object associated with ObjectID is not a manager"
errors[0xff0e] = "Invalid initial semaphore value"
errors[0xff0f] = "The semaphore handle is not valid"
errors[0xff10] = "SemaphoreHandle is not associated with a valid semaphore"
errors[0xff11] = "Invalid semaphore handle"
errors[0xff12] = "Transaction tracking is not available"
errors[0xff13] = "The transaction has not yet been written to disk"
errors[0xff14] = "Directory already exists"
errors[0xff15] = "The file already exists and the deletion flag was not set"
errors[0xff16] = "No matching files or directories were found"
errors[0xff17] = "A file or directory matching the search criteria was not found"
errors[0xff18] = "The file already exists"
errors[0xff19] = "Failure, No files found"
errors[0xff1a] = "Unlock Error"
errors[0xff1b] = "I/O Bound Error"
errors[0xff1c] = "Not Accepting Messages"
errors[0xff1d] = "No More Salvageable Files in Directory"
errors[0xff1e] = "Calling Station is Not a Manager"
errors[0xff1f] = "Bindery Failure"
errors[0xff20] = "NCP Extension Not Found"
errors[0xff21] = "Audit Property Not Found"
errors[0xff22] = "Server Set Parameter Not Found"
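# A minimal sketch of how the 16-bit keys above appear to be laid out:
# the high byte is the NCP completion code from the wire and the low
# byte selects among alternate message strings for the same code. This
# helper is illustrative only and is never called by the generator;
# the real culling is done in produce_code() via CompletionCodes().
def lookup_error_example(completion_code, variant=0):
    # e.g. lookup_error_example(0x88) -> "Invalid file handle"
    #      lookup_error_example(0x9c, 0x03) -> "Invalid path"
    return errors.get((completion_code << 8) | variant, "Unknown error")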
##############################################################################
# Produce C code
##############################################################################
def ExamineVars(vars, structs_hash, vars_hash):
for var in vars:
if isinstance(var, struct):
structs_hash[var.HFName()] = var
struct_vars = var.Variables()
ExamineVars(struct_vars, structs_hash, vars_hash)
else:
vars_hash[repr(var)] = var
if isinstance(var, bitfield):
sub_vars = var.SubVariables()
ExamineVars(sub_vars, structs_hash, vars_hash)
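# Usage sketch (hypothetical, not executed): ExamineVars() flattens a
# variable list recursively, so
#
#   structs_hash = {}
#   vars_hash = {}
#   ExamineVars([JobStruct], structs_hash, vars_hash)
#
# registers JobStruct (and its nested JobEntryTime struct) in
# structs_hash keyed by HFName(), and every leaf field in vars_hash
# keyed by repr(); bitfield members contribute their sub-variables too.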
def produce_code():
global errors
print("/*")
print(" * Do not modify this file. Changes will be overwritten.")
print(" * Generated automatically from %s" % (sys.argv[0]))
print(" */\n")
print("""
/*
* Portions Copyright (c) Gilbert Ramirez 2000-2002
* Portions Copyright (c) Novell, Inc. 2000-2005
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include "config.h"
#include <string.h>
#include <glib.h>
#include <epan/packet.h>
#include <epan/dfilter/dfilter.h>
#include <epan/exceptions.h>
#include <ftypes/ftypes.h>
#include <epan/to_str.h>
#include <epan/conversation.h>
#include <epan/ptvcursor.h>
#include <epan/strutil.h>
#include <epan/reassemble.h>
#include <epan/tap.h>
#include <epan/proto_data.h>
#include "packet-ncp-int.h"
#include "packet-ncp-nmas.h"
#include "packet-ncp-sss.h"
/* Function declarations for functions used in proto_register_ncp2222() */
void proto_register_ncp2222(void);
/* Endianness macros */
#define NO_ENDIANNESS 0
#define NO_LENGTH -1
/* We use this int-pointer as a special flag in ptvc_records */
static int ptvc_struct_int_storage;
#define PTVC_STRUCT (&ptvc_struct_int_storage)
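/* Illustrative reading (an assumption, not verified against the
 * dissector code that consumes ptvc_records): a record whose field
 * pointer equals PTVC_STRUCT marks the start of a nested structure
 * rather than a leaf field. */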
/* Values used in the count-variable ("var"/"repeat") logic. */""")
if global_highest_var > -1:
print("#define NUM_REPEAT_VARS %d" % (global_highest_var + 1))
print("static unsigned repeat_vars[NUM_REPEAT_VARS];")
else:
print("#define NUM_REPEAT_VARS 0")
print("static unsigned *repeat_vars = NULL;")
print("""
#define NO_VAR NUM_REPEAT_VARS
#define NO_REPEAT NUM_REPEAT_VARS
#define REQ_COND_SIZE_CONSTANT 0
#define REQ_COND_SIZE_VARIABLE 1
#define NO_REQ_COND_SIZE 0
#define NTREE 0x00020000
#define NDEPTH 0x00000002
#define NREV 0x00000004
#define NFLAGS 0x00000008
static int hf_ncp_number_of_data_streams_long = -1;
static int hf_ncp_func = -1;
static int hf_ncp_length = -1;
static int hf_ncp_subfunc = -1;
static int hf_ncp_group = -1;
static int hf_ncp_fragment_handle = -1;
static int hf_ncp_completion_code = -1;
static int hf_ncp_connection_status = -1;
static int hf_ncp_req_frame_num = -1;
static int hf_ncp_req_frame_time = -1;
static int hf_ncp_fragment_size = -1;
static int hf_ncp_message_size = -1;
static int hf_ncp_nds_flag = -1;
static int hf_ncp_nds_verb = -1;
static int hf_ping_version = -1;
/* static int hf_nds_version = -1; */
/* static int hf_nds_flags = -1; */
static int hf_nds_reply_depth = -1;
static int hf_nds_reply_rev = -1;
static int hf_nds_reply_flags = -1;
static int hf_nds_p1type = -1;
static int hf_nds_uint32value = -1;
static int hf_nds_bit1 = -1;
static int hf_nds_bit2 = -1;
static int hf_nds_bit3 = -1;
static int hf_nds_bit4 = -1;
static int hf_nds_bit5 = -1;
static int hf_nds_bit6 = -1;
static int hf_nds_bit7 = -1;
static int hf_nds_bit8 = -1;
static int hf_nds_bit9 = -1;
static int hf_nds_bit10 = -1;
static int hf_nds_bit11 = -1;
static int hf_nds_bit12 = -1;
static int hf_nds_bit13 = -1;
static int hf_nds_bit14 = -1;
static int hf_nds_bit15 = -1;
static int hf_nds_bit16 = -1;
static int hf_outflags = -1;
static int hf_bit1outflags = -1;
static int hf_bit2outflags = -1;
static int hf_bit3outflags = -1;
static int hf_bit4outflags = -1;
static int hf_bit5outflags = -1;
static int hf_bit6outflags = -1;
static int hf_bit7outflags = -1;
static int hf_bit8outflags = -1;
static int hf_bit9outflags = -1;
static int hf_bit10outflags = -1;
static int hf_bit11outflags = -1;
static int hf_bit12outflags = -1;
static int hf_bit13outflags = -1;
static int hf_bit14outflags = -1;
static int hf_bit15outflags = -1;
static int hf_bit16outflags = -1;
static int hf_bit1nflags = -1;
static int hf_bit2nflags = -1;
static int hf_bit3nflags = -1;
static int hf_bit4nflags = -1;
static int hf_bit5nflags = -1;
static int hf_bit6nflags = -1;
static int hf_bit7nflags = -1;
static int hf_bit8nflags = -1;
static int hf_bit9nflags = -1;
static int hf_bit10nflags = -1;
static int hf_bit11nflags = -1;
static int hf_bit12nflags = -1;
static int hf_bit13nflags = -1;
static int hf_bit14nflags = -1;
static int hf_bit15nflags = -1;
static int hf_bit16nflags = -1;
static int hf_bit1rflags = -1;
static int hf_bit2rflags = -1;
static int hf_bit3rflags = -1;
static int hf_bit4rflags = -1;
static int hf_bit5rflags = -1;
static int hf_bit6rflags = -1;
static int hf_bit7rflags = -1;
static int hf_bit8rflags = -1;
static int hf_bit9rflags = -1;
static int hf_bit10rflags = -1;
static int hf_bit11rflags = -1;
static int hf_bit12rflags = -1;
static int hf_bit13rflags = -1;
static int hf_bit14rflags = -1;
static int hf_bit15rflags = -1;
static int hf_bit16rflags = -1;
static int hf_cflags = -1;
static int hf_bit1cflags = -1;
static int hf_bit2cflags = -1;
static int hf_bit3cflags = -1;
static int hf_bit4cflags = -1;
static int hf_bit5cflags = -1;
static int hf_bit6cflags = -1;
static int hf_bit7cflags = -1;
static int hf_bit8cflags = -1;
static int hf_bit9cflags = -1;
static int hf_bit10cflags = -1;
static int hf_bit11cflags = -1;
static int hf_bit12cflags = -1;
static int hf_bit13cflags = -1;
static int hf_bit14cflags = -1;
static int hf_bit15cflags = -1;
static int hf_bit16cflags = -1;
static int hf_bit1acflags = -1;
static int hf_bit2acflags = -1;
static int hf_bit3acflags = -1;
static int hf_bit4acflags = -1;
static int hf_bit5acflags = -1;
static int hf_bit6acflags = -1;
static int hf_bit7acflags = -1;
static int hf_bit8acflags = -1;
static int hf_bit9acflags = -1;
static int hf_bit10acflags = -1;
static int hf_bit11acflags = -1;
static int hf_bit12acflags = -1;
static int hf_bit13acflags = -1;
static int hf_bit14acflags = -1;
static int hf_bit15acflags = -1;
static int hf_bit16acflags = -1;
static int hf_vflags = -1;
static int hf_bit1vflags = -1;
static int hf_bit2vflags = -1;
static int hf_bit3vflags = -1;
static int hf_bit4vflags = -1;
static int hf_bit5vflags = -1;
static int hf_bit6vflags = -1;
static int hf_bit7vflags = -1;
static int hf_bit8vflags = -1;
static int hf_bit9vflags = -1;
static int hf_bit10vflags = -1;
static int hf_bit11vflags = -1;
static int hf_bit12vflags = -1;
static int hf_bit13vflags = -1;
static int hf_bit14vflags = -1;
static int hf_bit15vflags = -1;
static int hf_bit16vflags = -1;
static int hf_eflags = -1;
static int hf_bit1eflags = -1;
static int hf_bit2eflags = -1;
static int hf_bit3eflags = -1;
static int hf_bit4eflags = -1;
static int hf_bit5eflags = -1;
static int hf_bit6eflags = -1;
static int hf_bit7eflags = -1;
static int hf_bit8eflags = -1;
static int hf_bit9eflags = -1;
static int hf_bit10eflags = -1;
static int hf_bit11eflags = -1;
static int hf_bit12eflags = -1;
static int hf_bit13eflags = -1;
static int hf_bit14eflags = -1;
static int hf_bit15eflags = -1;
static int hf_bit16eflags = -1;
static int hf_infoflagsl = -1;
static int hf_retinfoflagsl = -1;
static int hf_bit1infoflagsl = -1;
static int hf_bit2infoflagsl = -1;
static int hf_bit3infoflagsl = -1;
static int hf_bit4infoflagsl = -1;
static int hf_bit5infoflagsl = -1;
static int hf_bit6infoflagsl = -1;
static int hf_bit7infoflagsl = -1;
static int hf_bit8infoflagsl = -1;
static int hf_bit9infoflagsl = -1;
static int hf_bit10infoflagsl = -1;
static int hf_bit11infoflagsl = -1;
static int hf_bit12infoflagsl = -1;
static int hf_bit13infoflagsl = -1;
static int hf_bit14infoflagsl = -1;
static int hf_bit15infoflagsl = -1;
static int hf_bit16infoflagsl = -1;
static int hf_infoflagsh = -1;
static int hf_bit1infoflagsh = -1;
static int hf_bit2infoflagsh = -1;
static int hf_bit3infoflagsh = -1;
static int hf_bit4infoflagsh = -1;
static int hf_bit5infoflagsh = -1;
static int hf_bit6infoflagsh = -1;
static int hf_bit7infoflagsh = -1;
static int hf_bit8infoflagsh = -1;
static int hf_bit9infoflagsh = -1;
static int hf_bit10infoflagsh = -1;
static int hf_bit11infoflagsh = -1;
static int hf_bit12infoflagsh = -1;
static int hf_bit13infoflagsh = -1;
static int hf_bit14infoflagsh = -1;
static int hf_bit15infoflagsh = -1;
static int hf_bit16infoflagsh = -1;
static int hf_retinfoflagsh = -1;
static int hf_bit1retinfoflagsh = -1;
static int hf_bit2retinfoflagsh = -1;
static int hf_bit3retinfoflagsh = -1;
static int hf_bit4retinfoflagsh = -1;
static int hf_bit5retinfoflagsh = -1;
static int hf_bit6retinfoflagsh = -1;
static int hf_bit7retinfoflagsh = -1;
static int hf_bit8retinfoflagsh = -1;
static int hf_bit9retinfoflagsh = -1;
static int hf_bit10retinfoflagsh = -1;
static int hf_bit11retinfoflagsh = -1;
static int hf_bit12retinfoflagsh = -1;
static int hf_bit13retinfoflagsh = -1;
static int hf_bit14retinfoflagsh = -1;
static int hf_bit15retinfoflagsh = -1;
static int hf_bit16retinfoflagsh = -1;
static int hf_bit1lflags = -1;
static int hf_bit2lflags = -1;
static int hf_bit3lflags = -1;
static int hf_bit4lflags = -1;
static int hf_bit5lflags = -1;
static int hf_bit6lflags = -1;
static int hf_bit7lflags = -1;
static int hf_bit8lflags = -1;
static int hf_bit9lflags = -1;
static int hf_bit10lflags = -1;
static int hf_bit11lflags = -1;
static int hf_bit12lflags = -1;
static int hf_bit13lflags = -1;
static int hf_bit14lflags = -1;
static int hf_bit15lflags = -1;
static int hf_bit16lflags = -1;
static int hf_l1flagsl = -1;
static int hf_l1flagsh = -1;
static int hf_bit1l1flagsl = -1;
static int hf_bit2l1flagsl = -1;
static int hf_bit3l1flagsl = -1;
static int hf_bit4l1flagsl = -1;
static int hf_bit5l1flagsl = -1;
static int hf_bit6l1flagsl = -1;
static int hf_bit7l1flagsl = -1;
static int hf_bit8l1flagsl = -1;
static int hf_bit9l1flagsl = -1;
static int hf_bit10l1flagsl = -1;
static int hf_bit11l1flagsl = -1;
static int hf_bit12l1flagsl = -1;
static int hf_bit13l1flagsl = -1;
static int hf_bit14l1flagsl = -1;
static int hf_bit15l1flagsl = -1;
static int hf_bit16l1flagsl = -1;
static int hf_bit1l1flagsh = -1;
static int hf_bit2l1flagsh = -1;
static int hf_bit3l1flagsh = -1;
static int hf_bit4l1flagsh = -1;
static int hf_bit5l1flagsh = -1;
static int hf_bit6l1flagsh = -1;
static int hf_bit7l1flagsh = -1;
static int hf_bit8l1flagsh = -1;
static int hf_bit9l1flagsh = -1;
static int hf_bit10l1flagsh = -1;
static int hf_bit11l1flagsh = -1;
static int hf_bit12l1flagsh = -1;
static int hf_bit13l1flagsh = -1;
static int hf_bit14l1flagsh = -1;
static int hf_bit15l1flagsh = -1;
static int hf_bit16l1flagsh = -1;
static int hf_nds_tree_name = -1;
static int hf_nds_reply_error = -1;
static int hf_nds_net = -1;
static int hf_nds_node = -1;
static int hf_nds_socket = -1;
static int hf_add_ref_ip = -1;
static int hf_add_ref_udp = -1;
static int hf_add_ref_tcp = -1;
static int hf_referral_record = -1;
static int hf_referral_addcount = -1;
static int hf_nds_port = -1;
static int hf_mv_string = -1;
static int hf_nds_syntax = -1;
static int hf_value_string = -1;
static int hf_nds_buffer_size = -1;
static int hf_nds_ver = -1;
static int hf_nds_nflags = -1;
static int hf_nds_scope = -1;
static int hf_nds_name = -1;
static int hf_nds_comm_trans = -1;
static int hf_nds_tree_trans = -1;
static int hf_nds_iteration = -1;
static int hf_nds_eid = -1;
static int hf_nds_info_type = -1;
static int hf_nds_all_attr = -1;
static int hf_nds_req_flags = -1;
static int hf_nds_attr = -1;
static int hf_nds_crc = -1;
static int hf_nds_referrals = -1;
static int hf_nds_result_flags = -1;
static int hf_nds_tag_string = -1;
static int hf_value_bytes = -1;
static int hf_replica_type = -1;
static int hf_replica_state = -1;
static int hf_replica_number = -1;
static int hf_min_nds_ver = -1;
static int hf_nds_ver_include = -1;
static int hf_nds_ver_exclude = -1;
/* static int hf_nds_es = -1; */
static int hf_es_type = -1;
/* static int hf_delim_string = -1; */
static int hf_rdn_string = -1;
static int hf_nds_revent = -1;
static int hf_nds_rnum = -1;
static int hf_nds_name_type = -1;
static int hf_nds_rflags = -1;
static int hf_nds_eflags = -1;
static int hf_nds_depth = -1;
static int hf_nds_class_def_type = -1;
static int hf_nds_classes = -1;
static int hf_nds_return_all_classes = -1;
static int hf_nds_stream_flags = -1;
static int hf_nds_stream_name = -1;
static int hf_nds_file_handle = -1;
static int hf_nds_file_size = -1;
static int hf_nds_dn_output_type = -1;
static int hf_nds_nested_output_type = -1;
static int hf_nds_output_delimiter = -1;
static int hf_nds_output_entry_specifier = -1;
static int hf_es_value = -1;
static int hf_es_rdn_count = -1;
static int hf_nds_replica_num = -1;
static int hf_nds_event_num = -1;
static int hf_es_seconds = -1;
static int hf_nds_compare_results = -1;
static int hf_nds_parent = -1;
static int hf_nds_name_filter = -1;
static int hf_nds_class_filter = -1;
static int hf_nds_time_filter = -1;
static int hf_nds_partition_root_id = -1;
static int hf_nds_replicas = -1;
static int hf_nds_purge = -1;
static int hf_nds_local_partition = -1;
static int hf_partition_busy = -1;
static int hf_nds_number_of_changes = -1;
static int hf_sub_count = -1;
static int hf_nds_revision = -1;
static int hf_nds_base_class = -1;
static int hf_nds_relative_dn = -1;
/* static int hf_nds_root_dn = -1; */
/* static int hf_nds_parent_dn = -1; */
static int hf_deref_base = -1;
/* static int hf_nds_entry_info = -1; */
static int hf_nds_base = -1;
static int hf_nds_privileges = -1;
static int hf_nds_vflags = -1;
static int hf_nds_value_len = -1;
static int hf_nds_cflags = -1;
static int hf_nds_acflags = -1;
static int hf_nds_asn1 = -1;
static int hf_nds_upper = -1;
static int hf_nds_lower = -1;
static int hf_nds_trustee_dn = -1;
static int hf_nds_attribute_dn = -1;
static int hf_nds_acl_add = -1;
static int hf_nds_acl_del = -1;
static int hf_nds_att_add = -1;
static int hf_nds_att_del = -1;
static int hf_nds_keep = -1;
static int hf_nds_new_rdn = -1;
static int hf_nds_time_delay = -1;
static int hf_nds_root_name = -1;
static int hf_nds_new_part_id = -1;
static int hf_nds_child_part_id = -1;
static int hf_nds_master_part_id = -1;
static int hf_nds_target_name = -1;
static int hf_nds_super = -1;
static int hf_pingflags2 = -1;
static int hf_bit1pingflags2 = -1;
static int hf_bit2pingflags2 = -1;
static int hf_bit3pingflags2 = -1;
static int hf_bit4pingflags2 = -1;
static int hf_bit5pingflags2 = -1;
static int hf_bit6pingflags2 = -1;
static int hf_bit7pingflags2 = -1;
static int hf_bit8pingflags2 = -1;
static int hf_bit9pingflags2 = -1;
static int hf_bit10pingflags2 = -1;
static int hf_bit11pingflags2 = -1;
static int hf_bit12pingflags2 = -1;
static int hf_bit13pingflags2 = -1;
static int hf_bit14pingflags2 = -1;
static int hf_bit15pingflags2 = -1;
static int hf_bit16pingflags2 = -1;
static int hf_pingflags1 = -1;
static int hf_bit1pingflags1 = -1;
static int hf_bit2pingflags1 = -1;
static int hf_bit3pingflags1 = -1;
static int hf_bit4pingflags1 = -1;
static int hf_bit5pingflags1 = -1;
static int hf_bit6pingflags1 = -1;
static int hf_bit7pingflags1 = -1;
static int hf_bit8pingflags1 = -1;
static int hf_bit9pingflags1 = -1;
static int hf_bit10pingflags1 = -1;
static int hf_bit11pingflags1 = -1;
static int hf_bit12pingflags1 = -1;
static int hf_bit13pingflags1 = -1;
static int hf_bit14pingflags1 = -1;
static int hf_bit15pingflags1 = -1;
static int hf_bit16pingflags1 = -1;
static int hf_pingpflags1 = -1;
static int hf_bit1pingpflags1 = -1;
static int hf_bit2pingpflags1 = -1;
static int hf_bit3pingpflags1 = -1;
static int hf_bit4pingpflags1 = -1;
static int hf_bit5pingpflags1 = -1;
static int hf_bit6pingpflags1 = -1;
static int hf_bit7pingpflags1 = -1;
static int hf_bit8pingpflags1 = -1;
static int hf_bit9pingpflags1 = -1;
static int hf_bit10pingpflags1 = -1;
static int hf_bit11pingpflags1 = -1;
static int hf_bit12pingpflags1 = -1;
static int hf_bit13pingpflags1 = -1;
static int hf_bit14pingpflags1 = -1;
static int hf_bit15pingpflags1 = -1;
static int hf_bit16pingpflags1 = -1;
static int hf_pingvflags1 = -1;
static int hf_bit1pingvflags1 = -1;
static int hf_bit2pingvflags1 = -1;
static int hf_bit3pingvflags1 = -1;
static int hf_bit4pingvflags1 = -1;
static int hf_bit5pingvflags1 = -1;
static int hf_bit6pingvflags1 = -1;
static int hf_bit7pingvflags1 = -1;
static int hf_bit8pingvflags1 = -1;
static int hf_bit9pingvflags1 = -1;
static int hf_bit10pingvflags1 = -1;
static int hf_bit11pingvflags1 = -1;
static int hf_bit12pingvflags1 = -1;
static int hf_bit13pingvflags1 = -1;
static int hf_bit14pingvflags1 = -1;
static int hf_bit15pingvflags1 = -1;
static int hf_bit16pingvflags1 = -1;
static int hf_nds_letter_ver = -1;
static int hf_nds_os_majver = -1;
static int hf_nds_os_minver = -1;
static int hf_nds_lic_flags = -1;
static int hf_nds_ds_time = -1;
static int hf_nds_ping_version = -1;
static int hf_nds_search_scope = -1;
static int hf_nds_num_objects = -1;
static int hf_siflags = -1;
static int hf_bit1siflags = -1;
static int hf_bit2siflags = -1;
static int hf_bit3siflags = -1;
static int hf_bit4siflags = -1;
static int hf_bit5siflags = -1;
static int hf_bit6siflags = -1;
static int hf_bit7siflags = -1;
static int hf_bit8siflags = -1;
static int hf_bit9siflags = -1;
static int hf_bit10siflags = -1;
static int hf_bit11siflags = -1;
static int hf_bit12siflags = -1;
static int hf_bit13siflags = -1;
static int hf_bit14siflags = -1;
static int hf_bit15siflags = -1;
static int hf_bit16siflags = -1;
static int hf_nds_segments = -1;
static int hf_nds_segment = -1;
static int hf_nds_segment_overlap = -1;
static int hf_nds_segment_overlap_conflict = -1;
static int hf_nds_segment_multiple_tails = -1;
static int hf_nds_segment_too_long_segment = -1;
static int hf_nds_segment_error = -1;
static int hf_nds_segment_count = -1;
static int hf_nds_reassembled_length = -1;
static int hf_nds_verb2b_req_flags = -1;
static int hf_ncp_ip_address = -1;
static int hf_ncp_copyright = -1;
static int hf_ndsprot1flag = -1;
static int hf_ndsprot2flag = -1;
static int hf_ndsprot3flag = -1;
static int hf_ndsprot4flag = -1;
static int hf_ndsprot5flag = -1;
static int hf_ndsprot6flag = -1;
static int hf_ndsprot7flag = -1;
static int hf_ndsprot8flag = -1;
static int hf_ndsprot9flag = -1;
static int hf_ndsprot10flag = -1;
static int hf_ndsprot11flag = -1;
static int hf_ndsprot12flag = -1;
static int hf_ndsprot13flag = -1;
static int hf_ndsprot14flag = -1;
static int hf_ndsprot15flag = -1;
static int hf_ndsprot16flag = -1;
static int hf_nds_svr_dst_name = -1;
static int hf_nds_tune_mark = -1;
/* static int hf_nds_create_time = -1; */
static int hf_srvr_param_number = -1;
static int hf_srvr_param_boolean = -1;
static int hf_srvr_param_string = -1;
static int hf_nds_svr_time = -1;
static int hf_nds_crt_time = -1;
static int hf_nds_number_of_items = -1;
static int hf_nds_compare_attributes = -1;
static int hf_nds_read_attribute = -1;
static int hf_nds_write_add_delete_attribute = -1;
static int hf_nds_add_delete_self = -1;
static int hf_nds_privilege_not_defined = -1;
static int hf_nds_supervisor = -1;
static int hf_nds_inheritance_control = -1;
static int hf_nds_browse_entry = -1;
static int hf_nds_add_entry = -1;
static int hf_nds_delete_entry = -1;
static int hf_nds_rename_entry = -1;
static int hf_nds_supervisor_entry = -1;
static int hf_nds_entry_privilege_not_defined = -1;
static int hf_nds_iterator = -1;
static int hf_ncp_nds_iterverb = -1;
static int hf_iter_completion_code = -1;
/* static int hf_nds_iterobj = -1; */
static int hf_iter_verb_completion_code = -1;
static int hf_iter_ans = -1;
static int hf_positionable = -1;
static int hf_num_skipped = -1;
static int hf_num_to_skip = -1;
static int hf_timelimit = -1;
static int hf_iter_index = -1;
static int hf_num_to_get = -1;
/* static int hf_ret_info_type = -1; */
static int hf_data_size = -1;
static int hf_this_count = -1;
static int hf_max_entries = -1;
static int hf_move_position = -1;
static int hf_iter_copy = -1;
static int hf_iter_position = -1;
static int hf_iter_search = -1;
static int hf_iter_other = -1;
static int hf_nds_oid = -1;
static int hf_ncp_bytes_actually_trans_64 = -1;
static int hf_sap_name = -1;
static int hf_os_name = -1;
static int hf_vendor_name = -1;
static int hf_hardware_name = -1;
static int hf_no_request_record_found = -1;
static int hf_search_modifier = -1;
static int hf_search_pattern = -1;
static int hf_nds_acl_protected_attribute = -1;
static int hf_nds_acl_subject = -1;
static int hf_nds_acl_privileges = -1;
static expert_field ei_ncp_file_rights_change = EI_INIT;
static expert_field ei_ncp_completion_code = EI_INIT;
static expert_field ei_nds_reply_error = EI_INIT;
static expert_field ei_ncp_destroy_connection = EI_INIT;
static expert_field ei_nds_iteration = EI_INIT;
static expert_field ei_ncp_eid = EI_INIT;
static expert_field ei_ncp_file_handle = EI_INIT;
static expert_field ei_ncp_connection_destroyed = EI_INIT;
static expert_field ei_ncp_no_request_record_found = EI_INIT;
static expert_field ei_ncp_file_rights = EI_INIT;
static expert_field ei_iter_verb_completion_code = EI_INIT;
static expert_field ei_ncp_connection_request = EI_INIT;
static expert_field ei_ncp_connection_status = EI_INIT;
static expert_field ei_ncp_op_lock_handle = EI_INIT;
static expert_field ei_ncp_effective_rights = EI_INIT;
static expert_field ei_ncp_server = EI_INIT;
static expert_field ei_ncp_invalid_offset = EI_INIT;
static expert_field ei_ncp_address_type = EI_INIT;
static expert_field ei_ncp_value_too_large = EI_INIT;
""")
# Look at all packet types in the packets collection, and cull information
# from them.
errors_used_list = []
errors_used_hash = {}
groups_used_list = []
groups_used_hash = {}
variables_used_hash = {}
structs_used_hash = {}
for pkt in packets:
# Determine which error codes are used.
codes = pkt.CompletionCodes()
for code in codes.Records():
if code not in errors_used_hash:
errors_used_hash[code] = len(errors_used_list)
errors_used_list.append(code)
# Determine which groups are used.
group = pkt.Group()
if group not in groups_used_hash:
groups_used_hash[group] = len(groups_used_list)
groups_used_list.append(group)
# Determine which variables are used.
pkt_vars = pkt.Variables()
ExamineVars(pkt_vars, structs_used_hash, variables_used_hash)
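# A minimal, illustrative sketch (not called by the generator) of the
# "first-seen index" pattern used above for errors and groups: the hash
# maps each value to its position in the parallel list, so every value
# is recorded once, in encounter order.
def _index_first_seen(values):
    order = []
    index = {}
    for value in values:
        if value not in index:
            index[value] = len(order)
            order.append(value)
    return order, index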
# Print the hf variable declarations
sorted_vars = list(variables_used_hash.values())
sorted_vars.sort()
for var in sorted_vars:
print("static int " + var.HFName() + " = -1;")
# Print the value_string's
for var in sorted_vars:
if isinstance(var, val_string):
print("")
print(var.Code())
# Determine which error codes are not used
errors_not_used = {}
# Copy the keys from the error list...
for code in list(errors.keys()):
errors_not_used[code] = 1
# ... and remove the ones that *were* used.
for code in errors_used_list:
del errors_not_used[code]
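# Illustrative-only equivalent of the copy-then-delete dance above,
# phrased as a set difference over the same structures.
def _unused_error_codes(all_errors, used_codes):
    return sorted(set(all_errors) - set(used_codes))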
# Print a remark showing errors not used
list_errors_not_used = list(errors_not_used.keys())
list_errors_not_used.sort()
for code in list_errors_not_used:
print("/* Error 0x%04x not used: %s */" % (code, errors[code]))
print("\n")
# Print the errors table
print("/* Error strings. */")
print("static const char *ncp_errors[] = {")
for code in errors_used_list:
print(' /* %02d (0x%04x) */ "%s",' % (errors_used_hash[code], code, errors[code]))
print("};\n")
# Determine which groups are not used
groups_not_used = {}
# Copy the keys from the group list...
for group in list(groups.keys()):
groups_not_used[group] = 1
# ... and remove the ones that *were* used.
for group in groups_used_list:
del groups_not_used[group]
# Print a remark showing groups not used
list_groups_not_used = list(groups_not_used.keys())
list_groups_not_used.sort()
for group in list_groups_not_used:
print("/* Group not used: %s = %s */" % (group, groups[group]))
print("\n")
# Print the groups table
print("/* Group strings. */")
print("static const char *ncp_groups[] = {")
for group in groups_used_list:
print(' /* %02d (%s) */ "%s",' % (groups_used_hash[group], group, groups[group]))
print("};\n")
# Print the group macros
for group in groups_used_list:
name = group.upper()
print("#define NCP_GROUP_%s %d" % (name, groups_used_hash[group]))
print("\n")
# Print the conditional_records for all Request Conditions.
num = 0
print("/* Request-Condition dfilter records. The NULL pointer")
print(" is replaced by a pointer to the created dfilter_t. */")
if len(global_req_cond) == 0:
print("static conditional_record req_conds = NULL;")
else:
print("static conditional_record req_conds[] = {")
req_cond_l = list(global_req_cond.keys())
req_cond_l.sort()
for req_cond in req_cond_l:
print(" { \"%s\", NULL }," % (req_cond,))
global_req_cond[req_cond] = num
num += 1
print("};")
print("#define NUM_REQ_CONDS %d" % (num,))
print("#define NO_REQ_COND NUM_REQ_CONDS\n\n")
# Print PTVC's for bitfields
ett_list = []
print("/* PTVC records for bit-fields. */")
for var in sorted_vars:
if isinstance(var, bitfield):
sub_vars_ptvc = var.SubVariablesPTVC()
print("/* %s */" % (sub_vars_ptvc.Name()))
print(sub_vars_ptvc.Code())
ett_list.append(sub_vars_ptvc.ETTName())
# Print the PTVC's for structures
print("/* PTVC records for structs. */")
# Sort them
svhash = {}
for svar in list(structs_used_hash.values()):
svhash[svar.HFName()] = svar
if svar.descr:
ett_list.append(svar.ETTName())
struct_vars = list(svhash.keys())
struct_vars.sort()
for varname in struct_vars:
var = svhash[varname]
print(var.Code())
ett_list.sort()
# Print info string structures
print("/* Info Strings */")
for pkt in packets:
if pkt.req_info_str:
name = pkt.InfoStrName() + "_req"
var = pkt.req_info_str[0]
print("static const info_string_t %s = {" % (name,))
print(" &%s," % (var.HFName(),))
print(' "%s",' % (pkt.req_info_str[1],))
print(' "%s"' % (pkt.req_info_str[2],))
print("};\n")
# Print regular PTVC's
print("/* PTVC records. These are re-used to save space. */")
for ptvc in ptvc_lists.Members():
if not ptvc.Null() and not ptvc.Empty():
print(ptvc.Code())
# Print error_equivalency tables
print("/* Error-Equivalency Tables. These are re-used to save space. */")
for compcodes in compcode_lists.Members():
errors = compcodes.Records()
# Make sure the record for error = 0x00 comes last.
print("static const error_equivalency %s[] = {" % (compcodes.Name()))
for error in errors:
error_in_packet = error >> 8
ncp_error_index = errors_used_hash[error]
print(" { 0x%02x, %d }, /* 0x%04x */" % (error_in_packet,
ncp_error_index, error))
print(" { 0x00, -1 }\n};\n")
# Print integer arrays for all ncp_records that need
# a list of req_cond_indexes. Do it "uniquely" to save space;
# if multiple packets share the same set of req_cond's,
# then they'll share the same integer array.
# First, make them unique
req_cond_collection = UniqueCollection("req_cond_collection")
for pkt in packets:
req_conds = pkt.CalculateReqConds()
if req_conds:
unique_list = req_cond_collection.Add(req_conds)
pkt.SetReqConds(unique_list)
else:
pkt.SetReqConds(None)
# Print them
for req_cond in req_cond_collection.Members():
sys.stdout.write("static const int %s[] = {" % (req_cond.Name()))
sys.stdout.write(" ")
vals = []
for text in req_cond.Records():
vals.append(global_req_cond[text])
vals.sort()
for val in vals:
sys.stdout.write("%s, " % (val,))
print("-1 };")
print("")
# Functions without length parameter
funcs_without_length = {}
print("/* Forward declaration of expert info functions defined in ncp2222.inc */")
for expert in expert_hash:
print("static void %s_expert_func(ptvcursor_t *ptvc, packet_info *pinfo, const ncp_record *ncp_rec, bool request);" % expert)
# Print ncp_record packet records
print("#define SUBFUNC_WITH_LENGTH 0x02")
print("#define SUBFUNC_NO_LENGTH 0x01")
print("#define NO_SUBFUNC 0x00")
print("/* ncp_record structs for packets */")
print("static const ncp_record ncp_packets[] = {")
for pkt in packets:
if pkt.HasSubFunction():
func = pkt.FunctionCode('high')
if pkt.HasLength():
subfunc_string = "SUBFUNC_WITH_LENGTH"
# A function must be consistent: either it always has a length param or it never does.
if func in funcs_without_length:
sys.exit("Function 0x%04x sometimes has length param, sometimes not." \
% (pkt.FunctionCode(),))
else:
subfunc_string = "SUBFUNC_NO_LENGTH"
funcs_without_length[func] = 1
else:
subfunc_string = "NO_SUBFUNC"
sys.stdout.write(' { 0x%02x, 0x%02x, %s, "%s",' % (pkt.FunctionCode('high'),
pkt.FunctionCode('low'), subfunc_string, pkt.Description()))
print(' %d /* %s */,' % (groups_used_hash[pkt.Group()], pkt.Group()))
ptvc = pkt.PTVCRequest()
if not ptvc.Null() and not ptvc.Empty():
ptvc_request = ptvc.Name()
else:
ptvc_request = 'NULL'
ptvc = pkt.PTVCReply()
if not ptvc.Null() and not ptvc.Empty():
ptvc_reply = ptvc.Name()
else:
ptvc_reply = 'NULL'
errors = pkt.CompletionCodes()
req_conds_obj = pkt.GetReqConds()
if req_conds_obj:
req_conds = req_conds_obj.Name()
else:
req_conds = "NULL"
if not req_conds_obj:
req_cond_size = "NO_REQ_COND_SIZE"
else:
req_cond_size = pkt.ReqCondSize()
if req_cond_size is None:
msg.write("NCP packet %s needs a ReqCondSize*() call\n" \
% (pkt.CName(),))
sys.exit(1)
if pkt.expert_func:
expert_func = "&" + pkt.expert_func + "_expert_func"
else:
expert_func = "NULL"
print(' %s, %s, %s, %s, %s, %s },\n' % \
(ptvc_request, ptvc_reply, errors.Name(), req_conds,
req_cond_size, expert_func))
print(' { 0, 0, 0, NULL, 0, NULL, NULL, NULL, NULL, NO_REQ_COND_SIZE, NULL }')
print("};\n")
print("/* ncp funcs that require a subfunc */")
print("static const uint8_t ncp_func_requires_subfunc[] = {")
hi_seen = {}
for pkt in packets:
if pkt.HasSubFunction():
hi_func = pkt.FunctionCode('high')
if hi_func not in hi_seen:
print(" 0x%02x," % (hi_func))
hi_seen[hi_func] = 1
print(" 0")
print("};\n")
print("/* ncp funcs that have no length parameter */")
print("static const uint8_t ncp_func_has_no_length_parameter[] = {")
funcs = list(funcs_without_length.keys())
funcs.sort()
for func in funcs:
print(" 0x%02x," % (func,))
print(" 0")
print("};\n")
print("")
# proto_register_ncp2222()
print("""
static const value_string connection_status_vals[] = {
{ 0x00, "Ok" },
{ 0x01, "Bad Service Connection" },
{ 0x10, "File Server is Down" },
{ 0x40, "Broadcast Message Pending" },
{ 0, NULL }
};
#include "packet-ncp2222.inc"
void
proto_register_ncp2222(void)
{
static hf_register_info hf[] = {
{ &hf_ncp_number_of_data_streams_long,
{ "Number of Data Streams", "ncp.number_of_data_streams_long", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_func,
{ "Function", "ncp.func", FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_length,
{ "Packet Length", "ncp.length", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_subfunc,
{ "SubFunction", "ncp.subfunc", FT_UINT8, BASE_DEC_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_completion_code,
{ "Completion Code", "ncp.completion_code", FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_group,
{ "NCP Group Type", "ncp.group", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_fragment_handle,
{ "NDS Fragment Handle", "ncp.ndsfrag", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_fragment_size,
{ "NDS Fragment Size", "ncp.ndsfragsize", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_message_size,
{ "Message Size", "ncp.ndsmessagesize", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_nds_flag,
{ "NDS Protocol Flags", "ncp.ndsflag", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_nds_verb,
{ "NDS Verb", "ncp.ndsverb", FT_UINT8, BASE_HEX, VALS(ncp_nds_verb_vals), 0x0, NULL, HFILL }},
{ &hf_ping_version,
{ "NDS Version", "ncp.ping_version", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
#if 0 /* Unused ? */
{ &hf_nds_version,
{ "NDS Version", "ncp.nds_version", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
#endif
{ &hf_nds_tree_name,
{ "NDS Tree Name", "ncp.nds_tree_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
/*
* XXX - the page at
*
* https://web.archive.org/web/20030629082113/http://www.odyssea.com/whats_new/tcpipnet/tcpipnet.html
*
* says of the connection status "The Connection Code field may
* contain values that indicate the status of the client host to
* server connection. A value of 1 in the fourth bit of this data
* byte indicates that the server is unavailable (server was
* downed)."
*
* The page at
*
* https://web.archive.org/web/20090809191415/http://www.unm.edu/~network/presentations/course/appendix/appendix_f/tsld088.htm
*
* says that bit 0 is "bad service", bit 2 is "no connection
* available", bit 4 is "service down", and bit 6 is "server
* has a broadcast message waiting for the client".
*
* Should it be displayed in hex, and should those bits (and any
* other bits with significance) be displayed as bitfields
* underneath it?
*/
{ &hf_ncp_connection_status,
{ "Connection Status", "ncp.connection_status", FT_UINT8, BASE_DEC, VALS(connection_status_vals), 0x0, NULL, HFILL }},
{ &hf_ncp_req_frame_num,
{ "Response to Request in Frame Number", "ncp.req_frame_num", FT_FRAMENUM, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_req_frame_time,
{ "Time from Request", "ncp.time", FT_RELATIVE_TIME, BASE_NONE, NULL, 0x0, "Time between request and response in seconds", HFILL }},
#if 0 /* Unused ? */
{ &hf_nds_flags,
{ "NDS Return Flags", "ncp.nds_flags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
#endif
{ &hf_nds_reply_depth,
{ "Distance from Root", "ncp.ndsdepth", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_reply_rev,
{ "NDS Revision", "ncp.ndsrev", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_reply_flags,
{ "Flags", "ncp.ndsflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_p1type,
{ "NDS Parameter Type", "ncp.p1type", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_uint32value,
{ "NDS Value", "ncp.uint32value", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_bit1,
{ "Typeless", "ncp.nds_bit1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_nds_bit2,
{ "All Containers", "ncp.nds_bit2", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_nds_bit3,
{ "Slashed", "ncp.nds_bit3", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_nds_bit4,
{ "Dotted", "ncp.nds_bit4", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_nds_bit5,
{ "Tuned", "ncp.nds_bit5", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_nds_bit6,
{ "Not Defined", "ncp.nds_bit6", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_nds_bit7,
{ "Not Defined", "ncp.nds_bit7", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_nds_bit8,
{ "Not Defined", "ncp.nds_bit8", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_nds_bit9,
{ "Not Defined", "ncp.nds_bit9", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_nds_bit10,
{ "Not Defined", "ncp.nds_bit10", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_nds_bit11,
{ "Not Defined", "ncp.nds_bit11", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_nds_bit12,
{ "Not Defined", "ncp.nds_bit12", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_nds_bit13,
{ "Not Defined", "ncp.nds_bit13", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_nds_bit14,
{ "Not Defined", "ncp.nds_bit14", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_nds_bit15,
{ "Not Defined", "ncp.nds_bit15", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_nds_bit16,
{ "Not Defined", "ncp.nds_bit16", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_outflags,
{ "Output Flags", "ncp.outflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1outflags,
{ "Output Flags", "ncp.bit1outflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2outflags,
{ "Entry ID", "ncp.bit2outflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3outflags,
{ "Replica State", "ncp.bit3outflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4outflags,
{ "Modification Timestamp", "ncp.bit4outflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5outflags,
{ "Purge Time", "ncp.bit5outflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6outflags,
{ "Local Partition ID", "ncp.bit6outflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7outflags,
{ "Distinguished Name", "ncp.bit7outflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8outflags,
{ "Replica Type", "ncp.bit8outflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9outflags,
{ "Partition Busy", "ncp.bit9outflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10outflags,
{ "Not Defined", "ncp.bit10outflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11outflags,
{ "Not Defined", "ncp.bit11outflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12outflags,
{ "Not Defined", "ncp.bit12outflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13outflags,
{ "Not Defined", "ncp.bit13outflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14outflags,
{ "Not Defined", "ncp.bit14outflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15outflags,
{ "Not Defined", "ncp.bit15outflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16outflags,
{ "Not Defined", "ncp.bit16outflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_bit1nflags,
{ "Entry ID", "ncp.bit1nflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2nflags,
{ "Readable", "ncp.bit2nflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3nflags,
{ "Writeable", "ncp.bit3nflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4nflags,
{ "Master", "ncp.bit4nflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5nflags,
{ "Create ID", "ncp.bit5nflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6nflags,
{ "Walk Tree", "ncp.bit6nflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7nflags,
{ "Dereference Alias", "ncp.bit7nflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8nflags,
{ "Not Defined", "ncp.bit8nflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9nflags,
{ "Not Defined", "ncp.bit9nflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10nflags,
{ "Not Defined", "ncp.bit10nflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11nflags,
{ "Not Defined", "ncp.bit11nflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12nflags,
{ "Not Defined", "ncp.bit12nflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13nflags,
{ "Not Defined", "ncp.bit13nflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14nflags,
{ "Prefer Referrals", "ncp.bit14nflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15nflags,
{ "Prefer Only Referrals", "ncp.bit15nflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16nflags,
{ "Not Defined", "ncp.bit16nflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_bit1rflags,
{ "Typeless", "ncp.bit1rflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2rflags,
{ "Slashed", "ncp.bit2rflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3rflags,
{ "Dotted", "ncp.bit3rflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4rflags,
{ "Tuned", "ncp.bit4rflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5rflags,
{ "Not Defined", "ncp.bit5rflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6rflags,
{ "Not Defined", "ncp.bit6rflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7rflags,
{ "Not Defined", "ncp.bit7rflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8rflags,
{ "Not Defined", "ncp.bit8rflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9rflags,
{ "Not Defined", "ncp.bit9rflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10rflags,
{ "Not Defined", "ncp.bit10rflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11rflags,
{ "Not Defined", "ncp.bit11rflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12rflags,
{ "Not Defined", "ncp.bit12rflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13rflags,
{ "Not Defined", "ncp.bit13rflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14rflags,
{ "Not Defined", "ncp.bit14rflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15rflags,
{ "Not Defined", "ncp.bit15rflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16rflags,
{ "Not Defined", "ncp.bit16rflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_eflags,
{ "Entry Flags", "ncp.eflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1eflags,
{ "Alias Entry", "ncp.bit1eflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2eflags,
{ "Partition Root", "ncp.bit2eflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3eflags,
{ "Container Entry", "ncp.bit3eflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4eflags,
{ "Container Alias", "ncp.bit4eflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5eflags,
{ "Matches List Filter", "ncp.bit5eflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6eflags,
{ "Reference Entry", "ncp.bit6eflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7eflags,
{ "40x Reference Entry", "ncp.bit7eflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8eflags,
{ "Back Linked", "ncp.bit8eflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9eflags,
{ "New Entry", "ncp.bit9eflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10eflags,
{ "Temporary Reference", "ncp.bit10eflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11eflags,
{ "Audited", "ncp.bit11eflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12eflags,
{ "Entry Not Present", "ncp.bit12eflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13eflags,
{ "Entry Verify CTS", "ncp.bit13eflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14eflags,
{ "Entry Damaged", "ncp.bit14eflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15eflags,
{ "Not Defined", "ncp.bit15rflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16eflags,
{ "Not Defined", "ncp.bit16rflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_infoflagsl,
{ "Information Flags (low) Byte", "ncp.infoflagsl", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_retinfoflagsl,
{ "Return Information Flags (low) Byte", "ncp.retinfoflagsl", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1infoflagsl,
{ "Output Flags", "ncp.bit1infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2infoflagsl,
{ "Entry ID", "ncp.bit2infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3infoflagsl,
{ "Entry Flags", "ncp.bit3infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4infoflagsl,
{ "Subordinate Count", "ncp.bit4infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5infoflagsl,
{ "Modification Time", "ncp.bit5infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6infoflagsl,
{ "Modification Timestamp", "ncp.bit6infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7infoflagsl,
{ "Creation Timestamp", "ncp.bit7infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8infoflagsl,
{ "Partition Root ID", "ncp.bit8infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9infoflagsl,
{ "Parent ID", "ncp.bit9infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10infoflagsl,
{ "Revision Count", "ncp.bit10infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11infoflagsl,
{ "Replica Type", "ncp.bit11infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12infoflagsl,
{ "Base Class", "ncp.bit12infoflagsl", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13infoflagsl,
{ "Relative Distinguished Name", "ncp.bit13infoflagsl", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14infoflagsl,
{ "Distinguished Name", "ncp.bit14infoflagsl", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15infoflagsl,
{ "Root Distinguished Name", "ncp.bit15infoflagsl", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16infoflagsl,
{ "Parent Distinguished Name", "ncp.bit16infoflagsl", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_infoflagsh,
{ "Information Flags (high) Byte", "ncp.infoflagsh", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1infoflagsh,
{ "Purge Time", "ncp.bit1infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2infoflagsh,
{ "Dereference Base Class", "ncp.bit2infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3infoflagsh,
{ "Not Defined", "ncp.bit3infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4infoflagsh,
{ "Not Defined", "ncp.bit4infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5infoflagsh,
{ "Not Defined", "ncp.bit5infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6infoflagsh,
{ "Not Defined", "ncp.bit6infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7infoflagsh,
{ "Not Defined", "ncp.bit7infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8infoflagsh,
{ "Not Defined", "ncp.bit8infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9infoflagsh,
{ "Not Defined", "ncp.bit9infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10infoflagsh,
{ "Not Defined", "ncp.bit10infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11infoflagsh,
{ "Not Defined", "ncp.bit11infoflagsh", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12infoflagsh,
{ "Not Defined", "ncp.bit12infoflagshs", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13infoflagsh,
{ "Not Defined", "ncp.bit13infoflagsh", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14infoflagsh,
{ "Not Defined", "ncp.bit14infoflagsh", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15infoflagsh,
{ "Not Defined", "ncp.bit15infoflagsh", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16infoflagsh,
{ "Not Defined", "ncp.bit16infoflagsh", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_retinfoflagsh,
{ "Return Information Flags (high) Byte", "ncp.retinfoflagsh", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1retinfoflagsh,
{ "Purge Time", "ncp.bit1retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2retinfoflagsh,
{ "Dereference Base Class", "ncp.bit2retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3retinfoflagsh,
{ "Replica Number", "ncp.bit3retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4retinfoflagsh,
{ "Replica State", "ncp.bit4retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5retinfoflagsh,
{ "Federation Boundary", "ncp.bit5retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6retinfoflagsh,
{ "Schema Boundary", "ncp.bit6retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7retinfoflagsh,
{ "Federation Boundary ID", "ncp.bit7retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8retinfoflagsh,
{ "Schema Boundary ID", "ncp.bit8retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9retinfoflagsh,
{ "Current Subcount", "ncp.bit9retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10retinfoflagsh,
{ "Local Entry Flags", "ncp.bit10retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11retinfoflagsh,
{ "Not Defined", "ncp.bit11retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12retinfoflagsh,
{ "Not Defined", "ncp.bit12retinfoflagshs", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13retinfoflagsh,
{ "Not Defined", "ncp.bit13retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14retinfoflagsh,
{ "Not Defined", "ncp.bit14retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15retinfoflagsh,
{ "Not Defined", "ncp.bit15retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16retinfoflagsh,
{ "Not Defined", "ncp.bit16retinfoflagsh", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_bit1lflags,
{ "List Typeless", "ncp.bit1lflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2lflags,
{ "List Containers", "ncp.bit2lflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3lflags,
{ "List Slashed", "ncp.bit3lflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4lflags,
{ "List Dotted", "ncp.bit4lflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5lflags,
{ "Dereference Alias", "ncp.bit5lflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6lflags,
{ "List All Containers", "ncp.bit6lflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7lflags,
{ "List Obsolete", "ncp.bit7lflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8lflags,
{ "List Tuned Output", "ncp.bit8lflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9lflags,
{ "List External Reference", "ncp.bit9lflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10lflags,
{ "Not Defined", "ncp.bit10lflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11lflags,
{ "Not Defined", "ncp.bit11lflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12lflags,
{ "Not Defined", "ncp.bit12lflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13lflags,
{ "Not Defined", "ncp.bit13lflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14lflags,
{ "Not Defined", "ncp.bit14lflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15lflags,
{ "Not Defined", "ncp.bit15lflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16lflags,
{ "Not Defined", "ncp.bit16lflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_l1flagsl,
{ "Information Flags (low) Byte", "ncp.l1flagsl", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_l1flagsh,
{ "Information Flags (high) Byte", "ncp.l1flagsh", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1l1flagsl,
{ "Output Flags", "ncp.bit1l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2l1flagsl,
{ "Entry ID", "ncp.bit2l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3l1flagsl,
{ "Replica State", "ncp.bit3l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4l1flagsl,
{ "Modification Timestamp", "ncp.bit4l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5l1flagsl,
{ "Purge Time", "ncp.bit5l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6l1flagsl,
{ "Local Partition ID", "ncp.bit6l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7l1flagsl,
{ "Distinguished Name", "ncp.bit7l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8l1flagsl,
{ "Replica Type", "ncp.bit8l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9l1flagsl,
{ "Partition Busy", "ncp.bit9l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10l1flagsl,
{ "Not Defined", "ncp.bit10l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11l1flagsl,
{ "Not Defined", "ncp.bit11l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12l1flagsl,
{ "Not Defined", "ncp.bit12l1flagsl", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13l1flagsl,
{ "Not Defined", "ncp.bit13l1flagsl", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14l1flagsl,
{ "Not Defined", "ncp.bit14l1flagsl", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15l1flagsl,
{ "Not Defined", "ncp.bit15l1flagsl", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16l1flagsl,
{ "Not Defined", "ncp.bit16l1flagsl", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_bit1l1flagsh,
{ "Not Defined", "ncp.bit1l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2l1flagsh,
{ "Not Defined", "ncp.bit2l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3l1flagsh,
{ "Not Defined", "ncp.bit3l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4l1flagsh,
{ "Not Defined", "ncp.bit4l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5l1flagsh,
{ "Not Defined", "ncp.bit5l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6l1flagsh,
{ "Not Defined", "ncp.bit6l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7l1flagsh,
{ "Not Defined", "ncp.bit7l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8l1flagsh,
{ "Not Defined", "ncp.bit8l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9l1flagsh,
{ "Not Defined", "ncp.bit9l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10l1flagsh,
{ "Not Defined", "ncp.bit10l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11l1flagsh,
{ "Not Defined", "ncp.bit11l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12l1flagsh,
{ "Not Defined", "ncp.bit12l1flagsh", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13l1flagsh,
{ "Not Defined", "ncp.bit13l1flagsh", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14l1flagsh,
{ "Not Defined", "ncp.bit14l1flagsh", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15l1flagsh,
{ "Not Defined", "ncp.bit15l1flagsh", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16l1flagsh,
{ "Not Defined", "ncp.bit16l1flagsh", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_vflags,
{ "Value Flags", "ncp.vflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1vflags,
{ "Naming", "ncp.bit1vflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2vflags,
{ "Base Class", "ncp.bit2vflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3vflags,
{ "Present", "ncp.bit3vflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4vflags,
{ "Value Damaged", "ncp.bit4vflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5vflags,
{ "Not Defined", "ncp.bit5vflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6vflags,
{ "Not Defined", "ncp.bit6vflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7vflags,
{ "Not Defined", "ncp.bit7vflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8vflags,
{ "Not Defined", "ncp.bit8vflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9vflags,
{ "Not Defined", "ncp.bit9vflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10vflags,
{ "Not Defined", "ncp.bit10vflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11vflags,
{ "Not Defined", "ncp.bit11vflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12vflags,
{ "Not Defined", "ncp.bit12vflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13vflags,
{ "Not Defined", "ncp.bit13vflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14vflags,
{ "Not Defined", "ncp.bit14vflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15vflags,
{ "Not Defined", "ncp.bit15vflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16vflags,
{ "Not Defined", "ncp.bit16vflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_cflags,
{ "Class Flags", "ncp.cflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1cflags,
{ "Container", "ncp.bit1cflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2cflags,
{ "Effective", "ncp.bit2cflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3cflags,
{ "Class Definition Cannot be Removed", "ncp.bit3cflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4cflags,
{ "Ambiguous Naming", "ncp.bit4cflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5cflags,
{ "Ambiguous Containment", "ncp.bit5cflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6cflags,
{ "Auxiliary", "ncp.bit6cflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7cflags,
{ "Operational", "ncp.bit7cflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8cflags,
{ "Sparse Required", "ncp.bit8cflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9cflags,
{ "Sparse Operational", "ncp.bit9cflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10cflags,
{ "Not Defined", "ncp.bit10cflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11cflags,
{ "Not Defined", "ncp.bit11cflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12cflags,
{ "Not Defined", "ncp.bit12cflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13cflags,
{ "Not Defined", "ncp.bit13cflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14cflags,
{ "Not Defined", "ncp.bit14cflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15cflags,
{ "Not Defined", "ncp.bit15cflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16cflags,
{ "Not Defined", "ncp.bit16cflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_bit1acflags,
{ "Single Valued", "ncp.bit1acflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2acflags,
{ "Sized", "ncp.bit2acflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3acflags,
{ "Non-Removable", "ncp.bit3acflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4acflags,
{ "Read Only", "ncp.bit4acflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5acflags,
{ "Hidden", "ncp.bit5acflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6acflags,
{ "String", "ncp.bit6acflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7acflags,
{ "Synchronize Immediate", "ncp.bit7acflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8acflags,
{ "Public Read", "ncp.bit8acflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9acflags,
{ "Server Read", "ncp.bit9acflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10acflags,
{ "Write Managed", "ncp.bit10acflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11acflags,
{ "Per Replica", "ncp.bit11acflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12acflags,
{ "Never Schedule Synchronization", "ncp.bit12acflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13acflags,
{ "Operational", "ncp.bit13acflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14acflags,
{ "Not Defined", "ncp.bit14acflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15acflags,
{ "Not Defined", "ncp.bit15acflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16acflags,
{ "Not Defined", "ncp.bit16acflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_nds_reply_error,
{ "NDS Error", "ncp.ndsreplyerror", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_net,
{ "Network","ncp.ndsnet", FT_IPXNET, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_node,
{ "Node", "ncp.ndsnode", FT_ETHER, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_socket,
{ "Socket", "ncp.ndssocket", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_add_ref_ip,
{ "Address Referral", "ncp.ipref", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_add_ref_udp,
{ "Address Referral", "ncp.udpref", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_add_ref_tcp,
{ "Address Referral", "ncp.tcpref", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_referral_record,
{ "Referral Record", "ncp.ref_rec", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_referral_addcount,
{ "Number of Addresses in Referral", "ncp.ref_addcount", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_port,
{ "Port", "ncp.ndsport", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_mv_string,
{ "Attribute Name", "ncp.mv_string", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_syntax,
{ "Attribute Syntax", "ncp.nds_syntax", FT_UINT32, BASE_DEC, VALS(nds_syntax), 0x0, NULL, HFILL }},
{ &hf_value_string,
{ "Value", "ncp.value_string", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_stream_name,
{ "Stream Name", "ncp.nds_stream_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_buffer_size,
{ "NDS Reply Buffer Size", "ncp.nds_reply_buf", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_ver,
{ "NDS Version", "ncp.nds_ver", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_nflags,
{ "Flags", "ncp.nds_nflags", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_rflags,
{ "Request Flags", "ncp.nds_rflags", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_eflags,
{ "Entry Flags", "ncp.nds_eflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_scope,
{ "Scope", "ncp.nds_scope", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_name,
{ "Name", "ncp.nds_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_name_type,
{ "Name Type", "ncp.nds_name_type", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_comm_trans,
{ "Communications Transport", "ncp.nds_comm_trans", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_tree_trans,
{ "Tree Walker Transport", "ncp.nds_tree_trans", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_iteration,
{ "Iteration Handle", "ncp.nds_iteration", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_iterator,
{ "Iterator", "ncp.nds_iterator", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_file_handle,
{ "File Handle", "ncp.nds_file_handle", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_file_size,
{ "File Size", "ncp.nds_file_size", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_eid,
{ "NDS EID", "ncp.nds_eid", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_depth,
{ "Distance object is from Root", "ncp.nds_depth", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_info_type,
{ "Info Type", "ncp.nds_info_type", FT_UINT32, BASE_RANGE_STRING|BASE_DEC, RVALS(nds_info_type), 0x0, NULL, HFILL }},
{ &hf_nds_class_def_type,
{ "Class Definition Type", "ncp.nds_class_def_type", FT_UINT32, BASE_DEC, VALS(class_def_type), 0x0, NULL, HFILL }},
{ &hf_nds_all_attr,
{ "All Attributes", "ncp.nds_all_attr", FT_UINT32, BASE_DEC, NULL, 0x0, "Return all Attributes?", HFILL }},
{ &hf_nds_return_all_classes,
{ "All Classes", "ncp.nds_return_all_classes", FT_UINT32, BASE_DEC, NULL, 0x0, "Return all Classes?", HFILL }},
{ &hf_nds_req_flags,
{ "Request Flags", "ncp.nds_req_flags", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_attr,
{ "Attributes", "ncp.nds_attributes", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_classes,
{ "Classes", "ncp.nds_classes", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_crc,
{ "CRC", "ncp.nds_crc", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_referrals,
{ "Referrals", "ncp.nds_referrals", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_result_flags,
{ "Result Flags", "ncp.nds_result_flags", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_stream_flags,
{ "Streams Flags", "ncp.nds_stream_flags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_tag_string,
{ "Tags", "ncp.nds_tags", FT_UINT32, BASE_DEC, VALS(nds_tags), 0x0, NULL, HFILL }},
{ &hf_value_bytes,
{ "Bytes", "ncp.value_bytes", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_replica_type,
{ "Replica Type", "ncp.rtype", FT_UINT32, BASE_DEC, VALS(nds_replica_type), 0x0, NULL, HFILL }},
{ &hf_replica_state,
{ "Replica State", "ncp.rstate", FT_UINT16, BASE_DEC, VALS(nds_replica_state), 0x0, NULL, HFILL }},
{ &hf_nds_rnum,
{ "Replica Number", "ncp.rnum", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_revent,
{ "Event", "ncp.revent", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_replica_number,
{ "Replica Number", "ncp.rnum", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_min_nds_ver,
{ "Minimum NDS Version", "ncp.min_nds_version", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_ver_include,
{ "Include NDS Version", "ncp.inc_nds_ver", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_ver_exclude,
{ "Exclude NDS Version", "ncp.exc_nds_ver", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
#if 0 /* Unused ? */
{ &hf_nds_es,
{ "Input Entry Specifier", "ncp.nds_es", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
#endif
{ &hf_es_type,
{ "Entry Specifier Type", "ncp.nds_es_type", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_rdn_string,
{ "RDN", "ncp.nds_rdn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
#if 0 /* Unused ? */
{ &hf_delim_string,
{ "Delimiter", "ncp.nds_delim", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
#endif
{ &hf_nds_dn_output_type,
{ "Output Entry Specifier Type", "ncp.nds_out_es_type", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_nested_output_type,
{ "Nested Output Entry Specifier Type", "ncp.nds_nested_out_es", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_output_delimiter,
{ "Output Delimiter", "ncp.nds_out_delimiter", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_output_entry_specifier,
{ "Output Entry Specifier", "ncp.nds_out_es", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_es_value,
{ "Entry Specifier Value", "ncp.nds_es_value", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_es_rdn_count,
{ "RDN Count", "ncp.nds_es_rdn_count", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_replica_num,
{ "Replica Number", "ncp.nds_replica_num", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_es_seconds,
{ "Seconds", "ncp.nds_es_seconds", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_event_num,
{ "Event Number", "ncp.nds_event_num", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_compare_results,
{ "Compare Values Returned", "ncp.nds_compare_results", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_parent,
{ "Parent ID", "ncp.nds_parent", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_name_filter,
{ "Name Filter", "ncp.nds_name_filter", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_class_filter,
{ "Class Filter", "ncp.nds_class_filter", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_time_filter,
{ "Time Filter", "ncp.nds_time_filter", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_partition_root_id,
{ "Partition Root ID", "ncp.nds_partition_root_id", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_replicas,
{ "Replicas", "ncp.nds_replicas", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_purge,
{ "Purge Time", "ncp.nds_purge", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_local_partition,
{ "Local Partition ID", "ncp.nds_local_partition", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_partition_busy,
{ "Partition Busy", "ncp.nds_partition_busy", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_number_of_changes,
{ "Number of Attribute Changes", "ncp.nds_number_of_changes", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_sub_count,
{ "Subordinate Count", "ncp.sub_count", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_revision,
{ "Revision Count", "ncp.nds_rev_count", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_base_class,
{ "Base Class", "ncp.nds_base_class", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_relative_dn,
{ "Relative Distinguished Name", "ncp.nds_relative_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
#if 0 /* Unused ? */
{ &hf_nds_root_dn,
{ "Root Distinguished Name", "ncp.nds_root_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
#endif
#if 0 /* Unused ? */
{ &hf_nds_parent_dn,
{ "Parent Distinguished Name", "ncp.nds_parent_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
#endif
{ &hf_deref_base,
{ "Dereference Base Class", "ncp.nds_deref_base", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_base,
{ "Base Class", "ncp.nds_base", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_super,
{ "Super Class", "ncp.nds_super", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
#if 0 /* Unused ? */
{ &hf_nds_entry_info,
{ "Entry Information", "ncp.nds_entry_info", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
#endif
{ &hf_nds_privileges,
{ "Privileges", "ncp.nds_privileges", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_compare_attributes,
{ "Compare Attributes?", "ncp.nds_compare_attributes", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_nds_read_attribute,
{ "Read Attribute?", "ncp.nds_read_attribute", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_nds_write_add_delete_attribute,
{ "Write, Add, Delete Attribute?", "ncp.nds_write_add_delete_attribute", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_nds_add_delete_self,
{ "Add/Delete Self?", "ncp.nds_add_delete_self", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_nds_privilege_not_defined,
{ "Privilege Not defined", "ncp.nds_privilege_not_defined", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_nds_supervisor,
{ "Supervisor?", "ncp.nds_supervisor", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_nds_inheritance_control,
{ "Inheritance?", "ncp.nds_inheritance_control", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_nds_browse_entry,
{ "Browse Entry?", "ncp.nds_browse_entry", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_nds_add_entry,
{ "Add Entry?", "ncp.nds_add_entry", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_nds_delete_entry,
{ "Delete Entry?", "ncp.nds_delete_entry", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_nds_rename_entry,
{ "Rename Entry?", "ncp.nds_rename_entry", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_nds_supervisor_entry,
{ "Supervisor?", "ncp.nds_supervisor_entry", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_nds_entry_privilege_not_defined,
{ "Privilege Not Defined", "ncp.nds_entry_privilege_not_defined", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_nds_vflags,
{ "Value Flags", "ncp.nds_vflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_value_len,
{ "Value Length", "ncp.nds_vlength", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_cflags,
{ "Class Flags", "ncp.nds_cflags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_asn1,
{ "ASN.1 ID", "ncp.nds_asn1", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_acflags,
{ "Attribute Constraint Flags", "ncp.nds_acflags", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_upper,
{ "Upper Limit Value", "ncp.nds_upper", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_lower,
{ "Lower Limit Value", "ncp.nds_lower", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_trustee_dn,
{ "Trustee Distinguished Name", "ncp.nds_trustee_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_attribute_dn,
{ "Attribute Name", "ncp.nds_attribute_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_acl_add,
{ "ACL Templates to Add", "ncp.nds_acl_add", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_acl_del,
{ "Access Control Lists to Delete", "ncp.nds_acl_del", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_att_add,
{ "Attribute to Add", "ncp.nds_att_add", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_att_del,
{ "Attribute Names to Delete", "ncp.nds_att_del", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_keep,
{ "Delete Original RDN", "ncp.nds_keep", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_new_rdn,
{ "New Relative Distinguished Name", "ncp.nds_new_rdn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_time_delay,
{ "Time Delay", "ncp.nds_time_delay", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_root_name,
{ "Root Most Object Name", "ncp.nds_root_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_new_part_id,
{ "New Partition Root ID", "ncp.nds_new_part_id", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_child_part_id,
{ "Child Partition Root ID", "ncp.nds_child_part_id", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_master_part_id,
{ "Master Partition Root ID", "ncp.nds_master_part_id", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_target_name,
{ "Target Server Name", "ncp.nds_target_dn", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_pingflags1,
{ "Ping (low) Request Flags", "ncp.pingflags1", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1pingflags1,
{ "Supported Fields", "ncp.bit1pingflags1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2pingflags1,
{ "Depth", "ncp.bit2pingflags1", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3pingflags1,
{ "Build Number", "ncp.bit3pingflags1", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4pingflags1,
{ "Flags", "ncp.bit4pingflags1", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5pingflags1,
{ "Verification Flags", "ncp.bit5pingflags1", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6pingflags1,
{ "Letter Version", "ncp.bit6pingflags1", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7pingflags1,
{ "OS Version", "ncp.bit7pingflags1", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8pingflags1,
{ "Not Defined", "ncp.bit8pingflags1", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9pingflags1,
{ "License Flags", "ncp.bit9pingflags1", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10pingflags1,
{ "DS Time", "ncp.bit10pingflags1", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11pingflags1,
{ "Server Time", "ncp.bit11pingflags1", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12pingflags1,
{ "Create Time", "ncp.bit12pingflags1", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13pingflags1,
{ "Not Defined", "ncp.bit13pingflags1", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14pingflags1,
{ "Not Defined", "ncp.bit14pingflags1", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15pingflags1,
{ "Not Defined", "ncp.bit15pingflags1", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16pingflags1,
{ "Not Defined", "ncp.bit16pingflags1", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_pingflags2,
{ "Ping (high) Request Flags", "ncp.pingflags2", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1pingflags2,
{ "Sap Name", "ncp.bit1pingflags2", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2pingflags2,
{ "Tree Name", "ncp.bit2pingflags2", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3pingflags2,
{ "OS Name", "ncp.bit3pingflags2", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4pingflags2,
{ "Hardware Name", "ncp.bit4pingflags2", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5pingflags2,
{ "Vendor Name", "ncp.bit5pingflags2", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6pingflags2,
{ "Not Defined", "ncp.bit6pingflags2", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7pingflags2,
{ "Not Defined", "ncp.bit7pingflags2", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8pingflags2,
{ "Not Defined", "ncp.bit8pingflags2", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9pingflags2,
{ "Not Defined", "ncp.bit9pingflags2", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10pingflags2,
{ "Not Defined", "ncp.bit10pingflags2", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11pingflags2,
{ "Not Defined", "ncp.bit11pingflags2", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12pingflags2,
{ "Not Defined", "ncp.bit12pingflags2", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13pingflags2,
{ "Not Defined", "ncp.bit13pingflags2", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14pingflags2,
{ "Not Defined", "ncp.bit14pingflags2", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15pingflags2,
{ "Not Defined", "ncp.bit15pingflags2", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16pingflags2,
{ "Not Defined", "ncp.bit16pingflags2", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_pingpflags1,
{ "Ping Data Flags", "ncp.pingpflags1", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1pingpflags1,
{ "Root Most Master Replica", "ncp.bit1pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2pingpflags1,
{ "Is Time Synchronized?", "ncp.bit2pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3pingpflags1,
{ "Is Time Valid?", "ncp.bit3pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4pingpflags1,
{ "Is DS Time Synchronized?", "ncp.bit4pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5pingpflags1,
{ "Does Agent Have All Replicas?", "ncp.bit5pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6pingpflags1,
{ "Not Defined", "ncp.bit6pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7pingpflags1,
{ "Not Defined", "ncp.bit7pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8pingpflags1,
{ "Not Defined", "ncp.bit8pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9pingpflags1,
{ "Not Defined", "ncp.bit9pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10pingpflags1,
{ "Not Defined", "ncp.bit10pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11pingpflags1,
{ "Not Defined", "ncp.bit11pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12pingpflags1,
{ "Not Defined", "ncp.bit12pingpflags1", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13pingpflags1,
{ "Not Defined", "ncp.bit13pingpflags1", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14pingpflags1,
{ "Not Defined", "ncp.bit14pingpflags1", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15pingpflags1,
{ "Not Defined", "ncp.bit15pingpflags1", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16pingpflags1,
{ "Not Defined", "ncp.bit16pingpflags1", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_pingvflags1,
{ "Verification Flags", "ncp.pingvflags1", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1pingvflags1,
{ "Checksum", "ncp.bit1pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2pingvflags1,
{ "CRC32", "ncp.bit2pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3pingvflags1,
{ "Not Defined", "ncp.bit3pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4pingvflags1,
{ "Not Defined", "ncp.bit4pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5pingvflags1,
{ "Not Defined", "ncp.bit5pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6pingvflags1,
{ "Not Defined", "ncp.bit6pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7pingvflags1,
{ "Not Defined", "ncp.bit7pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8pingvflags1,
{ "Not Defined", "ncp.bit8pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9pingvflags1,
{ "Not Defined", "ncp.bit9pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10pingvflags1,
{ "Not Defined", "ncp.bit10pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11pingvflags1,
{ "Not Defined", "ncp.bit11pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12pingvflags1,
{ "Not Defined", "ncp.bit12pingvflags1", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13pingvflags1,
{ "Not Defined", "ncp.bit13pingvflags1", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14pingvflags1,
{ "Not Defined", "ncp.bit14pingvflags1", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15pingvflags1,
{ "Not Defined", "ncp.bit15pingvflags1", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16pingvflags1,
{ "Not Defined", "ncp.bit16pingvflags1", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_nds_letter_ver,
{ "Letter Version", "ncp.nds_letter_ver", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_os_majver,
{ "OS Major Version", "ncp.nds_os_majver", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_os_minver,
{ "OS Minor Version", "ncp.nds_os_minver", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_lic_flags,
{ "License Flags", "ncp.nds_lic_flags", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_ds_time,
{ "DS Time", "ncp.nds_ds_time", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_svr_time,
{ "Server Time", "ncp.nds_svr_time", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_crt_time,
{ "Agent Create Time", "ncp.nds_crt_time", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_ping_version,
{ "Ping Version", "ncp.nds_ping_version", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_search_scope,
{ "Search Scope", "ncp.nds_search_scope", FT_UINT32, BASE_DEC|BASE_RANGE_STRING, RVALS(nds_search_scope), 0x0, NULL, HFILL }},
{ &hf_nds_num_objects,
{ "Number of Objects to Search", "ncp.nds_num_objects", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_siflags,
{ "Information Types", "ncp.siflags", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_bit1siflags,
{ "Names", "ncp.bit1siflags", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_bit2siflags,
{ "Names and Values", "ncp.bit2siflags", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_bit3siflags,
{ "Effective Privileges", "ncp.bit3siflags", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_bit4siflags,
{ "Value Info", "ncp.bit4siflags", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_bit5siflags,
{ "Abbreviated Value", "ncp.bit5siflags", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_bit6siflags,
{ "Not Defined", "ncp.bit6siflags", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_bit7siflags,
{ "Not Defined", "ncp.bit7siflags", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_bit8siflags,
{ "Not Defined", "ncp.bit8siflags", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_bit9siflags,
{ "Expanded Class", "ncp.bit9siflags", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_bit10siflags,
{ "Not Defined", "ncp.bit10siflags", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_bit11siflags,
{ "Not Defined", "ncp.bit11siflags", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_bit12siflags,
{ "Not Defined", "ncp.bit12siflags", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_bit13siflags,
{ "Not Defined", "ncp.bit13siflags", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_bit14siflags,
{ "Not Defined", "ncp.bit14siflags", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_bit15siflags,
{ "Not Defined", "ncp.bit15siflags", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_bit16siflags,
{ "Not Defined", "ncp.bit16siflags", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_nds_segment_overlap,
{ "Segment overlap", "nds.segment.overlap", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Segment overlaps with other segments", HFILL }},
{ &hf_nds_segment_overlap_conflict,
{ "Conflicting data in segment overlap", "nds.segment.overlap.conflict", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Overlapping segments contained conflicting data", HFILL }},
{ &hf_nds_segment_multiple_tails,
{ "Multiple tail segments found", "nds.segment.multipletails", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Several tails were found when desegmenting the packet", HFILL }},
{ &hf_nds_segment_too_long_segment,
{ "Segment too long", "nds.segment.toolongsegment", FT_BOOLEAN, BASE_NONE, NULL, 0x0, "Segment contained data past end of packet", HFILL }},
{ &hf_nds_segment_error,
{ "Desegmentation error", "nds.segment.error", FT_FRAMENUM, BASE_NONE, NULL, 0x0, "Desegmentation error due to illegal segments", HFILL }},
{ &hf_nds_segment_count,
{ "Segment count", "nds.segment.count", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_reassembled_length,
{ "Reassembled NDS length", "nds.reassembled.length", FT_UINT32, BASE_DEC, NULL, 0x0, "The total length of the reassembled payload", HFILL }},
{ &hf_nds_segment,
{ "NDS Fragment", "nds.fragment", FT_FRAMENUM, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_segments,
{ "NDS Fragments", "nds.fragments", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_verb2b_req_flags,
{ "Flags", "ncp.nds_verb2b_flags", FT_UINT32, BASE_HEX, VALS(nds_verb2b_flag_vals), 0x0, NULL, HFILL }},
{ &hf_ncp_ip_address,
{ "IP Address", "ncp.ip_addr", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_copyright,
{ "Copyright", "ncp.copyright", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_ndsprot1flag,
{ "Not Defined", "ncp.nds_prot_bit1", FT_BOOLEAN, 16, NULL, 0x00000001, NULL, HFILL }},
{ &hf_ndsprot2flag,
{ "Not Defined", "ncp.nds_prot_bit2", FT_BOOLEAN, 16, NULL, 0x00000002, NULL, HFILL }},
{ &hf_ndsprot3flag,
{ "Not Defined", "ncp.nds_prot_bit3", FT_BOOLEAN, 16, NULL, 0x00000004, NULL, HFILL }},
{ &hf_ndsprot4flag,
{ "Not Defined", "ncp.nds_prot_bit4", FT_BOOLEAN, 16, NULL, 0x00000008, NULL, HFILL }},
{ &hf_ndsprot5flag,
{ "Not Defined", "ncp.nds_prot_bit5", FT_BOOLEAN, 16, NULL, 0x00000010, NULL, HFILL }},
{ &hf_ndsprot6flag,
{ "Not Defined", "ncp.nds_prot_bit6", FT_BOOLEAN, 16, NULL, 0x00000020, NULL, HFILL }},
{ &hf_ndsprot7flag,
{ "Not Defined", "ncp.nds_prot_bit7", FT_BOOLEAN, 16, NULL, 0x00000040, NULL, HFILL }},
{ &hf_ndsprot8flag,
{ "Not Defined", "ncp.nds_prot_bit8", FT_BOOLEAN, 16, NULL, 0x00000080, NULL, HFILL }},
{ &hf_ndsprot9flag,
{ "Not Defined", "ncp.nds_prot_bit9", FT_BOOLEAN, 16, NULL, 0x00000100, NULL, HFILL }},
{ &hf_ndsprot10flag,
{ "Not Defined", "ncp.nds_prot_bit10", FT_BOOLEAN, 16, NULL, 0x00000200, NULL, HFILL }},
{ &hf_ndsprot11flag,
{ "Not Defined", "ncp.nds_prot_bit11", FT_BOOLEAN, 16, NULL, 0x00000400, NULL, HFILL }},
{ &hf_ndsprot12flag,
{ "Not Defined", "ncp.nds_prot_bit12", FT_BOOLEAN, 16, NULL, 0x00000800, NULL, HFILL }},
{ &hf_ndsprot13flag,
{ "Not Defined", "ncp.nds_prot_bit13", FT_BOOLEAN, 16, NULL, 0x00001000, NULL, HFILL }},
{ &hf_ndsprot14flag,
{ "Not Defined", "ncp.nds_prot_bit14", FT_BOOLEAN, 16, NULL, 0x00002000, NULL, HFILL }},
{ &hf_ndsprot15flag,
{ "Include CRC in NDS Header", "ncp.nds_prot_bit15", FT_BOOLEAN, 16, NULL, 0x00004000, NULL, HFILL }},
{ &hf_ndsprot16flag,
{ "Client is a Server", "ncp.nds_prot_bit16", FT_BOOLEAN, 16, NULL, 0x00008000, NULL, HFILL }},
{ &hf_nds_svr_dst_name,
{ "Server Distinguished Name", "ncp.nds_svr_dist_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_tune_mark,
{ "Tune Mark", "ncp.ndstunemark", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
#if 0 /* Unused ? */
{ &hf_nds_create_time,
{ "NDS Creation Time", "ncp.ndscreatetime", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL }},
#endif
{ &hf_srvr_param_string,
{ "Set Parameter Value", "ncp.srvr_param_string", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_srvr_param_number,
{ "Set Parameter Value", "ncp.srvr_param_number", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_srvr_param_boolean,
{ "Set Parameter Value", "ncp.srvr_param_boolean", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_number_of_items,
{ "Number of Items", "ncp.ndsitems", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_nds_iterverb,
{ "NDS Iteration Verb", "ncp.ndsiterverb", FT_UINT32, BASE_DEC_HEX, VALS(iterator_subverbs), 0x0, NULL, HFILL }},
{ &hf_iter_completion_code,
{ "Iteration Completion Code", "ncp.iter_completion_code", FT_UINT32, BASE_HEX, VALS(nds_reply_errors), 0x0, NULL, HFILL }},
#if 0 /* Unused ? */
{ &hf_nds_iterobj,
{ "Iterator Object", "ncp.ndsiterobj", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
#endif
{ &hf_iter_verb_completion_code,
{ "Completion Code", "ncp.iter_verb_completion_code", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_iter_ans,
{ "Iterator Answer", "ncp.iter_answer", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_positionable,
{ "Positionable", "ncp.iterpositionable", FT_BOOLEAN, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_num_skipped,
{ "Number Skipped", "ncp.iternumskipped", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_num_to_skip,
{ "Number to Skip", "ncp.iternumtoskip", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_timelimit,
{ "Time Limit", "ncp.itertimelimit", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_iter_index,
{ "Iterator Index", "ncp.iterindex", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_num_to_get,
{ "Number to Get", "ncp.iternumtoget", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
#if 0 /* Unused ? */
{ &hf_ret_info_type,
{ "Return Information Type", "ncp.iterretinfotype", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
#endif
{ &hf_data_size,
{ "Data Size", "ncp.iterdatasize", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_this_count,
{ "Number of Items", "ncp.itercount", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_max_entries,
{ "Maximum Entries", "ncp.itermaxentries", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_move_position,
{ "Move Position", "ncp.itermoveposition", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_iter_copy,
{ "Iterator Copy", "ncp.itercopy", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_iter_position,
{ "Iteration Position", "ncp.iterposition", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_iter_search,
{ "Search Filter", "ncp.iter_search", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_iter_other,
{ "Other Iteration", "ncp.iterother", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_oid,
{ "Object ID", "ncp.nds_oid", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_ncp_bytes_actually_trans_64,
{ "Bytes Actually Transferred", "ncp.bytes_actually_trans_64", FT_UINT64, BASE_DEC, NULL, 0x0, NULL, HFILL }},
{ &hf_sap_name,
{ "SAP Name", "ncp.sap_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_os_name,
{ "OS Name", "ncp.os_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_vendor_name,
{ "Vendor Name", "ncp.vendor_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_hardware_name,
{ "Hardware Name", "ncp.harware_name", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_no_request_record_found,
{ "No request record found. Parsing is impossible.", "ncp.no_request_record_found", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_search_modifier,
{ "Search Modifier", "ncp.search_modifier", FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL }},
{ &hf_search_pattern,
{ "Search Pattern", "ncp.search_pattern", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_acl_protected_attribute,
{ "Protected Attribute", "ncp.nds_acl_protected_attribute", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_acl_subject,
{ "Subject", "ncp.nds_acl_subject", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL }},
{ &hf_nds_acl_privileges,
{ "Subject", "ncp.nds_acl_privileges", FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL }},
""")
# Print the registration code for the hf variables
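# For a hypothetical variable named hf_ncp_example, the loop below would
# emit, e.g.:
#     { &hf_ncp_example,
#     { "Example", "ncp.example", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL }},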
for var in sorted_vars:
print(" { &%s," % (var.HFName()))
print(" { \"%s\", \"%s\", %s, %s, %s, 0x%x, NULL, HFILL }},\n" % \
(var.Description(), var.DFilter(),
var.WiresharkFType(), var.Display(), var.ValuesName(),
var.Mask()))
print(" };\n")
if ett_list:
print(" static int *ett[] = {")
for ett in ett_list:
print(" &%s," % (ett,))
print(" };\n")
print("""
static ei_register_info ei[] = {
{ &ei_ncp_file_handle, { "ncp.file_handle.expert", PI_REQUEST_CODE, PI_CHAT, "Close file handle", EXPFILL }},
{ &ei_ncp_file_rights, { "ncp.file_rights", PI_REQUEST_CODE, PI_CHAT, "File rights", EXPFILL }},
{ &ei_ncp_op_lock_handle, { "ncp.op_lock_handle", PI_REQUEST_CODE, PI_CHAT, "Op-lock on handle", EXPFILL }},
{ &ei_ncp_file_rights_change, { "ncp.file_rights.change", PI_REQUEST_CODE, PI_CHAT, "Change handle rights", EXPFILL }},
{ &ei_ncp_effective_rights, { "ncp.effective_rights.expert", PI_RESPONSE_CODE, PI_CHAT, "Handle effective rights", EXPFILL }},
{ &ei_ncp_server, { "ncp.server", PI_RESPONSE_CODE, PI_CHAT, "Server info", EXPFILL }},
{ &ei_iter_verb_completion_code, { "ncp.iter_verb_completion_code.expert", PI_RESPONSE_CODE, PI_ERROR, "Iteration Verb Error", EXPFILL }},
{ &ei_ncp_connection_request, { "ncp.connection_request", PI_RESPONSE_CODE, PI_CHAT, "Connection Request", EXPFILL }},
{ &ei_ncp_destroy_connection, { "ncp.destroy_connection", PI_RESPONSE_CODE, PI_CHAT, "Destroy Connection Request", EXPFILL }},
{ &ei_nds_reply_error, { "ncp.ndsreplyerror.expert", PI_RESPONSE_CODE, PI_ERROR, "NDS Error", EXPFILL }},
{ &ei_nds_iteration, { "ncp.nds_iteration.error", PI_RESPONSE_CODE, PI_ERROR, "NDS Iteration Error", EXPFILL }},
{ &ei_ncp_eid, { "ncp.eid", PI_RESPONSE_CODE, PI_CHAT, "EID", EXPFILL }},
{ &ei_ncp_completion_code, { "ncp.completion_code.expert", PI_RESPONSE_CODE, PI_ERROR, "Completion Code Error", EXPFILL }},
{ &ei_ncp_connection_status, { "ncp.connection_status.bad", PI_RESPONSE_CODE, PI_ERROR, "Error: Bad Connection Status", EXPFILL }},
{ &ei_ncp_connection_destroyed, { "ncp.connection_destroyed", PI_RESPONSE_CODE, PI_CHAT, "Connection Destroyed", EXPFILL }},
{ &ei_ncp_no_request_record_found, { "ncp.no_request_record_found", PI_SEQUENCE, PI_NOTE, "No request record found.", EXPFILL }},
{ &ei_ncp_invalid_offset, { "ncp.invalid_offset", PI_MALFORMED, PI_ERROR, "Invalid offset", EXPFILL }},
{ &ei_ncp_address_type, { "ncp.address_type.unknown", PI_PROTOCOL, PI_WARN, "Unknown Address Type", EXPFILL }},
{ &ei_ncp_value_too_large, { "ncp.value_too_large", PI_MALFORMED, PI_ERROR, "Length value goes past the end of the packet", EXPFILL }},
};
expert_module_t* expert_ncp;
proto_register_field_array(proto_ncp, hf, array_length(hf));""")
if ett_list:
print("""
proto_register_subtree_array(ett, array_length(ett));""")
print("""
expert_ncp = expert_register_protocol(proto_ncp);
expert_register_field_array(expert_ncp, ei, array_length(ei));
register_init_routine(&ncp_init_protocol);
/* fragment */
reassembly_table_register(&nds_reassembly_table,
&addresses_reassembly_table_functions);
ncp_req_hash = wmem_map_new_autoreset(wmem_epan_scope(), wmem_file_scope(), ncp_hash, ncp_equal);
ncp_req_eid_hash = wmem_map_new_autoreset(wmem_epan_scope(), wmem_file_scope(), ncp_eid_hash, ncp_eid_equal);
""")
# End of proto_register_ncp2222()
print("}")
def usage():
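    """Print a usage message and exit with an error status."""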
print("Usage: ncp2222.py -o output_file")
sys.exit(1)
def main():
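    """Parse the command line, redirect stdout to the requested output
    file, generate the dissector source, and remove the partial output
    file if anything goes wrong."""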
global compcode_lists
global ptvc_lists
global msg
optstring = "o:"
out_filename = None
try:
opts, args = getopt.getopt(sys.argv[1:], optstring)
except getopt.error:
usage()
for opt, arg in opts:
if opt == "-o":
out_filename = arg
else:
usage()
if len(args) != 0:
usage()
if not out_filename:
usage()
# Create the output file
try:
out_file = open(out_filename, "w")
except IOError as err:
    sys.exit("Could not open %s for writing: %s" % (out_filename, err))
# Set msg to current stdout
msg = sys.stdout
# Set stdout to the output file
sys.stdout = out_file
msg.write("Processing NCP definitions...\n")
# Run the code, and if we catch any exception,
# erase the output file.
try:
compcode_lists = UniqueCollection('Completion Code Lists')
ptvc_lists = UniqueCollection('PTVC Lists')
define_errors()
define_groups()
define_ncp2222()
msg.write("Defined %d NCP types.\n" % (len(packets),))
produce_code()
except Exception:
    traceback.print_exc(limit=20, file=msg)
try:
out_file.close()
except IOError as err:
    msg.write("Could not close %s: %s\n" % (out_filename, err))
try:
if os.path.exists(out_filename):
os.remove(out_filename)
except OSError as err:
    msg.write("Could not remove %s: %s\n" % (out_filename, err))
sys.exit(1)
def define_ncp2222():
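    """Define every NCP packet type: its function/subfunction code,
    request and reply record layouts, and valid completion codes."""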
##############################################################################
# NCP Packets. Here I list functions and subfunctions in hexadecimal, as the
# NCP book does (and I believe LanAlyzer does too). Novell, however, lists
# them in decimal in its online documentation.
##############################################################################
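# For example, 0x1103 below is function 0x11 (decimal 17), subfunction
# 0x03, which Novell's documentation would list as 17/03.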
# 2222/01
pkt = NCP(0x01, "File Set Lock", 'sync')
pkt.Request(7)
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/02
pkt = NCP(0x02, "File Release Lock", 'sync')
pkt.Request(7)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xff00])
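# A note on the rec() entries used below (a reading inferred from their
# use throughout this file): each record gives (offset, length, field
# type[, encoding]), with optional var=/repeat=/info_str= keyword
# arguments described where they first appear.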
# 2222/03
pkt = NCP(0x03, "Log File Exclusive", 'sync')
pkt.Request( (12, 267), [
rec( 7, 1, DirHandle ),
rec( 8, 1, LockFlag ),
rec( 9, 2, TimeoutLimit, ENC_BIG_ENDIAN ),
rec( 11, (1, 256), FilePath ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8200, 0x9600, 0xfe0d, 0xff01])
# 2222/04
pkt = NCP(0x04, "Lock File Set", 'sync')
pkt.Request( 9, [
rec( 7, 2, TimeoutLimit ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xfe0d, 0xff01])
# 2222/05
pkt = NCP(0x05, "Release File", 'sync')
pkt.Request( (9, 264), [
rec( 7, 1, DirHandle ),
rec( 8, (1, 256), FilePath ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9b00, 0x9c03, 0xff1a])
# 2222/06
pkt = NCP(0x06, "Release File Set", 'sync')
pkt.Request( 8, [
rec( 7, 1, LockFlag ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/07
pkt = NCP(0x07, "Clear File", 'sync')
pkt.Request( (9, 264), [
rec( 7, 1, DirHandle ),
rec( 8, (1, 256), FilePath ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03,
0xa100, 0xfd00, 0xff1a])
# 2222/08
pkt = NCP(0x08, "Clear File Set", 'sync')
pkt.Request( 8, [
rec( 7, 1, LockFlag ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/09
pkt = NCP(0x09, "Log Logical Record", 'sync')
pkt.Request( (11, 138), [
rec( 7, 1, LockFlag ),
rec( 8, 2, TimeoutLimit, ENC_BIG_ENDIAN ),
rec( 10, (1, 128), LogicalRecordName, info_str=(LogicalRecordName, "Log Logical Record: %s", ", %s")),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xfe0d, 0xff1a])
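# The info_str tuple above appears to name the field to echo into the
# Info column, the format used for its first value, and the format
# appended for each further value (an inference from usage, not a spec).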
# 2222/0A, 10
pkt = NCP(0x0A, "Lock Logical Record Set", 'sync')
pkt.Request( 10, [
rec( 7, 1, LockFlag ),
rec( 8, 2, TimeoutLimit ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xfe0d, 0xff1a])
# 2222/0B, 11
pkt = NCP(0x0B, "Clear Logical Record", 'sync')
pkt.Request( (8, 135), [
rec( 7, (1, 128), LogicalRecordName, info_str=(LogicalRecordName, "Clear Logical Record: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xff1a])
# 2222/0C, 12
pkt = NCP(0x0C, "Release Logical Record", 'sync')
pkt.Request( (8, 135), [
rec( 7, (1, 128), LogicalRecordName, info_str=(LogicalRecordName, "Release Logical Record: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xff1a])
# 2222/0D, 13
pkt = NCP(0x0D, "Release Logical Record Set", 'sync')
pkt.Request( 8, [
rec( 7, 1, LockFlag ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/0E, 14
pkt = NCP(0x0E, "Clear Logical Record Set", 'sync')
pkt.Request( 8, [
rec( 7, 1, LockFlag ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/1100, 17/00
pkt = NCP(0x1100, "Write to Spool File", 'print')
pkt.Request( (11, 16), [
rec( 10, ( 1, 6 ), Data, info_str=(Data, "Write to Spool File: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0104, 0x8000, 0x8101, 0x8701, 0x8800,
0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9400, 0x9500,
0x9600, 0x9804, 0x9900, 0xa100, 0xa201, 0xff19])
# 2222/1101, 17/01
pkt = NCP(0x1101, "Close Spool File", 'print')
pkt.Request( 11, [
rec( 10, 1, AbortQueueFlag ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8701, 0x8800, 0x8d00,
0x8e00, 0x8f00, 0x9001, 0x9300, 0x9400, 0x9500,
0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03, 0x9d00,
0xa100, 0xd000, 0xd100, 0xd202, 0xd300, 0xd400,
0xda01, 0xe800, 0xea00, 0xeb00, 0xec00, 0xfc06,
0xfd00, 0xfe07, 0xff06])
# 2222/1102, 17/02
pkt = NCP(0x1102, "Set Spool File Flags", 'print')
pkt.Request( 30, [
rec( 10, 1, PrintFlags ),
rec( 11, 1, TabSize ),
rec( 12, 1, TargetPrinter ),
rec( 13, 1, Copies ),
rec( 14, 1, FormType ),
rec( 15, 1, Reserved ),
rec( 16, 14, BannerName ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xd202, 0xd300, 0xe800, 0xea00,
0xeb00, 0xec00, 0xfc06, 0xfe07, 0xff06])
# 2222/1103, 17/03
pkt = NCP(0x1103, "Spool A Disk File", 'print')
pkt.Request( (12, 23), [
rec( 10, 1, DirHandle ),
rec( 11, (1, 12), Data, info_str=(Data, "Spool a Disk File: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8701, 0x8800, 0x8d00,
0x8e00, 0x8f00, 0x9001, 0x9300, 0x9400, 0x9500,
0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03, 0x9d00,
0xa100, 0xd000, 0xd100, 0xd202, 0xd300, 0xd400,
0xda01, 0xe800, 0xea00, 0xeb00, 0xec00, 0xfc06,
0xfd00, 0xfe07, 0xff06])
# 2222/1106, 17/06
pkt = NCP(0x1106, "Get Printer Status", 'print')
pkt.Request( 11, [
rec( 10, 1, TargetPrinter ),
])
pkt.Reply(12, [
rec( 8, 1, PrinterHalted ),
rec( 9, 1, PrinterOffLine ),
rec( 10, 1, CurrentFormType ),
rec( 11, 1, RedirectedPrinter ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xfb05, 0xfd00, 0xff06])
# 2222/1109, 17/09
pkt = NCP(0x1109, "Create Spool File", 'print')
pkt.Request( (12, 23), [
rec( 10, 1, DirHandle ),
rec( 11, (1, 12), Data, info_str=(Data, "Create Spool File: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8400, 0x8701, 0x8d00,
0x8f00, 0x9001, 0x9400, 0x9600, 0x9804, 0x9900,
0x9b03, 0x9c03, 0xa100, 0xd000, 0xd100, 0xd202,
0xd300, 0xd400, 0xda01, 0xe800, 0xea00, 0xeb00,
0xec00, 0xfc06, 0xfd00, 0xfe07, 0xff06])
# 2222/110A, 17/10
pkt = NCP(0x110A, "Get Printer's Queue", 'print')
pkt.Request( 11, [
rec( 10, 1, TargetPrinter ),
])
pkt.Reply( 12, [
rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xff06])
# 2222/12, 18
pkt = NCP(0x12, "Get Volume Info with Number", 'file')
pkt.Request( 8, [
rec( 7, 1, VolumeNumber, info_str=(VolumeNumber, "Get Volume Information for Volume %d", ", %d") )
])
pkt.Reply( 36, [
rec( 8, 2, SectorsPerCluster, ENC_BIG_ENDIAN ),
rec( 10, 2, TotalVolumeClusters, ENC_BIG_ENDIAN ),
rec( 12, 2, AvailableClusters, ENC_BIG_ENDIAN ),
rec( 14, 2, TotalDirectorySlots, ENC_BIG_ENDIAN ),
rec( 16, 2, AvailableDirectorySlots, ENC_BIG_ENDIAN ),
rec( 18, 16, VolumeName ),
rec( 34, 2, RemovableFlag, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x9804])
# 2222/13, 19
pkt = NCP(0x13, "Get Station Number", 'connection')
pkt.Request(7)
pkt.Reply(11, [
rec( 8, 3, StationNumber )
])
pkt.CompletionCodes([0x0000, 0xff00])
# 2222/14, 20
pkt = NCP(0x14, "Get File Server Date And Time", 'fileserver')
pkt.Request(7)
pkt.Reply(15, [
rec( 8, 1, Year ),
rec( 9, 1, Month ),
rec( 10, 1, Day ),
rec( 11, 1, Hour ),
rec( 12, 1, Minute ),
rec( 13, 1, Second ),
rec( 14, 1, DayOfWeek ),
])
pkt.CompletionCodes([0x0000])
# 2222/1500, 21/00
pkt = NCP(0x1500, "Send Broadcast Message", 'message')
pkt.Request((13, 70), [
rec( 10, 1, ClientListLen, var="x" ),
rec( 11, 1, TargetClientList, repeat="x" ),
rec( 12, (1, 58), TargetMessage, info_str=(TargetMessage, "Send Broadcast Message: %s", ", %s") ),
])
pkt.Reply(10, [
rec( 8, 1, ClientListLen, var="x" ),
rec( 9, 1, SendStatus, repeat="x" )
])
pkt.CompletionCodes([0x0000, 0xfd00])
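# In the request above, var="x" seems to mark ClientListLen as a count
# and repeat="x" repeats TargetClientList that many times; this reading
# is inferred from how the two keywords are paired throughout the file.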
# 2222/1501, 21/01
pkt = NCP(0x1501, "Get Broadcast Message", 'message')
pkt.Request(10)
pkt.Reply((9,66), [
rec( 8, (1, 58), TargetMessage )
])
pkt.CompletionCodes([0x0000, 0xfd00])
# 2222/1502, 21/02
pkt = NCP(0x1502, "Disable Broadcasts", 'message')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xfb0a])
# 2222/1503, 21/03
pkt = NCP(0x1503, "Enable Broadcasts", 'message')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/1509, 21/09
pkt = NCP(0x1509, "Broadcast To Console", 'message')
pkt.Request((11, 68), [
rec( 10, (1, 58), TargetMessage, info_str=(TargetMessage, "Broadcast to Console: %s", ", %s") )
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/150A, 21/10
pkt = NCP(0x150A, "Send Broadcast Message", 'message')
pkt.Request((17, 74), [
rec( 10, 2, ClientListCount, ENC_LITTLE_ENDIAN, var="x" ),
rec( 12, 4, ClientList, ENC_LITTLE_ENDIAN, repeat="x" ),
rec( 16, (1, 58), TargetMessage, info_str=(TargetMessage, "Send Broadcast Message: %s", ", %s") ),
])
pkt.Reply(14, [
rec( 8, 2, ClientListCount, ENC_LITTLE_ENDIAN, var="x" ),
rec( 10, 4, ClientCompFlag, ENC_LITTLE_ENDIAN, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0xfd00])
# 2222/150B, 21/11
pkt = NCP(0x150B, "Get Broadcast Message", 'message')
pkt.Request(10)
pkt.Reply((9,66), [
rec( 8, (1, 58), TargetMessage )
])
pkt.CompletionCodes([0x0000, 0xfd00])
# 2222/150C, 21/12
pkt = NCP(0x150C, "Connection Message Control", 'message')
pkt.Request(22, [
rec( 10, 1, ConnectionControlBits ),
rec( 11, 3, Reserved3 ),
rec( 14, 4, ConnectionListCount, ENC_LITTLE_ENDIAN, var="x" ),
rec( 18, 4, ConnectionList, ENC_LITTLE_ENDIAN, repeat="x" ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xff00])
# 2222/1600, 22/0
pkt = NCP(0x1600, "Set Directory Handle", 'file')
pkt.Request((13,267), [
rec( 10, 1, TargetDirHandle ),
rec( 11, 1, DirHandle ),
rec( 12, (1, 255), Path, info_str=(Path, "Set Directory Handle to: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfa00,
0xfd00, 0xff00])
# 2222/1601, 22/1
pkt = NCP(0x1601, "Get Directory Path", 'file')
pkt.Request(11, [
rec( 10, 1, DirHandle, info_str=(DirHandle, "Get Directory Path for Directory Handle %d", ", %d") ),
])
pkt.Reply((9,263), [
rec( 8, (1,255), Path ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9b00, 0x9c00, 0xa100])
# 2222/1602, 22/2
pkt = NCP(0x1602, "Scan Directory Information", 'file')
pkt.Request((14,268), [
rec( 10, 1, DirHandle ),
rec( 11, 2, StartingSearchNumber, ENC_BIG_ENDIAN ),
rec( 13, (1, 255), Path, info_str=(Path, "Scan Directory Information: %s", ", %s") ),
])
pkt.Reply(36, [
rec( 8, 16, DirectoryPath ),
rec( 24, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 26, 2, CreationTime, ENC_BIG_ENDIAN ),
rec( 28, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 32, 1, AccessRightsMask ),
rec( 33, 1, Reserved ),
rec( 34, 2, NextSearchNumber, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfa00,
0xfd00, 0xff00])
# 2222/1603, 22/3
pkt = NCP(0x1603, "Get Effective Directory Rights", 'file')
pkt.Request((12,266), [
rec( 10, 1, DirHandle ),
rec( 11, (1, 255), Path, info_str=(Path, "Get Effective Directory Rights: %s", ", %s") ),
])
pkt.Reply(9, [
rec( 8, 1, AccessRightsMask ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfa00,
0xfd00, 0xff00])
# 2222/1604, 22/4
pkt = NCP(0x1604, "Modify Maximum Rights Mask", 'file')
pkt.Request((14,268), [
rec( 10, 1, DirHandle ),
rec( 11, 1, RightsGrantMask ),
rec( 12, 1, RightsRevokeMask ),
rec( 13, (1, 255), Path, info_str=(Path, "Modify Maximum Rights Mask: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100, 0xfa00,
0xfd00, 0xff00])
# 2222/1605, 22/5
pkt = NCP(0x1605, "Get Volume Number", 'file')
pkt.Request((11, 265), [
rec( 10, (1,255), VolumeNameLen, info_str=(VolumeNameLen, "Get Volume Number for: %s", ", %s") ),
])
pkt.Reply(9, [
rec( 8, 1, VolumeNumber ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9804])
# 2222/1606, 22/6
pkt = NCP(0x1606, "Get Volume Name", 'file')
pkt.Request(11, [
rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Name for Volume %d", ", %d") ),
])
pkt.Reply((9, 263), [
rec( 8, (1,255), VolumeNameLen ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0xff00])
# 2222/160A, 22/10
pkt = NCP(0x160A, "Create Directory", 'file')
pkt.Request((13,267), [
rec( 10, 1, DirHandle ),
rec( 11, 1, AccessRightsMask ),
rec( 12, (1, 255), Path, info_str=(Path, "Create Directory: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8400, 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03,
0x9e00, 0xa100, 0xfd00, 0xff00])
# 2222/160B, 22/11
pkt = NCP(0x160B, "Delete Directory", 'file')
pkt.Request((13,267), [
rec( 10, 1, DirHandle ),
rec( 11, 1, Reserved ),
rec( 12, (1, 255), Path, info_str=(Path, "Delete Directory: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8a00, 0x9600, 0x9804, 0x9b03, 0x9c03,
0x9f00, 0xa000, 0xa100, 0xfd00, 0xff00])
# 2222/160C, 22/12
pkt = NCP(0x160C, "Scan Directory for Trustees", 'file')
pkt.Request((13,267), [
rec( 10, 1, DirHandle ),
rec( 11, 1, TrusteeSetNumber ),
rec( 12, (1, 255), Path, info_str=(Path, "Scan Directory for Trustees: %s", ", %s") ),
])
pkt.Reply(57, [
rec( 8, 16, DirectoryPath ),
rec( 24, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 26, 2, CreationTime, ENC_BIG_ENDIAN ),
rec( 28, 4, CreatorID ),
rec( 32, 4, TrusteeID, ENC_BIG_ENDIAN ),
rec( 36, 4, TrusteeID, ENC_BIG_ENDIAN ),
rec( 40, 4, TrusteeID, ENC_BIG_ENDIAN ),
rec( 44, 4, TrusteeID, ENC_BIG_ENDIAN ),
rec( 48, 4, TrusteeID, ENC_BIG_ENDIAN ),
rec( 52, 1, AccessRightsMask ),
rec( 53, 1, AccessRightsMask ),
rec( 54, 1, AccessRightsMask ),
rec( 55, 1, AccessRightsMask ),
rec( 56, 1, AccessRightsMask ),
])
pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9b03, 0x9c03,
0xa100, 0xfd00, 0xff00])
# 2222/160D, 22/13
pkt = NCP(0x160D, "Add Trustee to Directory", 'file')
pkt.Request((17,271), [
rec( 10, 1, DirHandle ),
rec( 11, 4, TrusteeID, ENC_BIG_ENDIAN ),
rec( 15, 1, AccessRightsMask ),
rec( 16, (1, 255), Path, info_str=(Path, "Add Trustee to Directory: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03,
0xa100, 0xfc06, 0xfd00, 0xff00])
# 2222/160E, 22/14
pkt = NCP(0x160E, "Delete Trustee from Directory", 'file')
pkt.Request((17,271), [
rec( 10, 1, DirHandle ),
rec( 11, 4, TrusteeID, ENC_BIG_ENDIAN ),
rec( 15, 1, Reserved ),
rec( 16, (1, 255), Path, info_str=(Path, "Delete Trustee from Directory: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9900, 0x9b03, 0x9c03,
0xa100, 0xfc06, 0xfd00, 0xfe07, 0xff00])
# 2222/160F, 22/15
pkt = NCP(0x160F, "Rename Directory", 'file')
pkt.Request((13, 521), [
rec( 10, 1, DirHandle ),
rec( 11, (1, 255), Path, info_str=(Path, "Rename Directory: %s", ", %s") ),
rec( -1, (1, 255), NewPath ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8b00, 0x9200, 0x9600, 0x9804, 0x9b03, 0x9c03,
0x9e00, 0xa100, 0xef00, 0xfd00, 0xff00])
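# An offset of -1, as for NewPath above, appears to mean "immediately
# after the preceding variable-length field", where no fixed offset can
# be known in advance.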
# 2222/1610, 22/16
pkt = NCP(0x1610, "Purge Erased Files", 'file')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8100, 0x9600, 0x9804, 0xa100, 0xff00])
# 2222/1611, 22/17
pkt = NCP(0x1611, "Recover Erased File", 'file')
pkt.Request(11, [
rec( 10, 1, DirHandle, info_str=(DirHandle, "Recover Erased File from Directory Handle %d", ", %d") ),
])
pkt.Reply(38, [
rec( 8, 15, OldFileName ),
rec( 23, 15, NewFileName ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03,
0xa100, 0xfd00, 0xff00])
# 2222/1612, 22/18
pkt = NCP(0x1612, "Alloc Permanent Directory Handle", 'file')
pkt.Request((13, 267), [
rec( 10, 1, DirHandle ),
rec( 11, 1, DirHandleName ),
rec( 12, (1,255), Path, info_str=(Path, "Allocate Permanent Directory Handle: %s", ", %s") ),
])
pkt.Reply(10, [
rec( 8, 1, DirHandle ),
rec( 9, 1, AccessRightsMask ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9900, 0x9b00, 0x9c03, 0x9d00,
0xa100, 0xfd00, 0xff00])
# 2222/1613, 22/19
pkt = NCP(0x1613, "Alloc Temporary Directory Handle", 'file')
pkt.Request((13, 267), [
rec( 10, 1, DirHandle ),
rec( 11, 1, DirHandleName ),
rec( 12, (1,255), Path, info_str=(Path, "Allocate Temporary Directory Handle: %s", ", %s") ),
])
pkt.Reply(10, [
rec( 8, 1, DirHandle ),
rec( 9, 1, AccessRightsMask ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9900, 0x9c03, 0x9d00,
0xa100, 0xfd00, 0xff00])
# 2222/1614, 22/20
pkt = NCP(0x1614, "Deallocate Directory Handle", 'file')
pkt.Request(11, [
rec( 10, 1, DirHandle, info_str=(DirHandle, "Deallocate Directory Handle %d", ", %d") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9b03])
# 2222/1615, 22/21
pkt = NCP(0x1615, "Get Volume Info with Handle", 'file')
pkt.Request( 11, [
rec( 10, 1, DirHandle, info_str=(DirHandle, "Get Volume Information with Handle %d", ", %d") )
])
pkt.Reply( 36, [
rec( 8, 2, SectorsPerCluster, ENC_BIG_ENDIAN ),
rec( 10, 2, TotalVolumeClusters, ENC_BIG_ENDIAN ),
rec( 12, 2, AvailableClusters, ENC_BIG_ENDIAN ),
rec( 14, 2, TotalDirectorySlots, ENC_BIG_ENDIAN ),
rec( 16, 2, AvailableDirectorySlots, ENC_BIG_ENDIAN ),
rec( 18, 16, VolumeName ),
rec( 34, 2, RemovableFlag, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0xff00])
# 2222/1616, 22/22
pkt = NCP(0x1616, "Alloc Special Temporary Directory Handle", 'file')
pkt.Request((13, 267), [
rec( 10, 1, DirHandle ),
rec( 11, 1, DirHandleName ),
rec( 12, (1,255), Path, info_str=(Path, "Allocate Special Temporary Directory Handle: %s", ", %s") ),
])
pkt.Reply(10, [
rec( 8, 1, DirHandle ),
rec( 9, 1, AccessRightsMask ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9900, 0x9b00, 0x9c03, 0x9d00,
0xa100, 0xfd00, 0xff00])
# 2222/1617, 22/23
pkt = NCP(0x1617, "Extract a Base Handle", 'file')
pkt.Request(11, [
rec( 10, 1, DirHandle, info_str=(DirHandle, "Extract a Base Handle from Directory Handle %d", ", %d") ),
])
pkt.Reply(22, [
rec( 8, 10, ServerNetworkAddress ),
rec( 18, 4, DirHandleLong ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9b03])
# 2222/1618, 22/24
pkt = NCP(0x1618, "Restore an Extracted Base Handle", 'file')
pkt.Request(24, [
rec( 10, 10, ServerNetworkAddress ),
rec( 20, 4, DirHandleLong ),
])
pkt.Reply(10, [
rec( 8, 1, DirHandle ),
rec( 9, 1, AccessRightsMask ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c00, 0x9d00, 0xa100,
0xfd00, 0xff00])
# 2222/1619, 22/25
pkt = NCP(0x1619, "Set Directory Information", 'file')
pkt.Request((21, 275), [
rec( 10, 1, DirHandle ),
rec( 11, 2, CreationDate ),
rec( 13, 2, CreationTime ),
rec( 15, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 19, 1, AccessRightsMask ),
rec( 20, (1,255), Path, info_str=(Path, "Set Directory Information: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9804, 0x9b03, 0x9c00, 0xa100,
0xff16])
# 2222/161A, 22/26
pkt = NCP(0x161A, "Get Path Name of a Volume-Directory Number Pair", 'file')
pkt.Request(13, [
rec( 10, 1, VolumeNumber ),
rec( 11, 2, DirectoryEntryNumberWord ),
])
pkt.Reply((9,263), [
rec( 8, (1,255), Path ),
])
pkt.CompletionCodes([0x0000, 0x9804, 0x9c00, 0xa100])
# 2222/161B, 22/27
pkt = NCP(0x161B, "Scan Salvageable Files", 'file')
pkt.Request(15, [
rec( 10, 1, DirHandle ),
rec( 11, 4, SequenceNumber ),
])
pkt.Reply(140, [
rec( 8, 4, SequenceNumber ),
rec( 12, 2, Subdirectory ),
rec( 14, 2, Reserved2 ),
rec( 16, 4, AttributesDef32 ),
rec( 20, 1, UniqueID ),
rec( 21, 1, FlagsDef ),
rec( 22, 1, DestNameSpace ),
rec( 23, 1, FileNameLen ),
rec( 24, 12, FileName12 ),
rec( 36, 2, CreationTime ),
rec( 38, 2, CreationDate ),
rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 44, 2, ArchivedTime ),
rec( 46, 2, ArchivedDate ),
rec( 48, 4, ArchiverID, ENC_BIG_ENDIAN ),
rec( 52, 2, UpdateTime ),
rec( 54, 2, UpdateDate ),
rec( 56, 4, UpdateID, ENC_BIG_ENDIAN ),
rec( 60, 4, FileSize, ENC_BIG_ENDIAN ),
rec( 64, 44, Reserved44 ),
rec( 108, 2, InheritedRightsMask ),
rec( 110, 2, LastAccessedDate ),
rec( 112, 4, DeletedFileTime ),
rec( 116, 2, DeletedTime ),
rec( 118, 2, DeletedDate ),
rec( 120, 4, DeletedID, ENC_BIG_ENDIAN ),
rec( 124, 16, Reserved16 ),
])
pkt.CompletionCodes([0x0000, 0xfb01, 0x9801, 0xff1d])
# 2222/161C, 22/28
pkt = NCP(0x161C, "Recover Salvageable File", 'file')
pkt.Request((17,525), [
rec( 10, 1, DirHandle ),
rec( 11, 4, SequenceNumber ),
rec( 15, (1, 255), FileName, info_str=(FileName, "Recover File: %s", ", %s") ),
rec( -1, (1, 255), NewFileNameLen ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8401, 0x9c03, 0xfe02])
# 2222/161D, 22/29
pkt = NCP(0x161D, "Purge Salvageable File", 'file')
pkt.Request(15, [
rec( 10, 1, DirHandle ),
rec( 11, 4, SequenceNumber ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8500, 0x9c03])
# 2222/161E, 22/30
pkt = NCP(0x161E, "Scan a Directory", 'file')
pkt.Request((17, 271), [
rec( 10, 1, DirHandle ),
rec( 11, 1, DOSFileAttributes ),
rec( 12, 4, SequenceNumber ),
rec( 16, (1, 255), SearchPattern, info_str=(SearchPattern, "Scan a Directory: %s", ", %s") ),
])
pkt.Reply(140, [
rec( 8, 4, SequenceNumber ),
rec( 12, 4, Subdirectory ),
rec( 16, 4, AttributesDef32 ),
rec( 20, 1, UniqueID, ENC_LITTLE_ENDIAN ),
rec( 21, 1, PurgeFlags ),
rec( 22, 1, DestNameSpace ),
rec( 23, 1, NameLen ),
rec( 24, 12, Name12 ),
rec( 36, 2, CreationTime ),
rec( 38, 2, CreationDate ),
rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 44, 2, ArchivedTime ),
rec( 46, 2, ArchivedDate ),
rec( 48, 4, ArchiverID, ENC_BIG_ENDIAN ),
rec( 52, 2, UpdateTime ),
rec( 54, 2, UpdateDate ),
rec( 56, 4, UpdateID, ENC_BIG_ENDIAN ),
rec( 60, 4, FileSize, ENC_BIG_ENDIAN ),
rec( 64, 44, Reserved44 ),
rec( 108, 2, InheritedRightsMask ),
rec( 110, 2, LastAccessedDate ),
rec( 112, 28, Reserved28 ),
])
pkt.CompletionCodes([0x0000, 0x8500, 0x9c03])
# 2222/161F, 22/31
pkt = NCP(0x161F, "Get Directory Entry", 'file')
pkt.Request(11, [
rec( 10, 1, DirHandle ),
])
pkt.Reply(136, [
rec( 8, 4, Subdirectory ),
rec( 12, 4, AttributesDef32 ),
rec( 16, 1, UniqueID, ENC_LITTLE_ENDIAN ),
rec( 17, 1, PurgeFlags ),
rec( 18, 1, DestNameSpace ),
rec( 19, 1, NameLen ),
rec( 20, 12, Name12 ),
rec( 32, 2, CreationTime ),
rec( 34, 2, CreationDate ),
rec( 36, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 40, 2, ArchivedTime ),
rec( 42, 2, ArchivedDate ),
rec( 44, 4, ArchiverID, ENC_BIG_ENDIAN ),
rec( 48, 2, UpdateTime ),
rec( 50, 2, UpdateDate ),
rec( 52, 4, NextTrusteeEntry, ENC_BIG_ENDIAN ),
rec( 56, 48, Reserved48 ),
rec( 104, 2, MaximumSpace ),
rec( 106, 2, InheritedRightsMask ),
rec( 108, 28, Undefined28 ),
])
pkt.CompletionCodes([0x0000, 0x8900, 0xbf00, 0xfb00])
# 2222/1620, 22/32
pkt = NCP(0x1620, "Scan Volume's User Disk Restrictions", 'file')
pkt.Request(15, [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, SequenceNumber ),
])
pkt.Reply(17, [
rec( 8, 1, NumberOfEntries, var="x" ),
rec( 9, 8, ObjectIDStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9800])
# 2222/1621, 22/33
pkt = NCP(0x1621, "Add User Disk Space Restriction", 'file')
pkt.Request(19, [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, ObjectID ),
rec( 15, 4, DiskSpaceLimit ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9800])
# 2222/1622, 22/34
pkt = NCP(0x1622, "Remove User Disk Space Restrictions", 'file')
pkt.Request(15, [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, ObjectID ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8c00, 0xfe0e])
# 2222/1623, 22/35
pkt = NCP(0x1623, "Get Directory Disk Space Restriction", 'file')
pkt.Request(11, [
rec( 10, 1, DirHandle ),
])
pkt.Reply(18, [
rec( 8, 1, NumberOfEntries ),
rec( 9, 1, Level ),
rec( 10, 4, MaxSpace ),
rec( 14, 4, CurrentSpace ),
])
pkt.CompletionCodes([0x0000])
# 2222/1624, 22/36
pkt = NCP(0x1624, "Set Directory Disk Space Restriction", 'file')
pkt.Request(15, [
rec( 10, 1, DirHandle ),
rec( 11, 4, DiskSpaceLimit ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0101, 0x8c00, 0xbf00])
# 2222/1625, 22/37
pkt = NCP(0x1625, "Set Directory Entry Information", 'file')
pkt.Request(NO_LENGTH_CHECK, [
#
# XXX - this didn't match what was in the spec for 22/37
# on the Novell Web site.
#
rec( 10, 1, DirHandle ),
rec( 11, 1, SearchAttributes ),
rec( 12, 4, SequenceNumber ),
rec( 16, 2, ChangeBits ),
rec( 18, 2, Reserved2 ),
rec( 20, 4, Subdirectory ),
#srec(DOSDirectoryEntryStruct, req_cond="ncp.search_att_sub == TRUE"),
srec(DOSFileEntryStruct, req_cond="ncp.search_att_sub == FALSE"),
])
pkt.Reply(8)
pkt.ReqCondSizeConstant()
pkt.CompletionCodes([0x0000, 0x0106, 0x8c00, 0xbf00])
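# The srec() record above appears to be a conditional sub-structure:
# the req_cond display-filter expression selects whether it applies,
# and ReqCondSizeConstant() presumably tells the generator that the
# alternatives all have the same size. (Inferred from usage here.)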
# 2222/1626, 22/38
pkt = NCP(0x1626, "Scan File or Directory for Extended Trustees", 'file')
pkt.Request((13,267), [
rec( 10, 1, DirHandle ),
rec( 11, 1, SequenceByte ),
rec( 12, (1, 255), Path, info_str=(Path, "Scan for Extended Trustees: %s", ", %s") ),
])
pkt.Reply(91, [
rec( 8, 1, NumberOfEntries, var="x" ),
rec( 9, 4, ObjectID ),
rec( 13, 4, ObjectID ),
rec( 17, 4, ObjectID ),
rec( 21, 4, ObjectID ),
rec( 25, 4, ObjectID ),
rec( 29, 4, ObjectID ),
rec( 33, 4, ObjectID ),
rec( 37, 4, ObjectID ),
rec( 41, 4, ObjectID ),
rec( 45, 4, ObjectID ),
rec( 49, 4, ObjectID ),
rec( 53, 4, ObjectID ),
rec( 57, 4, ObjectID ),
rec( 61, 4, ObjectID ),
rec( 65, 4, ObjectID ),
rec( 69, 4, ObjectID ),
rec( 73, 4, ObjectID ),
rec( 77, 4, ObjectID ),
rec( 81, 4, ObjectID ),
rec( 85, 4, ObjectID ),
rec( 89, 2, AccessRightsMaskWord, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9800, 0x9b00, 0x9c00])
# 2222/1627, 22/39
pkt = NCP(0x1627, "Add Extended Trustee to Directory or File", 'file')
pkt.Request((18,272), [
rec( 10, 1, DirHandle ),
rec( 11, 4, ObjectID, ENC_BIG_ENDIAN ),
rec( 15, 2, TrusteeRights ),
rec( 17, (1, 255), Path, info_str=(Path, "Add Extended Trustee: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9000])
# 2222/1628, 22/40
pkt = NCP(0x1628, "Scan Directory Disk Space", 'file')
pkt.Request((17,271), [
rec( 10, 1, DirHandle ),
rec( 11, 1, SearchAttributes ),
rec( 12, 4, SequenceNumber ),
rec( 16, (1, 255), SearchPattern, info_str=(SearchPattern, "Scan Directory Disk Space: %s", ", %s") ),
])
pkt.Reply(148, [
rec( 8, 4, SequenceNumber ),
rec( 12, 4, Subdirectory ),
rec( 16, 4, AttributesDef32 ),
rec( 20, 1, UniqueID ),
rec( 21, 1, PurgeFlags ),
rec( 22, 1, DestNameSpace ),
rec( 23, 1, NameLen ),
rec( 24, 12, Name12 ),
rec( 36, 2, CreationTime ),
rec( 38, 2, CreationDate ),
rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 44, 2, ArchivedTime ),
rec( 46, 2, ArchivedDate ),
rec( 48, 4, ArchiverID, ENC_BIG_ENDIAN ),
rec( 52, 2, UpdateTime ),
rec( 54, 2, UpdateDate ),
rec( 56, 4, UpdateID, ENC_BIG_ENDIAN ),
rec( 60, 4, DataForkSize, ENC_BIG_ENDIAN ),
rec( 64, 4, DataForkFirstFAT, ENC_BIG_ENDIAN ),
rec( 68, 4, NextTrusteeEntry, ENC_BIG_ENDIAN ),
rec( 72, 36, Reserved36 ),
rec( 108, 2, InheritedRightsMask ),
rec( 110, 2, LastAccessedDate ),
rec( 112, 4, DeletedFileTime ),
rec( 116, 2, DeletedTime ),
rec( 118, 2, DeletedDate ),
rec( 120, 4, DeletedID, ENC_BIG_ENDIAN ),
rec( 124, 8, Undefined8 ),
rec( 132, 4, PrimaryEntry, ENC_LITTLE_ENDIAN ),
rec( 136, 4, NameList, ENC_LITTLE_ENDIAN ),
rec( 140, 4, OtherFileForkSize, ENC_BIG_ENDIAN ),
rec( 144, 4, OtherFileForkFAT, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x8900, 0x9c03, 0xfb01, 0xff00])
# 2222/1629, 22/41
pkt = NCP(0x1629, "Get Object Disk Usage and Restrictions", 'file')
pkt.Request(15, [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, ObjectID, ENC_LITTLE_ENDIAN ),
])
pkt.Reply(16, [
rec( 8, 4, Restriction ),
rec( 12, 4, InUse ),
])
pkt.CompletionCodes([0x0000, 0x9802])
# 2222/162A, 22/42
pkt = NCP(0x162A, "Get Effective Rights for Directory Entry", 'file')
pkt.Request((12,266), [
rec( 10, 1, DirHandle ),
rec( 11, (1, 255), Path, info_str=(Path, "Get Effective Rights: %s", ", %s") ),
])
pkt.Reply(10, [
rec( 8, 2, AccessRightsMaskWord ),
])
pkt.CompletionCodes([0x0000, 0x9804, 0x9c03])
# 2222/162B, 22/43
pkt = NCP(0x162B, "Remove Extended Trustee from Dir or File", 'file')
pkt.Request((17,271), [
rec( 10, 1, DirHandle ),
rec( 11, 4, ObjectID, ENC_BIG_ENDIAN ),
rec( 15, 1, Unused ),
rec( 16, (1, 255), Path, info_str=(Path, "Remove Extended Trustee from %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9002, 0x9c03, 0xfe0f, 0xff09])
# 2222/162C, 22/44
pkt = NCP(0x162C, "Get Volume and Purge Information", 'file')
pkt.Request( 11, [
rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Volume and Purge Information for Volume %d", ", %d") )
])
pkt.Reply( (38,53), [
rec( 8, 4, TotalBlocks ),
rec( 12, 4, FreeBlocks ),
rec( 16, 4, PurgeableBlocks ),
rec( 20, 4, NotYetPurgeableBlocks ),
rec( 24, 4, TotalDirectoryEntries ),
rec( 28, 4, AvailableDirEntries ),
rec( 32, 4, Reserved4 ),
rec( 36, 1, SectorsPerBlock ),
rec( 37, (1,16), VolumeNameLen ),
])
pkt.CompletionCodes([0x0000])
# 2222/162D, 22/45
pkt = NCP(0x162D, "Get Directory Information", 'file')
pkt.Request( 11, [
rec( 10, 1, DirHandle )
])
pkt.Reply( (30, 45), [
rec( 8, 4, TotalBlocks ),
rec( 12, 4, AvailableBlocks ),
rec( 16, 4, TotalDirectoryEntries ),
rec( 20, 4, AvailableDirEntries ),
rec( 24, 4, Reserved4 ),
rec( 28, 1, SectorsPerBlock ),
rec( 29, (1,16), VolumeNameLen ),
])
pkt.CompletionCodes([0x0000, 0x9b03])
# 2222/162E, 22/46
pkt = NCP(0x162E, "Rename Or Move", 'file')
pkt.Request( (17,525), [
rec( 10, 1, SourceDirHandle ),
rec( 11, 1, SearchAttributes ),
rec( 12, 1, SourcePathComponentCount ),
rec( 13, (1,255), SourcePath, info_str=(SourcePath, "Rename or Move: %s", ", %s") ),
rec( -1, 1, DestDirHandle ),
rec( -1, 1, DestPathComponentCount ),
rec( -1, (1,255), DestPath ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x8701, 0x8b00, 0x8d00, 0x8e00,
0x8f00, 0x9001, 0x9101, 0x9201, 0x9a00, 0x9b03,
0x9c03, 0xa400, 0xff17])
# 2222/162F, 22/47
pkt = NCP(0x162F, "Get Name Space Information", 'file')
pkt.Request( 11, [
rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Name Space Information for Volume %d", ", %d") )
])
pkt.Reply( (15,523), [
#
# XXX - why does this not display anything at all
# if the stuff after the first IndexNumber is
# un-commented? That stuff really is there....
#
rec( 8, 1, DefinedNameSpaces, var="v" ),
rec( 9, (1,255), NameSpaceName, repeat="v" ),
rec( -1, 1, DefinedDataStreams, var="w" ),
rec( -1, (2,256), DataStreamInfo, repeat="w" ),
rec( -1, 1, LoadedNameSpaces, var="x" ),
rec( -1, 1, IndexNumber, repeat="x" ),
# rec( -1, 1, VolumeNameSpaces, var="y" ),
# rec( -1, 1, IndexNumber, repeat="y" ),
# rec( -1, 1, VolumeDataStreams, var="z" ),
# rec( -1, 1, IndexNumber, repeat="z" ),
])
pkt.CompletionCodes([0x0000, 0x9802, 0xff00])
# 2222/1630, 22/48
pkt = NCP(0x1630, "Get Name Space Directory Entry", 'file')
pkt.Request( 16, [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, DOSSequence ),
rec( 15, 1, SrcNameSpace ),
])
pkt.Reply( 112, [
rec( 8, 4, SequenceNumber ),
rec( 12, 4, Subdirectory ),
rec( 16, 4, AttributesDef32 ),
rec( 20, 1, UniqueID ),
rec( 21, 1, Flags ),
rec( 22, 1, SrcNameSpace ),
rec( 23, 1, NameLength ),
rec( 24, 12, Name12 ),
rec( 36, 2, CreationTime ),
rec( 38, 2, CreationDate ),
rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 44, 2, ArchivedTime ),
rec( 46, 2, ArchivedDate ),
rec( 48, 4, ArchiverID ),
rec( 52, 2, UpdateTime ),
rec( 54, 2, UpdateDate ),
rec( 56, 4, UpdateID ),
rec( 60, 4, FileSize ),
rec( 64, 44, Reserved44 ),
rec( 108, 2, InheritedRightsMask ),
rec( 110, 2, LastAccessedDate ),
])
pkt.CompletionCodes([0x0000, 0x8900, 0x9802, 0xbf00])
# 2222/1631, 22/49
pkt = NCP(0x1631, "Open Data Stream", 'file')
pkt.Request( (15,269), [
rec( 10, 1, DataStream ),
rec( 11, 1, DirHandle ),
rec( 12, 1, AttributesDef ),
rec( 13, 1, OpenRights ),
rec( 14, (1, 255), FileName, info_str=(FileName, "Open Data Stream: %s", ", %s") ),
])
pkt.Reply( 12, [
rec( 8, 4, CCFileHandle, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8200, 0x9002, 0xbe00, 0xff00])
# 2222/1632, 22/50
pkt = NCP(0x1632, "Get Object Effective Rights for Directory Entry", 'file')
pkt.Request( (16,270), [
rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
rec( 14, 1, DirHandle ),
rec( 15, (1, 255), Path, info_str=(Path, "Get Object Effective Rights: %s", ", %s") ),
])
pkt.Reply( 10, [
rec( 8, 2, TrusteeRights ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0x9b00, 0x9c03, 0xfc06])
# 2222/1633, 22/51
pkt = NCP(0x1633, "Get Extended Volume Information", 'file')
pkt.Request( 11, [
rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Extended Volume Information for Volume %d", ", %d") ),
])
pkt.Reply( (139,266), [
rec( 8, 2, VolInfoReplyLen ),
rec( 10, 128, VolInfoStructure),
rec( 138, (1,128), VolumeNameLen ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0x9804, 0xfb08, 0xff00])
pkt.MakeExpert("ncp1633_reply")
# 2222/1634, 22/52
pkt = NCP(0x1634, "Get Mount Volume List", 'file')
pkt.Request( 22, [
rec( 10, 4, StartVolumeNumber ),
rec( 14, 4, VolumeRequestFlags, ENC_LITTLE_ENDIAN ),
rec( 18, 4, SrcNameSpace ),
])
pkt.Reply( NO_LENGTH_CHECK, [
rec( 8, 4, ItemsInPacket, var="x" ),
rec( 12, 4, NextVolumeNumber ),
srec( VolumeStruct, req_cond="ncp.volume_request_flags==0x0000", repeat="x" ),
srec( VolumeWithNameStruct, req_cond="ncp.volume_request_flags==0x0001", repeat="x" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x9802])
# 2222/1635, 22/53
pkt = NCP(0x1635, "Get Volume Capabilities", 'file')
pkt.Request( 18, [
rec( 10, 4, VolumeNumberLong ),
rec( 14, 4, VersionNumberLong ),
])
pkt.Reply( NO_LENGTH_CHECK, [
rec( 8, 4, VolumeCapabilities ),
rec( 12, 28, Reserved28 ),
rec( 40, 64, VolumeNameStringz ),
rec( 104, 128, VolumeGUID ),
rec( 232, 256, PoolName ),
rec( 488, PROTO_LENGTH_UNKNOWN, VolumeMountPoint ),
])
pkt.CompletionCodes([0x0000, 0x7700, 0x9802, 0xfb01])
# 2222/1636, 22/54
pkt = NCP(0x1636, "Add User Disk Space Restriction 64 Bit Aware", 'file')
pkt.Request(26, [
rec( 10, 4, VolumeNumberLong ),
rec( 14, 4, ObjectID, ENC_LITTLE_ENDIAN ),
rec( 18, 8, DiskSpaceLimit64 ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8c00, 0x9600, 0x9800])
# 2222/1637, 22/55
pkt = NCP(0x1637, "Get Object Disk Usage and Restrictions 64 Bit Aware", 'file')
pkt.Request(18, [
rec( 10, 4, VolumeNumberLong ),
rec( 14, 4, ObjectID, ENC_LITTLE_ENDIAN ),
])
pkt.Reply(24, [
rec( 8, 8, RestrictionQuad ),
rec( 16, 8, InUse64 ),
])
pkt.CompletionCodes([0x0000, 0x9802])
# 2222/1638, 22/56
pkt = NCP(0x1638, "Scan Volume's User Disk Restrictions 64 Bit Aware", 'file')
pkt.Request(18, [
rec( 10, 4, VolumeNumberLong ),
rec( 14, 4, SequenceNumber ),
])
pkt.Reply(24, [
rec( 8, 4, NumberOfEntriesLong, var="x" ),
rec( 12, 12, ObjectIDStruct64, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9800])
# 2222/1639, 22/57
pkt = NCP(0x1639, "Set Directory Disk Space Restriction 64 Bit Aware", 'file')
pkt.Request(26, [
rec( 10, 8, DirHandle64 ),
rec( 18, 8, DiskSpaceLimit64 ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0101, 0x8c00, 0xbf00])
# 2222/163A, 22/58
pkt = NCP(0x163A, "Get Directory Information 64 Bit Aware", 'file')
pkt.Request( 18, [
rec( 10, 8, DirHandle64 )
])
pkt.Reply( (49, 64), [
rec( 8, 8, TotalBlocks64 ),
rec( 16, 8, AvailableBlocks64 ),
rec( 24, 8, TotalDirEntries64 ),
rec( 32, 8, AvailableDirEntries64 ),
rec( 40, 4, Reserved4 ),
rec( 44, 4, SectorsPerBlockLong ),
rec( 48, (1,16), VolumeNameLen ),
])
pkt.CompletionCodes([0x0000, 0x9b03])
# 2222/1641, 22/65
# pkt = NCP(0x1641, "Scan Volume's User Disk Restrictions 64-bit Aware", 'file')
# pkt.Request(18, [
# rec( 10, 4, VolumeNumberLong ),
# rec( 14, 4, SequenceNumber ),
# ])
# pkt.Reply(24, [
# rec( 8, 4, NumberOfEntriesLong, var="x" ),
# rec( 12, 12, ObjectIDStruct64, repeat="x" ),
# ])
# pkt.CompletionCodes([0x0000, 0x9800])
# 2222/1700, 23/00
pkt = NCP(0x1700, "Login User", 'connection')
pkt.Request( (12, 58), [
rec( 10, (1,16), UserName, info_str=(UserName, "Login User: %s", ", %s") ),
rec( -1, (1,32), Password ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9602, 0xc101, 0xc200, 0xc501, 0xd700,
0xd900, 0xda00, 0xdb00, 0xde00, 0xdf00, 0xe800,
0xec00, 0xed00, 0xef00, 0xf001, 0xf100, 0xf200,
0xf600, 0xfb00, 0xfc06, 0xfe07, 0xff00])
# 2222/1701, 23/01
pkt = NCP(0x1701, "Change User Password", 'bindery')
pkt.Request( (13, 90), [
rec( 10, (1,16), UserName, info_str=(UserName, "Change Password for User: %s", ", %s") ),
rec( -1, (1,32), Password ),
rec( -1, (1,32), NewPassword ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xd600, 0xf001, 0xf101, 0xf501,
0xfc06, 0xfe07, 0xff00])
# 2222/1702, 23/02
pkt = NCP(0x1702, "Get User Connection List", 'connection')
pkt.Request( (11, 26), [
rec( 10, (1,16), UserName, info_str=(UserName, "Get User Connection: %s", ", %s") ),
])
pkt.Reply( (9, 136), [
rec( 8, (1, 128), ConnectionNumberList ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00])
# 2222/1703, 23/03
pkt = NCP(0x1703, "Get User Number", 'bindery')
pkt.Request( (11, 26), [
rec( 10, (1,16), UserName, info_str=(UserName, "Get User Number: %s", ", %s") ),
])
pkt.Reply( 12, [
rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00])
# 2222/1705, 23/05
pkt = NCP(0x1705, "Get Station's Logged Info", 'connection')
pkt.Request( 11, [
rec( 10, 1, TargetConnectionNumber, info_str=(TargetConnectionNumber, "Get Station's Logged Information on Connection %d", ", %d") ),
])
pkt.Reply( 266, [
rec( 8, 16, UserName16 ),
rec( 24, 7, LoginTime ),
rec( 31, 39, FullName ),
rec( 70, 4, UserID, ENC_BIG_ENDIAN ),
rec( 74, 128, SecurityEquivalentList ),
rec( 202, 64, Reserved64 ),
])
pkt.CompletionCodes([0x0000, 0x9602, 0xfc06, 0xfd00, 0xfe07, 0xff00])
# 2222/1707, 23/07
pkt = NCP(0x1707, "Get Group Number", 'bindery')
pkt.Request( 14, [
rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
])
pkt.Reply( 62, [
rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ),
rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 14, 48, ObjectNameLen ),
])
pkt.CompletionCodes([0x0000, 0x9602, 0xf101, 0xfc06, 0xfe07, 0xff00])
# 2222/170C, 23/12
pkt = NCP(0x170C, "Verify Serialization", 'fileserver')
pkt.Request( 14, [
rec( 10, 4, ServerSerialNumber ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xff00])
# 2222/170D, 23/13
pkt = NCP(0x170D, "Log Network Message", 'file')
pkt.Request( (11, 68), [
rec( 10, (1, 58), TargetMessage, info_str=(TargetMessage, "Log Network Message: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8100, 0x8800, 0x8d00, 0x8e00, 0x8f00,
0x9001, 0x9400, 0x9600, 0x9804, 0x9900, 0x9b00, 0xa100,
0xa201, 0xff00])
# 2222/170E, 23/14
pkt = NCP(0x170E, "Get Disk Utilization", 'fileserver')
pkt.Request( 15, [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, TrusteeID, ENC_BIG_ENDIAN ),
])
pkt.Reply( 19, [
rec( 8, 1, VolumeNumber ),
rec( 9, 4, TrusteeID, ENC_BIG_ENDIAN ),
rec( 13, 2, DirectoryCount, ENC_BIG_ENDIAN ),
rec( 15, 2, FileCount, ENC_BIG_ENDIAN ),
rec( 17, 2, ClusterCount, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0xa100, 0xf200])
# 2222/170F, 23/15
pkt = NCP(0x170F, "Scan File Information", 'file')
pkt.Request((15,269), [
rec( 10, 2, LastSearchIndex ),
rec( 12, 1, DirHandle ),
rec( 13, 1, SearchAttributes ),
rec( 14, (1, 255), FileName, info_str=(FileName, "Scan File Information: %s", ", %s") ),
])
pkt.Reply( 102, [
rec( 8, 2, NextSearchIndex ),
rec( 10, 14, FileName14 ),
rec( 24, 2, AttributesDef16 ),
rec( 26, 4, FileSize, ENC_BIG_ENDIAN ),
rec( 30, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 32, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
rec( 34, 2, ModifiedDate, ENC_BIG_ENDIAN ),
rec( 36, 2, ModifiedTime, ENC_BIG_ENDIAN ),
rec( 38, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 42, 2, ArchivedDate, ENC_BIG_ENDIAN ),
rec( 44, 2, ArchivedTime, ENC_BIG_ENDIAN ),
rec( 46, 56, Reserved56 ),
])
pkt.CompletionCodes([0x0000, 0x8800, 0x8900, 0x9300, 0x9400, 0x9804, 0x9b00, 0x9c00,
0xa100, 0xfd00, 0xff17])
# 2222/1710, 23/16
pkt = NCP(0x1710, "Set File Information", 'file')
pkt.Request((91,345), [
rec( 10, 2, AttributesDef16 ),
rec( 12, 4, FileSize, ENC_BIG_ENDIAN ),
rec( 16, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 18, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
rec( 20, 2, ModifiedDate, ENC_BIG_ENDIAN ),
rec( 22, 2, ModifiedTime, ENC_BIG_ENDIAN ),
rec( 24, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 28, 2, ArchivedDate, ENC_BIG_ENDIAN ),
rec( 30, 2, ArchivedTime, ENC_BIG_ENDIAN ),
rec( 32, 56, Reserved56 ),
rec( 88, 1, DirHandle ),
rec( 89, 1, SearchAttributes ),
rec( 90, (1, 255), FileName, info_str=(FileName, "Set Information for File: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8800, 0x8c00, 0x8e00, 0x9400, 0x9600, 0x9804,
0x9b03, 0x9c00, 0xa100, 0xa201, 0xfc06, 0xfd00, 0xfe07,
0xff17])
# 2222/1711, 23/17
pkt = NCP(0x1711, "Get File Server Information", 'fileserver')
pkt.Request(10)
pkt.Reply(136, [
rec( 8, 48, ServerName ),
rec( 56, 1, OSMajorVersion ),
rec( 57, 1, OSMinorVersion ),
rec( 58, 2, ConnectionsSupportedMax, ENC_BIG_ENDIAN ),
rec( 60, 2, ConnectionsInUse, ENC_BIG_ENDIAN ),
rec( 62, 2, VolumesSupportedMax, ENC_BIG_ENDIAN ),
rec( 64, 1, OSRevision ),
rec( 65, 1, SFTSupportLevel ),
rec( 66, 1, TTSLevel ),
rec( 67, 2, ConnectionsMaxUsed, ENC_BIG_ENDIAN ),
rec( 69, 1, AccountVersion ),
rec( 70, 1, VAPVersion ),
rec( 71, 1, QueueingVersion ),
rec( 72, 1, PrintServerVersion ),
rec( 73, 1, VirtualConsoleVersion ),
rec( 74, 1, SecurityRestrictionVersion ),
rec( 75, 1, InternetBridgeVersion ),
rec( 76, 1, MixedModePathFlag ),
rec( 77, 1, LocalLoginInfoCcode ),
rec( 78, 2, ProductMajorVersion, ENC_BIG_ENDIAN ),
rec( 80, 2, ProductMinorVersion, ENC_BIG_ENDIAN ),
rec( 82, 2, ProductRevisionVersion, ENC_BIG_ENDIAN ),
rec( 84, 1, OSLanguageID, ENC_LITTLE_ENDIAN ),
rec( 85, 1, SixtyFourBitOffsetsSupportedFlag ),
rec( 86, 1, OESServer ),
rec( 87, 1, OESLinuxOrNetWare ),
rec( 88, 48, Reserved48 ),
])
pkt.MakeExpert("ncp1711_reply")
pkt.CompletionCodes([0x0000, 0x9600])
# 2222/1712, 23/18
pkt = NCP(0x1712, "Get Network Serial Number", 'fileserver')
pkt.Request(10)
pkt.Reply(14, [
rec( 8, 4, ServerSerialNumber ),
rec( 12, 2, ApplicationNumber ),
])
pkt.CompletionCodes([0x0000, 0x9600])
# 2222/1713, 23/19
pkt = NCP(0x1713, "Get Internet Address", 'connection')
pkt.Request(11, [
rec( 10, 1, TargetConnectionNumber, info_str=(TargetConnectionNumber, "Get Internet Address for Connection %d", ", %d") ),
])
pkt.Reply(20, [
rec( 8, 4, NetworkAddress, ENC_BIG_ENDIAN ),
rec( 12, 6, NetworkNodeAddress ),
rec( 18, 2, NetworkSocket, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0xff00])
# 2222/1714, 23/20
pkt = NCP(0x1714, "Login Object", 'connection')
pkt.Request( (14, 60), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,16), ClientName, info_str=(ClientName, "Login Object: %s", ", %s") ),
rec( -1, (1,32), Password ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9602, 0xc101, 0xc200, 0xc501, 0xd600, 0xd700,
0xd900, 0xda00, 0xdb00, 0xde00, 0xdf00, 0xe800, 0xec00,
0xed00, 0xef00, 0xf001, 0xf100, 0xf200, 0xf600, 0xfb00,
0xfc06, 0xfe07, 0xff00])
# 2222/1715, 23/21
pkt = NCP(0x1715, "Get Object Connection List", 'connection')
pkt.Request( (13, 28), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,16), ObjectName, info_str=(ObjectName, "Get Object Connection List: %s", ", %s") ),
])
pkt.Reply( (9, 136), [
rec( 8, (1, 128), ConnectionNumberList ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00])
# 2222/1716, 23/22
pkt = NCP(0x1716, "Get Station's Logged Info", 'connection')
pkt.Request( 11, [
rec( 10, 1, TargetConnectionNumber ),
])
pkt.Reply( 70, [
rec( 8, 4, UserID, ENC_BIG_ENDIAN ),
rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 14, 48, ObjectNameLen ),
rec( 62, 7, LoginTime ),
rec( 69, 1, Reserved ),
])
pkt.CompletionCodes([0x0000, 0x9602, 0xfb0a, 0xfc06, 0xfd00, 0xfe07, 0xff00])
# 2222/1717, 23/23
pkt = NCP(0x1717, "Get Login Key", 'connection')
pkt.Request(10)
pkt.Reply( 16, [
rec( 8, 8, LoginKey ),
])
pkt.CompletionCodes([0x0000, 0x9602])
# 2222/1718, 23/24
pkt = NCP(0x1718, "Keyed Object Login", 'connection')
pkt.Request( (21, 68), [
rec( 10, 8, LoginKey ),
rec( 18, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 20, (1,48), ObjectName, info_str=(ObjectName, "Keyed Object Login: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9602, 0xc101, 0xc200, 0xc500, 0xd904, 0xda00,
0xdb00, 0xdc00, 0xde00, 0xff00])
# 2222/171A, 23/26
pkt = NCP(0x171A, "Get Internet Address", 'connection')
pkt.Request(12, [
rec( 10, 2, TargetConnectionNumber ),
])
# Dissect reply in packet-ncp2222.inc
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
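# Note: Reply(8) declares only the fixed completion-code header here; the
# variable-length address data is dissected by the hand-written code in
# packet-ncp2222.inc referenced above.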
# 2222/171B, 23/27
pkt = NCP(0x171B, "Get Object Connection List", 'connection')
pkt.Request( (17,64), [
rec( 10, 4, SearchConnNumber ),
rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 16, (1,48), ObjectName, info_str=(ObjectName, "Get Object Connection List: %s", ", %s") ),
])
pkt.Reply( 13, [
rec( 8, 1, ConnListLen, var="x" ),
rec( 9, 4, ConnectionNumber, ENC_LITTLE_ENDIAN, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00])
# 2222/171C, 23/28
pkt = NCP(0x171C, "Get Station's Logged Info", 'connection')
pkt.Request( 14, [
rec( 10, 4, TargetConnectionNumber ),
])
pkt.Reply( 70, [
rec( 8, 4, UserID, ENC_BIG_ENDIAN ),
rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 14, 48, ObjectNameLen ),
rec( 62, 7, LoginTime ),
rec( 69, 1, Reserved ),
])
pkt.CompletionCodes([0x0000, 0x7d00, 0x9602, 0xfb02, 0xfc06, 0xfd00, 0xfe07, 0xff00])
# 2222/171D, 23/29
pkt = NCP(0x171D, "Change Connection State", 'connection')
pkt.Request( 11, [
rec( 10, 1, RequestCode ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0109, 0x7a00, 0x7b00, 0x7c00, 0xe000, 0xfb06, 0xfd00])
# 2222/171E, 23/30
pkt = NCP(0x171E, "Set Watchdog Delay Interval", 'connection')
pkt.Request( 14, [
rec( 10, 4, NumberOfMinutesToDelay ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0107])
# 2222/171F, 23/31
pkt = NCP(0x171F, "Get Connection List From Object", 'connection')
pkt.Request( 18, [
rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
rec( 14, 4, ConnectionNumber ),
])
pkt.Reply( (9, 136), [
rec( 8, (1, 128), ConnectionNumberList ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xf001, 0xfc06, 0xfe07, 0xff00])
# 2222/1720, 23/32
pkt = NCP(0x1720, "Scan Bindery Object (List)", 'bindery')
pkt.Request((23,70), [
rec( 10, 4, NextObjectID, ENC_BIG_ENDIAN ),
rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 16, 2, Reserved2 ),
rec( 18, 4, InfoFlags ),
rec( 22, (1,48), ObjectName, info_str=(ObjectName, "Scan Bindery Object: %s", ", %s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 4, ObjectInfoReturnCount ),
rec( 12, 4, NextObjectID, ENC_BIG_ENDIAN ),
rec( 16, 4, ObjectID ),
srec(ObjectTypeStruct, req_cond="ncp.info_flags_type == TRUE"),
srec(ObjectSecurityStruct, req_cond="ncp.info_flags_security == TRUE"),
srec(ObjectFlagsStruct, req_cond="ncp.info_flags_flags == TRUE"),
srec(ObjectNameStruct, req_cond="ncp.info_flags_name == TRUE"),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x9600, 0xef01, 0xfc02, 0xfe01, 0xff00])
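# Note: the srec() entries above are conditional sub-records; each one is
# decoded only when its req_cond expression (driven by the InfoFlags bits
# of the request) is true, and ReqCondSizeVariable() marks the reply
# length as variable to match.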
# 2222/1721, 23/33
pkt = NCP(0x1721, "Generate GUIDs", 'connection')
pkt.Request( 14, [
rec( 10, 4, ReturnInfoCount ),
])
pkt.Reply(28, [
rec( 8, 4, ReturnInfoCount, var="x" ),
rec( 12, 16, GUID, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7e01])
# 2222/1722, 23/34
pkt = NCP(0x1722, "Set Connection Language Encoding", 'connection')
pkt.Request( 22, [
rec( 10, 4, SetMask ),
rec( 14, 4, NCPEncodedStringsBits ),
rec( 18, 4, CodePage ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/1732, 23/50
pkt = NCP(0x1732, "Create Bindery Object", 'bindery')
pkt.Request( (15,62), [
rec( 10, 1, ObjectFlags ),
rec( 11, 1, ObjectSecurity ),
rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 14, (1,48), ObjectName, info_str=(ObjectName, "Create Bindery Object: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xe700, 0xee00, 0xef00, 0xf101, 0xf501,
0xfc06, 0xfe07, 0xff00])
# 2222/1733, 23/51
pkt = NCP(0x1733, "Delete Bindery Object", 'bindery')
pkt.Request( (13,60), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Delete Bindery Object: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf200, 0xf400, 0xf600, 0xfb00,
0xfc06, 0xfe07, 0xff00])
# 2222/1734, 23/52
pkt = NCP(0x1734, "Rename Bindery Object", 'bindery')
pkt.Request( (14,108), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Rename Bindery Object: %s", ", %s") ),
rec( -1, (1,48), NewObjectName ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xee00, 0xf000, 0xf300, 0xfc06, 0xfe07, 0xff00])
# 2222/1735, 23/53
pkt = NCP(0x1735, "Get Bindery Object ID", 'bindery')
pkt.Request((13,60), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Get Bindery Object: %s", ", %s") ),
])
pkt.Reply(62, [
rec( 8, 4, ObjectID, ENC_LITTLE_ENDIAN ),
rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 14, 48, ObjectNameLen ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xef01, 0xf000, 0xfc02, 0xfe01, 0xff00])
# 2222/1736, 23/54
pkt = NCP(0x1736, "Get Bindery Object Name", 'bindery')
pkt.Request( 14, [
rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
])
pkt.Reply( 62, [
rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ),
rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 14, 48, ObjectNameLen ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xf101, 0xfc02, 0xfe01, 0xff00])
# 2222/1737, 23/55
pkt = NCP(0x1737, "Scan Bindery Object", 'bindery')
pkt.Request((17,64), [
rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 16, (1,48), ObjectName, info_str=(ObjectName, "Scan Bindery Object: %s", ", %s") ),
])
pkt.Reply(65, [
rec( 8, 4, ObjectID, ENC_BIG_ENDIAN ),
rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 14, 48, ObjectNameLen ),
rec( 62, 1, ObjectFlags ),
rec( 63, 1, ObjectSecurity ),
rec( 64, 1, ObjectHasProperties ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xef01, 0xfc02,
0xfe01, 0xff00])
# 2222/1738, 23/56
pkt = NCP(0x1738, "Change Bindery Object Security", 'bindery')
pkt.Request((14,61), [
rec( 10, 1, ObjectSecurity ),
rec( 11, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 13, (1,48), ObjectName, info_str=(ObjectName, "Change Bindery Object Security: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf101, 0xf501, 0xfc02, 0xfe01, 0xff00])
# 2222/1739, 23/57
pkt = NCP(0x1739, "Create Property", 'bindery')
pkt.Request((16,78), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName ),
rec( -1, 1, PropertyType ),
rec( -1, 1, ObjectSecurity ),
rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Create Property: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xed00, 0xef00, 0xf000, 0xf101,
0xf200, 0xf600, 0xf700, 0xfb00, 0xfc02, 0xfe01,
0xff00])
# 2222/173A, 23/58
pkt = NCP(0x173A, "Delete Property", 'bindery')
pkt.Request((14,76), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName ),
rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Delete Property: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf101, 0xf600, 0xfb00, 0xfc02,
0xfe01, 0xff00])
# 2222/173B, 23/59
pkt = NCP(0x173B, "Change Property Security", 'bindery')
pkt.Request((15,77), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName ),
rec( -1, 1, ObjectSecurity ),
rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Change Property Security: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf101, 0xf200, 0xf600, 0xfb00,
0xfc02, 0xfe01, 0xff00])
# 2222/173C, 23/60
pkt = NCP(0x173C, "Scan Property", 'bindery')
pkt.Request((18,80), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName ),
rec( -1, 4, LastInstance, ENC_BIG_ENDIAN ),
rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Scan Property: %s", ", %s") ),
])
pkt.Reply( 32, [
rec( 8, 16, PropertyName16 ),
rec( 24, 1, ObjectFlags ),
rec( 25, 1, ObjectSecurity ),
rec( 26, 4, SearchInstance, ENC_BIG_ENDIAN ),
rec( 30, 1, ValueAvailable ),
rec( 31, 1, MoreProperties ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xf000, 0xf101, 0xf200, 0xf600, 0xfb00,
0xfc02, 0xfe01, 0xff00])
# 2222/173D, 23/61
pkt = NCP(0x173D, "Read Property Value", 'bindery')
pkt.Request((15,77), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName ),
rec( -1, 1, PropertySegment ),
rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Read Property Value: %s", ", %s") ),
])
pkt.Reply(138, [
rec( 8, 128, PropertyData ),
rec( 136, 1, PropertyHasMoreSegments ),
rec( 137, 1, PropertyType ),
])
pkt.CompletionCodes([0x0000, 0x8800, 0x9300, 0x9600, 0xec01,
0xf000, 0xf100, 0xf900, 0xfb02, 0xfc02,
0xfe01, 0xff00])
# 2222/173E, 23/62
pkt = NCP(0x173E, "Write Property Value", 'bindery')
pkt.Request((144,206), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName ),
rec( -1, 1, PropertySegment ),
rec( -1, 1, MoreFlag ),
rec( -1, (1,16), PropertyName, info_str=(PropertyName, "Write Property Value: %s", ", %s") ),
#
# XXX - don't show this if MoreFlag isn't set?
# In at least some packets where it's not set,
# PropertyValue appears to be garbage.
#
rec( -1, 128, PropertyValue ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xe800, 0xec01, 0xf000, 0xf800,
0xfb02, 0xfc03, 0xfe01, 0xff00 ])
# 2222/173F, 23/63
pkt = NCP(0x173F, "Verify Bindery Object Password", 'bindery')
pkt.Request((14,92), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Verify Bindery Object Password: %s", ", %s") ),
rec( -1, (1,32), Password ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xe800, 0xec01, 0xf000, 0xf101,
0xfb02, 0xfc03, 0xfe01, 0xff00 ])
# 2222/1740, 23/64
pkt = NCP(0x1740, "Change Bindery Object Password", 'bindery')
pkt.Request((15,124), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Change Bindery Object Password: %s", ", %s") ),
rec( -1, (1,32), Password ),
rec( -1, (1,32), NewPassword ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xc501, 0xd701, 0xe800, 0xec01, 0xf001,
0xf100, 0xf800, 0xfb02, 0xfc03, 0xfe01, 0xff00])
# 2222/1741, 23/65
pkt = NCP(0x1741, "Add Bindery Object To Set", 'bindery')
pkt.Request((17,126), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName ),
rec( -1, (1,16), PropertyName ),
rec( -1, 2, MemberType, ENC_BIG_ENDIAN ),
rec( -1, (1,48), MemberName, info_str=(MemberName, "Add Bindery Object to Set: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xe800, 0xe900, 0xea00, 0xeb00,
0xec01, 0xf000, 0xf800, 0xfb02, 0xfc03, 0xfe01,
0xff00])
# 2222/1742, 23/66
pkt = NCP(0x1742, "Delete Bindery Object From Set", 'bindery')
pkt.Request((17,126), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName ),
rec( -1, (1,16), PropertyName ),
rec( -1, 2, MemberType, ENC_BIG_ENDIAN ),
rec( -1, (1,48), MemberName, info_str=(MemberName, "Delete Bindery Object from Set: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xeb00, 0xf000, 0xf800, 0xfb02,
0xfc03, 0xfe01, 0xff00])
# 2222/1743, 23/67
pkt = NCP(0x1743, "Is Bindery Object In Set", 'bindery')
pkt.Request((17,126), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName ),
rec( -1, (1,16), PropertyName ),
rec( -1, 2, MemberType, ENC_BIG_ENDIAN ),
rec( -1, (1,48), MemberName, info_str=(MemberName, "Is Bindery Object in Set: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xea00, 0xeb00, 0xec01, 0xf000,
0xfb02, 0xfc03, 0xfe01, 0xff00])
# 2222/1744, 23/68
pkt = NCP(0x1744, "Close Bindery", 'bindery')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xff00])
# 2222/1745, 23/69
pkt = NCP(0x1745, "Open Bindery", 'bindery')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xff00])
# 2222/1746, 23/70
pkt = NCP(0x1746, "Get Bindery Access Level", 'bindery')
pkt.Request(10)
pkt.Reply(13, [
rec( 8, 1, ObjectSecurity ),
rec( 9, 4, LoggedObjectID, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x9600])
# 2222/1747, 23/71
pkt = NCP(0x1747, "Scan Bindery Object Trustee Paths", 'bindery')
pkt.Request(17, [
rec( 10, 1, VolumeNumber ),
rec( 11, 2, LastSequenceNumber, ENC_BIG_ENDIAN ),
rec( 13, 4, ObjectID, ENC_BIG_ENDIAN ),
])
pkt.Reply((16,270), [
rec( 8, 2, LastSequenceNumber, ENC_BIG_ENDIAN),
rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
rec( 14, 1, ObjectSecurity ),
rec( 15, (1,255), Path ),
])
pkt.CompletionCodes([0x0000, 0x9300, 0x9600, 0xa100, 0xf000, 0xf100,
0xf200, 0xfc02, 0xfe01, 0xff00])
# 2222/1748, 23/72
pkt = NCP(0x1748, "Get Bindery Object Access Level", 'bindery')
pkt.Request(14, [
rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
])
pkt.Reply(9, [
rec( 8, 1, ObjectSecurity ),
])
pkt.CompletionCodes([0x0000, 0x9600])
# 2222/1749, 23/73
pkt = NCP(0x1749, "Is Calling Station a Manager", 'bindery')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0003, 0xff1e])
# 2222/174A, 23/74
pkt = NCP(0x174A, "Keyed Verify Password", 'bindery')
pkt.Request((21,68), [
rec( 10, 8, LoginKey ),
rec( 18, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 20, (1,48), ObjectName, info_str=(ObjectName, "Keyed Verify Password: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc500, 0xfe01, 0xff0c])
# 2222/174B, 23/75
pkt = NCP(0x174B, "Keyed Change Password", 'bindery')
pkt.Request((22,100), [
rec( 10, 8, LoginKey ),
rec( 18, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 20, (1,48), ObjectName, info_str=(ObjectName, "Keyed Change Password: %s", ", %s") ),
rec( -1, (1,32), Password ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc500, 0xfe01, 0xff0c])
# 2222/174C, 23/76
pkt = NCP(0x174C, "List Relations Of an Object", 'bindery')
pkt.Request((18,80), [
rec( 10, 4, LastSeen, ENC_BIG_ENDIAN ),
rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 16, (1,48), ObjectName, info_str=(ObjectName, "List Relations of an Object: %s", ", %s") ),
rec( -1, (1,16), PropertyName ),
])
pkt.Reply(14, [
rec( 8, 2, RelationsCount, ENC_BIG_ENDIAN, var="x" ),
rec( 10, 4, ObjectID, ENC_BIG_ENDIAN, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0xf000, 0xf200, 0xfe01, 0xff00])
# 2222/1764, 23/100
pkt = NCP(0x1764, "Create Queue", 'qms')
pkt.Request((15,316), [
rec( 10, 2, QueueType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), QueueName, info_str=(QueueName, "Create Queue: %s", ", %s") ),
rec( -1, 1, PathBase ),
rec( -1, (1,255), Path ),
])
pkt.Reply(12, [
rec( 8, 4, QueueID ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9900, 0xd000, 0xd100,
0xd200, 0xd300, 0xd400, 0xd500, 0xd601,
0xd703, 0xd800, 0xd902, 0xda01, 0xdb02,
0xee00, 0xff00])
# 2222/1765, 23/101
pkt = NCP(0x1765, "Destroy Queue", 'qms')
pkt.Request(14, [
rec( 10, 4, QueueID ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/1766, 23/102
pkt = NCP(0x1766, "Read Queue Current Status", 'qms')
pkt.Request(14, [
rec( 10, 4, QueueID ),
])
pkt.Reply(20, [
rec( 8, 4, QueueID ),
rec( 12, 1, QueueStatus ),
rec( 13, 1, CurrentEntries ),
rec( 14, 1, CurrentServers, var="x" ),
rec( 15, 4, ServerID, repeat="x" ),
rec( 19, 1, ServerStationList, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/1767, 23/103
pkt = NCP(0x1767, "Set Queue Current Status", 'qms')
pkt.Request(15, [
rec( 10, 4, QueueID ),
rec( 14, 1, QueueStatus ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07,
0xff00])
# 2222/1768, 23/104
pkt = NCP(0x1768, "Create Queue Job And File", 'qms')
pkt.Request(264, [
rec( 10, 4, QueueID ),
rec( 14, 250, JobStruct ),
])
pkt.Reply(62, [
rec( 8, 1, ClientStation ),
rec( 9, 1, ClientTaskNumber ),
rec( 10, 4, ClientIDNumber, ENC_BIG_ENDIAN ),
rec( 14, 4, TargetServerIDNumber, ENC_BIG_ENDIAN ),
rec( 18, 6, TargetExecutionTime ),
rec( 24, 6, JobEntryTime ),
rec( 30, 2, JobNumber, ENC_BIG_ENDIAN ),
rec( 32, 2, JobType, ENC_BIG_ENDIAN ),
rec( 34, 1, JobPosition ),
rec( 35, 1, JobControlFlags ),
rec( 36, 14, JobFileName ),
rec( 50, 6, JobFileHandle ),
rec( 56, 1, ServerStation ),
rec( 57, 1, ServerTaskNumber ),
rec( 58, 4, ServerID, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07,
0xff00])
# 2222/1769, 23/105
pkt = NCP(0x1769, "Close File And Start Queue Job", 'qms')
pkt.Request(16, [
rec( 10, 4, QueueID ),
rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/176A, 23/106
pkt = NCP(0x176A, "Remove Job From Queue", 'qms')
pkt.Request(16, [
rec( 10, 4, QueueID ),
rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/176B, 23/107
pkt = NCP(0x176B, "Get Queue Job List", 'qms')
pkt.Request(14, [
rec( 10, 4, QueueID ),
])
pkt.Reply(12, [
rec( 8, 2, JobCount, ENC_BIG_ENDIAN, var="x" ),
rec( 10, 2, JobNumber, ENC_BIG_ENDIAN, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/176C, 23/108
pkt = NCP(0x176C, "Read Queue Job Entry", 'qms')
pkt.Request(16, [
rec( 10, 4, QueueID ),
rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
])
pkt.Reply(258, [
rec( 8, 250, JobStruct ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/176D, 23/109
pkt = NCP(0x176D, "Change Queue Job Entry", 'qms')
pkt.Request(260, [
rec( 14, 250, JobStruct ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff18])
# 2222/176E, 23/110
pkt = NCP(0x176E, "Change Queue Job Position", 'qms')
pkt.Request(17, [
rec( 10, 4, QueueID ),
rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
rec( 16, 1, NewPosition ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xd000, 0xd100, 0xd300, 0xd500,
0xd601, 0xfe07, 0xff1f])
# 2222/176F, 23/111
pkt = NCP(0x176F, "Attach Queue Server To Queue", 'qms')
pkt.Request(14, [
rec( 10, 4, QueueID ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xea00,
0xfc06, 0xff00])
# 2222/1770, 23/112
pkt = NCP(0x1770, "Detach Queue Server From Queue", 'qms')
pkt.Request(14, [
rec( 10, 4, QueueID ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/1771, 23/113
pkt = NCP(0x1771, "Service Queue Job", 'qms')
pkt.Request(16, [
rec( 10, 4, QueueID ),
rec( 14, 2, ServiceType, ENC_BIG_ENDIAN ),
])
pkt.Reply(62, [
rec( 8, 1, ClientStation ),
rec( 9, 1, ClientTaskNumber ),
rec( 10, 4, ClientIDNumber, ENC_BIG_ENDIAN ),
rec( 14, 4, TargetServerIDNumber, ENC_BIG_ENDIAN ),
rec( 18, 6, TargetExecutionTime ),
rec( 24, 6, JobEntryTime ),
rec( 30, 2, JobNumber, ENC_BIG_ENDIAN ),
rec( 32, 2, JobType, ENC_BIG_ENDIAN ),
rec( 34, 1, JobPosition ),
rec( 35, 1, JobControlFlags ),
rec( 36, 14, JobFileName ),
rec( 50, 6, JobFileHandle ),
rec( 56, 1, ServerStation ),
rec( 57, 1, ServerTaskNumber ),
rec( 58, 4, ServerID, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/1772, 23/114
pkt = NCP(0x1772, "Finish Servicing Queue Job", 'qms')
pkt.Request(18, [
rec( 10, 4, QueueID ),
rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
rec( 16, 2, ChargeInformation, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff00])
# 2222/1773, 23/115
pkt = NCP(0x1773, "Abort Servicing Queue Job", 'qms')
pkt.Request(16, [
rec( 10, 4, QueueID ),
rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff18])
# 2222/1774, 23/116
pkt = NCP(0x1774, "Change To Client Rights", 'qms')
pkt.Request(16, [
rec( 10, 4, QueueID ),
rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff18])
# 2222/1775, 23/117
pkt = NCP(0x1775, "Restore Queue Server Rights", 'qms')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/1776, 23/118
pkt = NCP(0x1776, "Read Queue Server Current Status", 'qms')
pkt.Request(19, [
rec( 10, 4, QueueID ),
rec( 14, 4, ServerID, ENC_BIG_ENDIAN ),
rec( 18, 1, ServerStation ),
])
pkt.Reply(72, [
rec( 8, 64, ServerStatusRecord ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/1777, 23/119
pkt = NCP(0x1777, "Set Queue Server Current Status", 'qms')
pkt.Request(78, [
rec( 10, 4, QueueID ),
rec( 14, 64, ServerStatusRecord ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/1778, 23/120
pkt = NCP(0x1778, "Get Queue Job File Size", 'qms')
pkt.Request(16, [
rec( 10, 4, QueueID ),
rec( 14, 2, JobNumber, ENC_BIG_ENDIAN ),
])
pkt.Reply(18, [
rec( 8, 4, QueueID ),
rec( 12, 2, JobNumber, ENC_BIG_ENDIAN ),
rec( 14, 4, FileSize, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff00])
# 2222/1779, 23/121
pkt = NCP(0x1779, "Create Queue Job And File", 'qms')
pkt.Request(264, [
rec( 10, 4, QueueID ),
rec( 14, 250, JobStruct3x ),
])
pkt.Reply(94, [
rec( 8, 86, JobStructNew ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff00])
# 2222/177A, 23/122
pkt = NCP(0x177A, "Read Queue Job Entry", 'qms')
pkt.Request(18, [
rec( 10, 4, QueueID ),
rec( 14, 4, JobNumberLong ),
])
pkt.Reply(258, [
rec( 8, 250, JobStruct3x ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/177B, 23/123
pkt = NCP(0x177B, "Change Queue Job Entry", 'qms')
pkt.Request(264, [
rec( 10, 4, QueueID ),
rec( 14, 250, JobStruct ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xea02, 0xfc07, 0xff00])
# 2222/177C, 23/124
pkt = NCP(0x177C, "Service Queue Job", 'qms')
pkt.Request(16, [
rec( 10, 4, QueueID ),
rec( 14, 2, ServiceType ),
])
pkt.Reply(94, [
rec( 8, 86, JobStructNew ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xfc05, 0xff00])
# 2222/177D, 23/125
pkt = NCP(0x177D, "Read Queue Current Status", 'qms')
pkt.Request(14, [
rec( 10, 4, QueueID ),
])
pkt.Reply(32, [
rec( 8, 4, QueueID ),
rec( 12, 1, QueueStatus ),
rec( 13, 3, Reserved3 ),
rec( 16, 4, CurrentEntries ),
rec( 20, 4, CurrentServers, var="x" ),
rec( 24, 4, ServerID, repeat="x" ),
rec( 28, 4, ServerStationLong, ENC_LITTLE_ENDIAN, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/177E, 23/126
pkt = NCP(0x177E, "Set Queue Current Status", 'qms')
pkt.Request(15, [
rec( 10, 4, QueueID ),
rec( 14, 1, QueueStatus ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/177F, 23/127
pkt = NCP(0x177F, "Close File And Start Queue Job", 'qms')
pkt.Request(18, [
rec( 10, 4, QueueID ),
rec( 14, 4, JobNumberLong ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xfc07, 0xff00])
# 2222/1780, 23/128
pkt = NCP(0x1780, "Remove Job From Queue", 'qms')
pkt.Request(18, [
rec( 10, 4, QueueID ),
rec( 14, 4, JobNumberLong ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/1781, 23/129
pkt = NCP(0x1781, "Get Queue Job List", 'qms')
pkt.Request(18, [
rec( 10, 4, QueueID ),
rec( 14, 4, JobNumberLong ),
])
pkt.Reply(20, [
rec( 8, 4, TotalQueueJobs ),
rec( 12, 4, ReplyQueueJobNumbers, var="x" ),
rec( 16, 4, JobNumberLong, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/1782, 23/130
pkt = NCP(0x1782, "Change Job Priority", 'qms')
pkt.Request(22, [
rec( 10, 4, QueueID ),
rec( 14, 4, JobNumberLong ),
rec( 18, 4, Priority ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/1783, 23/131
pkt = NCP(0x1783, "Finish Servicing Queue Job", 'qms')
pkt.Request(22, [
rec( 10, 4, QueueID ),
rec( 14, 4, JobNumberLong ),
rec( 18, 4, ChargeInformation ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xfc05, 0xff00])
# 2222/1784, 23/132
pkt = NCP(0x1784, "Abort Servicing Queue Job", 'qms')
pkt.Request(18, [
rec( 10, 4, QueueID ),
rec( 14, 4, JobNumberLong ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xfc05, 0xff18])
# 2222/1785, 23/133
pkt = NCP(0x1785, "Change To Client Rights", 'qms')
pkt.Request(18, [
rec( 10, 4, QueueID ),
rec( 14, 4, JobNumberLong ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff18])
# 2222/1786, 23/134
pkt = NCP(0x1786, "Read Queue Server Current Status", 'qms')
pkt.Request(22, [
rec( 10, 4, QueueID ),
rec( 14, 4, ServerID, ENC_BIG_ENDIAN ),
rec( 18, 4, ServerStation ),
])
pkt.Reply(72, [
rec( 8, 64, ServerStatusRecord ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xff00])
# 2222/1787, 23/135
pkt = NCP(0x1787, "Get Queue Job File Size", 'qms')
pkt.Request(18, [
rec( 10, 4, QueueID ),
rec( 14, 4, JobNumberLong ),
])
pkt.Reply(20, [
rec( 8, 4, QueueID ),
rec( 12, 4, JobNumberLong ),
rec( 16, 4, FileSize, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x9900, 0xd000, 0xd100, 0xd200,
0xd300, 0xd400, 0xd500, 0xd601, 0xd703,
0xd800, 0xd902, 0xda01, 0xdb02, 0xfc05, 0xff00])
# 2222/1788, 23/136
pkt = NCP(0x1788, "Move Queue Job From Src Q to Dst Q", 'qms')
pkt.Request(22, [
rec( 10, 4, QueueID ),
rec( 14, 4, JobNumberLong ),
rec( 18, 4, DstQueueID ),
])
pkt.Reply(12, [
rec( 8, 4, JobNumberLong ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfc06])
# 2222/1789, 23/137
pkt = NCP(0x1789, "Get Queue Jobs From Form List", 'qms')
pkt.Request(24, [
rec( 10, 4, QueueID ),
rec( 14, 4, QueueStartPosition ),
rec( 18, 4, FormTypeCnt, ENC_LITTLE_ENDIAN, var="x" ),
rec( 22, 2, FormType, repeat="x" ),
])
pkt.Reply(20, [
rec( 8, 4, TotalQueueJobs ),
rec( 12, 4, JobCount, var="x" ),
rec( 16, 4, JobNumberLong, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xd300, 0xfc06])
# 2222/178A, 23/138
pkt = NCP(0x178A, "Service Queue Job By Form List", 'qms')
pkt.Request(24, [
rec( 10, 4, QueueID ),
rec( 14, 4, QueueStartPosition ),
rec( 18, 4, FormTypeCnt, ENC_LITTLE_ENDIAN, var="x" ),
rec( 22, 2, FormType, repeat="x" ),
])
pkt.Reply(94, [
rec( 8, 86, JobStructNew ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xd902, 0xfc06, 0xff00])
# 2222/1796, 23/150
pkt = NCP(0x1796, "Get Current Account Status", 'accounting')
pkt.Request((13,60), [
rec( 10, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 12, (1,48), ObjectName, info_str=(ObjectName, "Get Current Account Status: %s", ", %s") ),
])
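# The reply returns the balance and credit limit followed by 16 fixed
# holder-ID/hold-amount pairs.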
pkt.Reply(264, [
rec( 8, 4, AccountBalance, ENC_BIG_ENDIAN ),
rec( 12, 4, CreditLimit, ENC_BIG_ENDIAN ),
rec( 16, 120, Reserved120 ),
rec( 136, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 140, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 144, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 148, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 152, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 156, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 160, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 164, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 168, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 172, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 176, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 180, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 184, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 188, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 192, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 196, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 200, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 204, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 208, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 212, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 216, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 220, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 224, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 228, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 232, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 236, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 240, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 244, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 248, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 252, 4, HoldAmount, ENC_BIG_ENDIAN ),
rec( 256, 4, HolderID, ENC_BIG_ENDIAN ),
rec( 260, 4, HoldAmount, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc000, 0xc101, 0xc400, 0xe800,
0xea00, 0xeb00, 0xec00, 0xfc06, 0xfe07, 0xff00])
# 2222/1797, 23/151
pkt = NCP(0x1797, "Submit Account Charge", 'accounting')
pkt.Request((26,327), [
rec( 10, 2, ServiceType, ENC_BIG_ENDIAN ),
rec( 12, 4, ChargeAmount, ENC_BIG_ENDIAN ),
rec( 16, 4, HoldCancelAmount, ENC_BIG_ENDIAN ),
rec( 20, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 22, 2, CommentType, ENC_BIG_ENDIAN ),
rec( 24, (1,48), ObjectName, info_str=(ObjectName, "Submit Account Charge: %s", ", %s") ),
rec( -1, (1,255), Comment ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x8800, 0x9400, 0x9600, 0xa201,
0xc000, 0xc101, 0xc200, 0xc400, 0xe800, 0xea00,
0xeb00, 0xec00, 0xfe07, 0xff00])
# 2222/1798, 23/152
pkt = NCP(0x1798, "Submit Account Hold", 'accounting')
pkt.Request((17,64), [
rec( 10, 4, HoldCancelAmount, ENC_BIG_ENDIAN ),
rec( 14, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 16, (1,48), ObjectName, info_str=(ObjectName, "Submit Account Hold: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x8800, 0x9400, 0x9600, 0xa201,
0xc000, 0xc101, 0xc200, 0xc400, 0xe800, 0xea00,
0xeb00, 0xec00, 0xfe07, 0xff00])
# 2222/1799, 23/153
pkt = NCP(0x1799, "Submit Account Note", 'accounting')
pkt.Request((18,319), [
rec( 10, 2, ServiceType, ENC_BIG_ENDIAN ),
rec( 12, 2, ObjectType, ENC_BIG_ENDIAN ),
rec( 14, 2, CommentType, ENC_BIG_ENDIAN ),
rec( 16, (1,48), ObjectName, info_str=(ObjectName, "Submit Account Note: %s", ", %s") ),
rec( -1, (1,255), Comment ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x9600, 0xc000, 0xc101, 0xc400,
0xe800, 0xea00, 0xeb00, 0xec00, 0xf000, 0xfc06,
0xff00])
# 2222/17C8, 23/200
pkt = NCP(0x17c8, "Check Console Privileges", 'fileserver')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc601])
# 2222/17C9, 23/201
pkt = NCP(0x17c9, "Get File Server Description Strings", 'fileserver')
pkt.Request(10)
pkt.Reply(108, [
rec( 8, 100, DescriptionStrings ),
])
pkt.CompletionCodes([0x0000, 0x9600])
# 2222/17CA, 23/202
pkt = NCP(0x17CA, "Set File Server Date And Time", 'fileserver')
pkt.Request(16, [
rec( 10, 1, Year ),
rec( 11, 1, Month ),
rec( 12, 1, Day ),
rec( 13, 1, Hour ),
rec( 14, 1, Minute ),
rec( 15, 1, Second ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc601])
# 2222/17CB, 23/203
pkt = NCP(0x17CB, "Disable File Server Login", 'fileserver')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc601])
# 2222/17CC, 23/204
pkt = NCP(0x17CC, "Enable File Server Login", 'fileserver')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc601])
# 2222/17CD, 23/205
pkt = NCP(0x17CD, "Get File Server Login Status", 'fileserver')
pkt.Request(10)
pkt.Reply(9, [
rec( 8, 1, UserLoginAllowed ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xfb01])
# 2222/17CF, 23/207
pkt = NCP(0x17CF, "Disable Transaction Tracking", 'fileserver')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc601])
# 2222/17D0, 23/208
pkt = NCP(0x17D0, "Enable Transaction Tracking", 'fileserver')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc601])
# 2222/17D1, 23/209
pkt = NCP(0x17D1, "Send Console Broadcast", 'fileserver')
pkt.Request((13,267), [
rec( 10, 1, NumberOfStations, var="x" ),
rec( 11, 1, StationList, repeat="x" ),
rec( 12, (1, 255), TargetMessage, info_str=(TargetMessage, "Send Console Broadcast: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc601, 0xfd00])
# 2222/17D2, 23/210
pkt = NCP(0x17D2, "Clear Connection Number", 'fileserver')
pkt.Request(11, [
rec( 10, 1, ConnectionNumber, info_str=(ConnectionNumber, "Clear Connection Number %d", ", %d") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc601, 0xfd00])
# 2222/17D3, 23/211
pkt = NCP(0x17D3, "Down File Server", 'fileserver')
pkt.Request(11, [
rec( 10, 1, ForceFlag ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc601, 0xff00])
# 2222/17D4, 23/212
pkt = NCP(0x17D4, "Get File System Statistics", 'fileserver')
pkt.Request(10)
pkt.Reply(50, [
rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
rec( 12, 2, ConfiguredMaxOpenFiles ),
rec( 14, 2, ActualMaxOpenFiles ),
rec( 16, 2, CurrentOpenFiles ),
rec( 18, 4, TotalFilesOpened ),
rec( 22, 4, TotalReadRequests ),
rec( 26, 4, TotalWriteRequests ),
rec( 30, 2, CurrentChangedFATs ),
rec( 32, 4, TotalChangedFATs ),
rec( 36, 2, FATWriteErrors ),
rec( 38, 2, FatalFATWriteErrors ),
rec( 40, 2, FATScanErrors ),
rec( 42, 2, ActualMaxIndexedFiles ),
rec( 44, 2, ActiveIndexedFiles ),
rec( 46, 2, AttachedIndexedFiles ),
rec( 48, 2, AvailableIndexedFiles ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
# 2222/17D5, 23/213
pkt = NCP(0x17D5, "Get Transaction Tracking Statistics", 'fileserver')
pkt.Request((13,267), [
rec( 10, 2, LastRecordSeen ),
rec( 12, (1,255), SemaphoreName ),
])
pkt.Reply(53, [
rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
rec( 12, 1, TransactionTrackingSupported ),
rec( 13, 1, TransactionTrackingEnabled ),
rec( 14, 2, TransactionVolumeNumber ),
rec( 16, 2, ConfiguredMaxSimultaneousTransactions ),
rec( 18, 2, ActualMaxSimultaneousTransactions ),
rec( 20, 2, CurrentTransactionCount ),
rec( 22, 4, TotalTransactionsPerformed ),
rec( 26, 4, TotalWriteTransactionsPerformed ),
rec( 30, 4, TotalTransactionsBackedOut ),
rec( 34, 2, TotalUnfilledBackoutRequests ),
rec( 36, 2, TransactionDiskSpace ),
rec( 38, 4, TransactionFATAllocations ),
rec( 42, 4, TransactionFileSizeChanges ),
rec( 46, 4, TransactionFilesTruncated ),
rec( 50, 1, NumberOfEntries, var="x" ),
rec( 51, 2, ConnTaskStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
# 2222/17D6, 23/214
pkt = NCP(0x17D6, "Read Disk Cache Statistics", 'fileserver')
pkt.Request(10)
pkt.Reply(86, [
rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
rec( 12, 2, CacheBufferCount ),
rec( 14, 2, CacheBufferSize ),
rec( 16, 2, DirtyCacheBuffers ),
rec( 18, 4, CacheReadRequests ),
rec( 22, 4, CacheWriteRequests ),
rec( 26, 4, CacheHits ),
rec( 30, 4, CacheMisses ),
rec( 34, 4, PhysicalReadRequests ),
rec( 38, 4, PhysicalWriteRequests ),
rec( 42, 2, PhysicalReadErrors ),
rec( 44, 2, PhysicalWriteErrors ),
rec( 46, 4, CacheGetRequests ),
rec( 50, 4, CacheFullWriteRequests ),
rec( 54, 4, CachePartialWriteRequests ),
rec( 58, 4, BackgroundDirtyWrites ),
rec( 62, 4, BackgroundAgedWrites ),
rec( 66, 4, TotalCacheWrites ),
rec( 70, 4, CacheAllocations ),
rec( 74, 2, ThrashingCount ),
rec( 76, 2, LRUBlockWasDirty ),
rec( 78, 2, ReadBeyondWrite ),
rec( 80, 2, FragmentWriteOccurred ),
rec( 82, 2, CacheHitOnUnavailableBlock ),
rec( 84, 2, CacheBlockScrapped ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
# 2222/17D7, 23/215
pkt = NCP(0x17D7, "Get Drive Mapping Table", 'fileserver')
pkt.Request(10)
pkt.Reply(184, [
rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
rec( 12, 1, SFTSupportLevel ),
rec( 13, 1, LogicalDriveCount ),
rec( 14, 1, PhysicalDriveCount ),
rec( 15, 1, DiskChannelTable ),
rec( 16, 4, Reserved4 ),
rec( 20, 2, PendingIOCommands, ENC_BIG_ENDIAN ),
rec( 22, 32, DriveMappingTable ),
rec( 54, 32, DriveMirrorTable ),
rec( 86, 32, DeadMirrorTable ),
rec( 118, 1, ReMirrorDriveNumber ),
rec( 119, 1, Filler ),
rec( 120, 4, ReMirrorCurrentOffset, ENC_BIG_ENDIAN ),
rec( 124, 60, SFTErrorTable ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
# 2222/17D8, 23/216
pkt = NCP(0x17D8, "Read Physical Disk Statistics", 'fileserver')
pkt.Request(11, [
rec( 10, 1, PhysicalDiskNumber ),
])
pkt.Reply(101, [
rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
rec( 12, 1, PhysicalDiskChannel ),
rec( 13, 1, DriveRemovableFlag ),
rec( 14, 1, PhysicalDriveType ),
rec( 15, 1, ControllerDriveNumber ),
rec( 16, 1, ControllerNumber ),
rec( 17, 1, ControllerType ),
rec( 18, 4, DriveSize ),
rec( 22, 2, DriveCylinders ),
rec( 24, 1, DriveHeads ),
rec( 25, 1, SectorsPerTrack ),
rec( 26, 64, DriveDefinitionString ),
rec( 90, 2, IOErrorCount ),
rec( 92, 4, HotFixTableStart ),
rec( 96, 2, HotFixTableSize ),
rec( 98, 2, HotFixBlocksAvailable ),
rec( 100, 1, HotFixDisabled ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
# 2222/17D9, 23/217
pkt = NCP(0x17D9, "Get Disk Channel Statistics", 'fileserver')
pkt.Request(11, [
rec( 10, 1, DiskChannelNumber ),
])
pkt.Reply(192, [
rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
rec( 12, 2, ChannelState, ENC_BIG_ENDIAN ),
rec( 14, 2, ChannelSynchronizationState, ENC_BIG_ENDIAN ),
rec( 16, 1, SoftwareDriverType ),
rec( 17, 1, SoftwareMajorVersionNumber ),
rec( 18, 1, SoftwareMinorVersionNumber ),
rec( 19, 65, SoftwareDescription ),
rec( 84, 8, IOAddressesUsed ),
rec( 92, 10, SharedMemoryAddresses ),
rec( 102, 4, InterruptNumbersUsed ),
rec( 106, 4, DMAChannelsUsed ),
rec( 110, 1, FlagBits ),
rec( 111, 1, Reserved ),
rec( 112, 80, ConfigurationDescription ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
# 2222/17DB, 23/219
pkt = NCP(0x17DB, "Get Connection's Open Files", 'fileserver')
pkt.Request(14, [
rec( 10, 2, ConnectionNumber ),
rec( 12, 2, LastRecordSeen, ENC_BIG_ENDIAN ),
])
pkt.Reply(32, [
rec( 8, 2, NextRequestRecord ),
rec( 10, 1, NumberOfRecords, var="x" ),
rec( 11, 21, ConnStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
# 2222/17DC, 23/220
pkt = NCP(0x17DC, "Get Connection Using A File", 'fileserver')
pkt.Request((14,268), [
rec( 10, 2, LastRecordSeen, ENC_BIG_ENDIAN ),
rec( 12, 1, DirHandle ),
rec( 13, (1,255), Path, info_str=(Path, "Get Connection Using File: %s", ", %s") ),
])
pkt.Reply(30, [
rec( 8, 2, UseCount, ENC_BIG_ENDIAN ),
rec( 10, 2, OpenCount, ENC_BIG_ENDIAN ),
rec( 12, 2, OpenForReadCount, ENC_BIG_ENDIAN ),
rec( 14, 2, OpenForWriteCount, ENC_BIG_ENDIAN ),
rec( 16, 2, DenyReadCount, ENC_BIG_ENDIAN ),
rec( 18, 2, DenyWriteCount, ENC_BIG_ENDIAN ),
rec( 20, 2, NextRequestRecord, ENC_BIG_ENDIAN ),
rec( 22, 1, Locked ),
rec( 23, 1, NumberOfRecords, var="x" ),
rec( 24, 6, ConnFileStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
# 2222/17DD, 23/221
pkt = NCP(0x17DD, "Get Physical Record Locks By Connection And File", 'fileserver')
pkt.Request(31, [
rec( 10, 2, TargetConnectionNumber ),
rec( 12, 2, LastRecordSeen, ENC_BIG_ENDIAN ),
rec( 14, 1, VolumeNumber ),
rec( 15, 2, DirectoryID ),
rec( 17, 14, FileName14, info_str=(FileName14, "Get Physical Record Locks by Connection and File: %s", ", %s") ),
])
pkt.Reply(22, [
rec( 8, 2, NextRequestRecord ),
rec( 10, 1, NumberOfLocks, var="x" ),
rec( 11, 1, Reserved ),
rec( 12, 10, LockStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17DE, 23/222
pkt = NCP(0x17DE, "Get Physical Record Locks By File", 'fileserver')
pkt.Request((14,268), [
rec( 10, 2, TargetConnectionNumber ),
rec( 12, 1, DirHandle ),
rec( 13, (1,255), Path, info_str=(Path, "Get Physical Record Locks by File: %s", ", %s") ),
])
pkt.Reply(28, [
rec( 8, 2, NextRequestRecord ),
rec( 10, 1, NumberOfLocks, var="x" ),
rec( 11, 1, Reserved ),
rec( 12, 16, PhyLockStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17DF, 23/223
pkt = NCP(0x17DF, "Get Logical Records By Connection", 'fileserver')
pkt.Request(14, [
rec( 10, 2, TargetConnectionNumber ),
rec( 12, 2, LastRecordSeen, ENC_BIG_ENDIAN ),
])
pkt.Reply((14,268), [
rec( 8, 2, NextRequestRecord ),
rec( 10, 1, NumberOfRecords, var="x" ),
rec( 11, (3, 257), LogLockStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17E0, 23/224
pkt = NCP(0x17E0, "Get Logical Record Information", 'fileserver')
pkt.Request((13,267), [
rec( 10, 2, LastRecordSeen ),
rec( 12, (1,255), LogicalRecordName, info_str=(LogicalRecordName, "Get Logical Record Information: %s", ", %s") ),
])
pkt.Reply(20, [
rec( 8, 2, UseCount, ENC_BIG_ENDIAN ),
rec( 10, 2, ShareableLockCount, ENC_BIG_ENDIAN ),
rec( 12, 2, NextRequestRecord ),
rec( 14, 1, Locked ),
rec( 15, 1, NumberOfRecords, var="x" ),
rec( 16, 4, LogRecStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17E1, 23/225
pkt = NCP(0x17E1, "Get Connection's Semaphores", 'fileserver')
pkt.Request(14, [
rec( 10, 2, ConnectionNumber ),
rec( 12, 2, LastRecordSeen ),
])
pkt.Reply((18,272), [
rec( 8, 2, NextRequestRecord ),
rec( 10, 2, NumberOfSemaphores, var="x" ),
rec( 12, (6,260), SemaStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17E2, 23/226
pkt = NCP(0x17E2, "Get Semaphore Information", 'fileserver')
pkt.Request((13,267), [
rec( 10, 2, LastRecordSeen ),
rec( 12, (1,255), SemaphoreName, info_str=(SemaphoreName, "Get Semaphore Information: %s", ", %s") ),
])
pkt.Reply(17, [
rec( 8, 2, NextRequestRecord, ENC_BIG_ENDIAN ),
rec( 10, 2, OpenCount, ENC_BIG_ENDIAN ),
rec( 12, 1, SemaphoreValue ),
rec( 13, 1, NumberOfRecords, var="x" ),
rec( 14, 3, SemaInfoStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17E3, 23/227
pkt = NCP(0x17E3, "Get LAN Driver Configuration Information", 'fileserver')
pkt.Request(11, [
rec( 10, 1, LANDriverNumber ),
])
pkt.Reply(180, [
rec( 8, 4, NetworkAddress, ENC_BIG_ENDIAN ),
rec( 12, 6, HostAddress ),
rec( 18, 1, BoardInstalled ),
rec( 19, 1, OptionNumber ),
rec( 20, 160, ConfigurationText ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17E5, 23/229
pkt = NCP(0x17E5, "Get Connection Usage Statistics", 'fileserver')
pkt.Request(12, [
rec( 10, 2, ConnectionNumber ),
])
pkt.Reply(26, [
rec( 8, 2, NextRequestRecord ),
rec( 10, 6, BytesRead ),
rec( 16, 6, BytesWritten ),
rec( 22, 4, TotalRequestPackets ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17E6, 23/230
pkt = NCP(0x17E6, "Get Object's Remaining Disk Space", 'fileserver')
pkt.Request(14, [
rec( 10, 4, ObjectID, ENC_BIG_ENDIAN ),
])
pkt.Reply(21, [
rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
rec( 12, 4, ObjectID ),
rec( 16, 4, UnusedDiskBlocks, ENC_BIG_ENDIAN ),
rec( 20, 1, RestrictionsEnforced ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17E7, 23/231
pkt = NCP(0x17E7, "Get File Server LAN I/O Statistics", 'fileserver')
pkt.Request(10)
pkt.Reply(74, [
rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
rec( 12, 2, ConfiguredMaxRoutingBuffers ),
rec( 14, 2, ActualMaxUsedRoutingBuffers ),
rec( 16, 2, CurrentlyUsedRoutingBuffers ),
rec( 18, 4, TotalFileServicePackets ),
rec( 22, 2, TurboUsedForFileService ),
rec( 24, 2, PacketsFromInvalidConnection ),
rec( 26, 2, BadLogicalConnectionCount ),
rec( 28, 2, PacketsReceivedDuringProcessing ),
rec( 30, 2, RequestsReprocessed ),
rec( 32, 2, PacketsWithBadSequenceNumber ),
rec( 34, 2, DuplicateRepliesSent ),
rec( 36, 2, PositiveAcknowledgesSent ),
rec( 38, 2, PacketsWithBadRequestType ),
rec( 40, 2, AttachDuringProcessing ),
rec( 42, 2, AttachWhileProcessingAttach ),
rec( 44, 2, ForgedDetachedRequests ),
rec( 46, 2, DetachForBadConnectionNumber ),
rec( 48, 2, DetachDuringProcessing ),
rec( 50, 2, RepliesCancelled ),
rec( 52, 2, PacketsDiscardedByHopCount ),
rec( 54, 2, PacketsDiscardedUnknownNet ),
rec( 56, 2, IncomingPacketDiscardedNoDGroup ),
rec( 58, 2, OutgoingPacketDiscardedNoTurboBuffer ),
rec( 60, 2, IPXNotMyNetwork ),
rec( 62, 4, NetBIOSBroadcastWasPropagated ),
rec( 66, 4, TotalOtherPackets ),
rec( 70, 4, TotalRoutedPackets ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17E8, 23/232
pkt = NCP(0x17E8, "Get File Server Misc Information", 'fileserver')
pkt.Request(10)
pkt.Reply(40, [
rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
rec( 12, 1, ProcessorType ),
rec( 13, 1, Reserved ),
rec( 14, 1, NumberOfServiceProcesses ),
rec( 15, 1, ServerUtilizationPercentage ),
rec( 16, 2, ConfiguredMaxBinderyObjects ),
rec( 18, 2, ActualMaxBinderyObjects ),
rec( 20, 2, CurrentUsedBinderyObjects ),
rec( 22, 2, TotalServerMemory ),
rec( 24, 2, WastedServerMemory ),
rec( 26, 2, NumberOfDynamicMemoryAreas, var="x" ),
rec( 28, 12, DynMemStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17E9, 23/233
pkt = NCP(0x17E9, "Get Volume Information", 'fileserver')
pkt.Request(11, [
rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Information on Volume %d", ", %d") ),
])
pkt.Reply(48, [
rec( 8, 4, SystemIntervalMarker, ENC_BIG_ENDIAN ),
rec( 12, 1, VolumeNumber ),
rec( 13, 1, LogicalDriveNumber ),
rec( 14, 2, BlockSize ),
rec( 16, 2, StartingBlock ),
rec( 18, 2, TotalBlocks ),
rec( 20, 2, FreeBlocks ),
rec( 22, 2, TotalDirectoryEntries ),
rec( 24, 2, FreeDirectoryEntries ),
rec( 26, 2, ActualMaxUsedDirectoryEntries ),
rec( 28, 1, VolumeHashedFlag ),
rec( 29, 1, VolumeCachedFlag ),
rec( 30, 1, VolumeRemovableFlag ),
rec( 31, 1, VolumeMountedFlag ),
rec( 32, 16, VolumeName ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17EA, 23/234
pkt = NCP(0x17EA, "Get Connection's Task Information", 'fileserver')
pkt.Request(12, [
rec( 10, 2, ConnectionNumber ),
])
pkt.Reply(13, [
rec( 8, 1, ConnLockStatus ),
rec( 9, 1, NumberOfActiveTasks, var="x" ),
rec( 10, 3, TaskStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17EB, 23/235
pkt = NCP(0x17EB, "Get Connection's Open Files", 'fileserver')
pkt.Request(14, [
rec( 10, 2, ConnectionNumber ),
rec( 12, 2, LastRecordSeen ),
])
pkt.Reply((29,283), [
rec( 8, 2, NextRequestRecord ),
rec( 10, 2, NumberOfRecords, var="x" ),
rec( 12, (17, 271), OpnFilesStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
# 2222/17EC, 23/236
pkt = NCP(0x17EC, "Get Connection Using A File", 'fileserver')
pkt.Request(18, [
rec( 10, 1, DataStreamNumber ),
rec( 11, 1, VolumeNumber ),
rec( 12, 4, DirectoryBase, ENC_LITTLE_ENDIAN ),
rec( 16, 2, LastRecordSeen ),
])
pkt.Reply(33, [
rec( 8, 2, NextRequestRecord ),
rec( 10, 2, FileUseCount ),
rec( 12, 2, OpenCount ),
rec( 14, 2, OpenForReadCount ),
rec( 16, 2, OpenForWriteCount ),
rec( 18, 2, DenyReadCount ),
rec( 20, 2, DenyWriteCount ),
rec( 22, 1, Locked ),
rec( 23, 1, ForkCount ),
rec( 24, 2, NumberOfRecords, var="x" ),
rec( 26, 7, ConnFileStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xff00])
# 2222/17ED, 23/237
pkt = NCP(0x17ED, "Get Physical Record Locks By Connection And File", 'fileserver')
pkt.Request(20, [
rec( 10, 2, TargetConnectionNumber ),
rec( 12, 1, DataStreamNumber ),
rec( 13, 1, VolumeNumber ),
rec( 14, 4, DirectoryBase, ENC_LITTLE_ENDIAN ),
rec( 18, 2, LastRecordSeen ),
])
pkt.Reply(23, [
rec( 8, 2, NextRequestRecord ),
rec( 10, 2, NumberOfLocks, ENC_LITTLE_ENDIAN, var="x" ),
rec( 12, 11, LockStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17EE, 23/238
pkt = NCP(0x17EE, "Get Physical Record Locks By File", 'fileserver')
pkt.Request(18, [
rec( 10, 1, DataStreamNumber ),
rec( 11, 1, VolumeNumber ),
rec( 12, 4, DirectoryBase ),
rec( 16, 2, LastRecordSeen ),
])
pkt.Reply(30, [
rec( 8, 2, NextRequestRecord ),
rec( 10, 2, NumberOfLocks, ENC_LITTLE_ENDIAN, var="x" ),
rec( 12, 18, PhyLockStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17EF, 23/239
pkt = NCP(0x17EF, "Get Logical Records By Connection", 'fileserver')
pkt.Request(14, [
rec( 10, 2, TargetConnectionNumber ),
rec( 12, 2, LastRecordSeen ),
])
pkt.Reply((16,270), [
rec( 8, 2, NextRequestRecord ),
rec( 10, 2, NumberOfRecords, var="x" ),
rec( 12, (4, 258), LogLockStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17F0, 23/240
pkt = NCP(0x17F0, "Get Logical Record Information (old)", 'fileserver')
pkt.Request((13,267), [
rec( 10, 2, LastRecordSeen ),
rec( 12, (1,255), LogicalRecordName ),
])
pkt.Reply(22, [
rec( 8, 2, ShareableLockCount ),
rec( 10, 2, UseCount ),
rec( 12, 1, Locked ),
rec( 13, 2, NextRequestRecord ),
rec( 15, 2, NumberOfRecords, var="x" ),
rec( 17, 5, LogRecStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17F1, 23/241
pkt = NCP(0x17F1, "Get Connection's Semaphores", 'fileserver')
pkt.Request(14, [
rec( 10, 2, ConnectionNumber ),
rec( 12, 2, LastRecordSeen ),
])
pkt.Reply((19,273), [
rec( 8, 2, NextRequestRecord ),
rec( 10, 2, NumberOfSemaphores, var="x" ),
rec( 12, (7, 261), SemaStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17F2, 23/242
pkt = NCP(0x17F2, "Get Semaphore Information", 'fileserver')
pkt.Request((13,267), [
rec( 10, 2, LastRecordSeen ),
rec( 12, (1,255), SemaphoreName, info_str=(SemaphoreName, "Get Semaphore Information: %s", ", %s") ),
])
pkt.Reply(20, [
rec( 8, 2, NextRequestRecord ),
rec( 10, 2, OpenCount ),
rec( 12, 2, SemaphoreValue ),
rec( 14, 2, NumberOfRecords, var="x" ),
rec( 16, 4, SemaInfoStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17F3, 23/243
pkt = NCP(0x17F3, "Map Directory Number to Path", 'file')
pkt.Request(16, [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, DirectoryNumber ),
rec( 15, 1, NameSpace ),
])
pkt.Reply((9,263), [
rec( 8, (1,255), Path ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9c00, 0xc601, 0xfd00, 0xff00])
# 2222/17F4, 23/244
pkt = NCP(0x17F4, "Convert Path to Dir Entry", 'file')
pkt.Request((12,266), [
rec( 10, 1, DirHandle ),
rec( 11, (1,255), Path, info_str=(Path, "Convert Path to Directory Entry: %s", ", %s") ),
])
pkt.Reply(13, [
rec( 8, 1, VolumeNumber ),
rec( 9, 4, DirectoryNumber ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xc601, 0xfd00, 0xff00])
# 2222/17FD, 23/253
pkt = NCP(0x17FD, "Send Console Broadcast", 'fileserver')
pkt.Request((16, 270), [
rec( 10, 1, NumberOfStations, var="x" ),
rec( 11, 4, StationList, repeat="x" ),
rec( 15, (1, 255), TargetMessage, info_str=(TargetMessage, "Send Console Broadcast: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc601, 0xfd00])
# 2222/17FE, 23/254
pkt = NCP(0x17FE, "Clear Connection Number", 'fileserver')
pkt.Request(14, [
rec( 10, 4, ConnectionNumber ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xc601, 0xfd00])
# 2222/18, 24
pkt = NCP(0x18, "End of Job", 'connection')
pkt.Request(7)
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/19, 25
pkt = NCP(0x19, "Logout", 'connection')
pkt.Request(7)
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/1A, 26
pkt = NCP(0x1A, "Log Physical Record", 'sync')
pkt.Request(24, [
rec( 7, 1, LockFlag ),
rec( 8, 6, FileHandle ),
rec( 14, 4, LockAreasStartOffset, ENC_BIG_ENDIAN ),
rec( 18, 4, LockAreaLen, ENC_BIG_ENDIAN, info_str=(LockAreaLen, "Lock Record - Length of %d", "%d") ),
rec( 22, 2, LockTimeout ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff01])
# 2222/1B, 27
pkt = NCP(0x1B, "Lock Physical Record Set", 'sync')
pkt.Request(10, [
rec( 7, 1, LockFlag ),
rec( 8, 2, LockTimeout ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff01])
# 2222/1C, 28
pkt = NCP(0x1C, "Release Physical Record", 'sync')
pkt.Request(22, [
rec( 7, 1, Reserved ),
rec( 8, 6, FileHandle ),
rec( 14, 4, LockAreasStartOffset ),
rec( 18, 4, LockAreaLen, info_str=(LockAreaLen, "Release Lock Record - Length of %d", "%d") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff03])
# 2222/1D, 29
pkt = NCP(0x1D, "Release Physical Record Set", 'sync')
pkt.Request(8, [
rec( 7, 1, LockFlag ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff03])
# 2222/1E, 30 #Tested and fixed 6-14-02 GM
pkt = NCP(0x1E, "Clear Physical Record", 'sync')
pkt.Request(22, [
rec( 7, 1, Reserved ),
rec( 8, 6, FileHandle ),
rec( 14, 4, LockAreasStartOffset, ENC_BIG_ENDIAN ),
rec( 18, 4, LockAreaLen, ENC_BIG_ENDIAN, info_str=(LockAreaLen, "Clear Lock Record - Length of %d", "%d") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff03])
# 2222/1F, 31
pkt = NCP(0x1F, "Clear Physical Record Set", 'sync')
pkt.Request(8, [
rec( 7, 1, LockFlag ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff03])
# 2222/2000, 32/00
pkt = NCP(0x2000, "Open Semaphore", 'sync', has_length=0)
pkt.Request((10,264), [
rec( 8, 1, InitialSemaphoreValue ),
rec( 9, (1,255), SemaphoreName, info_str=(SemaphoreName, "Open Semaphore: %s", ", %s") ),
])
pkt.Reply(13, [
rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ),
rec( 12, 1, SemaphoreOpenCount ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
# 2222/2001, 32/01
pkt = NCP(0x2001, "Examine Semaphore", 'sync', has_length=0)
pkt.Request(12, [
rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ),
])
pkt.Reply(10, [
rec( 8, 1, SemaphoreValue ),
rec( 9, 1, SemaphoreOpenCount ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
# 2222/2002, 32/02
pkt = NCP(0x2002, "Wait On Semaphore", 'sync', has_length=0)
pkt.Request(14, [
rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ),
rec( 12, 2, SemaphoreTimeOut, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
# 2222/2003, 32/03
pkt = NCP(0x2003, "Signal Semaphore", 'sync', has_length=0)
pkt.Request(12, [
rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
# 2222/2004, 32/04
pkt = NCP(0x2004, "Close Semaphore", 'sync', has_length=0)
pkt.Request(12, [
rec( 8, 4, SemaphoreHandle, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
# 2222/21, 33
pkt = NCP(0x21, "Negotiate Buffer Size", 'connection')
pkt.Request(9, [
rec( 7, 2, BufferSize, ENC_BIG_ENDIAN ),
])
pkt.Reply(10, [
rec( 8, 2, BufferSize, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000])
# 2222/2200, 34/00
pkt = NCP(0x2200, "TTS Is Available", 'tts', has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0001, 0xfd03, 0xff12])
# 2222/2201, 34/01
pkt = NCP(0x2201, "TTS Begin Transaction", 'tts', has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/2202, 34/02
pkt = NCP(0x2202, "TTS End Transaction", 'tts', has_length=0)
pkt.Request(8)
pkt.Reply(12, [
rec( 8, 4, TransactionNumber, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0xff01])
# 2222/2203, 34/03
pkt = NCP(0x2203, "TTS Abort Transaction", 'tts', has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xfd03, 0xfe0b, 0xff01])
# 2222/2204, 34/04
pkt = NCP(0x2204, "TTS Transaction Status", 'tts', has_length=0)
pkt.Request(12, [
rec( 8, 4, TransactionNumber, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/2205, 34/05
pkt = NCP(0x2205, "TTS Get Application Thresholds", 'tts', has_length=0)
pkt.Request(8)
pkt.Reply(10, [
rec( 8, 1, LogicalLockThreshold ),
rec( 9, 1, PhysicalLockThreshold ),
])
pkt.CompletionCodes([0x0000])
# 2222/2206, 34/06
pkt = NCP(0x2206, "TTS Set Application Thresholds", 'tts', has_length=0)
pkt.Request(10, [
rec( 8, 1, LogicalLockThreshold ),
rec( 9, 1, PhysicalLockThreshold ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600])
# 2222/2207, 34/07
pkt = NCP(0x2207, "TTS Get Workstation Thresholds", 'tts', has_length=0)
pkt.Request(8)
pkt.Reply(10, [
rec( 8, 1, LogicalLockThreshold ),
rec( 9, 1, PhysicalLockThreshold ),
])
pkt.CompletionCodes([0x0000])
# 2222/2208, 34/08
pkt = NCP(0x2208, "TTS Set Workstation Thresholds", 'tts', has_length=0)
pkt.Request(10, [
rec( 8, 1, LogicalLockThreshold ),
rec( 9, 1, PhysicalLockThreshold ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/2209, 34/09
pkt = NCP(0x2209, "TTS Get Transaction Bits", 'tts', has_length=0)
pkt.Request(8)
pkt.Reply(9, [
rec( 8, 1, ControlFlags ),
])
pkt.CompletionCodes([0x0000])
# 2222/220A, 34/10
pkt = NCP(0x220A, "TTS Set Transaction Bits", 'tts', has_length=0)
pkt.Request(9, [
rec( 8, 1, ControlFlags ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/2301, 35/01
pkt = NCP(0x2301, "AFP Create Directory", 'afp')
pkt.Request((49, 303), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, BaseDirectoryID ),
rec( 15, 1, Reserved ),
rec( 16, 4, CreatorID ),
rec( 20, 4, Reserved4 ),
rec( 24, 2, FinderAttr ),
rec( 26, 2, HorizLocation ),
rec( 28, 2, VertLocation ),
rec( 30, 2, FileDirWindow ),
rec( 32, 16, Reserved16 ),
rec( 48, (1,255), Path, info_str=(Path, "AFP Create Directory: %s", ", %s") ),
])
pkt.Reply(12, [
rec( 8, 4, NewDirectoryID ),
])
pkt.CompletionCodes([0x0000, 0x8301, 0x8400, 0x8800, 0x9300, 0x9600, 0x9804,
0x9900, 0x9c03, 0x9e02, 0xa100, 0xa201, 0xfd00, 0xff18])
# 2222/2302, 35/02
pkt = NCP(0x2302, "AFP Create File", 'afp')
pkt.Request((49, 303), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, BaseDirectoryID ),
rec( 15, 1, DeleteExistingFileFlag ),
rec( 16, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 20, 4, Reserved4 ),
rec( 24, 2, FinderAttr ),
rec( 26, 2, HorizLocation, ENC_BIG_ENDIAN ),
rec( 28, 2, VertLocation, ENC_BIG_ENDIAN ),
rec( 30, 2, FileDirWindow, ENC_BIG_ENDIAN ),
rec( 32, 16, Reserved16 ),
rec( 48, (1,255), Path, info_str=(Path, "AFP Create File: %s", ", %s") ),
])
pkt.Reply(12, [
rec( 8, 4, NewDirectoryID ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8301, 0x8400, 0x8701, 0x8800,
0x8a00, 0x8d00, 0x8e00, 0x8f00, 0x9300, 0x9600, 0x9804,
0x9900, 0x9b03, 0x9c03, 0x9e02, 0xa100, 0xa201, 0xfd00,
0xff18])
# 2222/2303, 35/03
pkt = NCP(0x2303, "AFP Delete", 'afp')
pkt.Request((16,270), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, BaseDirectoryID ),
rec( 15, (1,255), Path, info_str=(Path, "AFP Delete: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x8a00, 0x8d00, 0x8e00, 0x8f00,
0x9000, 0x9300, 0x9600, 0x9804, 0x9b03, 0x9c03, 0x9e02,
0xa000, 0xa100, 0xa201, 0xfd00, 0xff19])
# 2222/2304, 35/04
pkt = NCP(0x2304, "AFP Get Entry ID From Name", 'afp')
pkt.Request((16,270), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, BaseDirectoryID ),
rec( 15, (1,255), Path, info_str=(Path, "AFP Get Entry from Name: %s", ", %s") ),
])
pkt.Reply(12, [
rec( 8, 4, TargetEntryID, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804, 0x9c03,
0xa100, 0xa201, 0xfd00, 0xff19])
# 2222/2305, 35/05
pkt = NCP(0x2305, "AFP Get File Information", 'afp')
pkt.Request((18,272), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, BaseDirectoryID ),
rec( 15, 2, RequestBitMap, ENC_BIG_ENDIAN ),
rec( 17, (1,255), Path, info_str=(Path, "AFP Get File Information: %s", ", %s") ),
])
pkt.Reply(121, [
rec( 8, 4, AFPEntryID, ENC_BIG_ENDIAN ),
rec( 12, 4, ParentID, ENC_BIG_ENDIAN ),
rec( 16, 2, AttributesDef16, ENC_LITTLE_ENDIAN ),
rec( 18, 4, DataForkLen, ENC_BIG_ENDIAN ),
rec( 22, 4, ResourceForkLen, ENC_BIG_ENDIAN ),
rec( 26, 2, TotalOffspring, ENC_BIG_ENDIAN ),
rec( 28, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 30, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
rec( 32, 2, ModifiedDate, ENC_BIG_ENDIAN ),
rec( 34, 2, ModifiedTime, ENC_BIG_ENDIAN ),
rec( 36, 2, ArchivedDate, ENC_BIG_ENDIAN ),
rec( 38, 2, ArchivedTime, ENC_BIG_ENDIAN ),
rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 44, 4, Reserved4 ),
rec( 48, 2, FinderAttr ),
rec( 50, 2, HorizLocation ),
rec( 52, 2, VertLocation ),
rec( 54, 2, FileDirWindow ),
rec( 56, 16, Reserved16 ),
rec( 72, 32, LongName ),
rec( 104, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 108, 12, ShortName ),
rec( 120, 1, AccessPrivileges ),
])
pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804, 0x9c03,
0xa100, 0xa201, 0xfd00, 0xff19])
# 2222/2306, 35/06
pkt = NCP(0x2306, "AFP Get Entry ID From NetWare Handle", 'afp')
pkt.Request(16, [
rec( 10, 6, FileHandle ),
])
pkt.Reply(14, [
rec( 8, 1, VolumeID ),
rec( 9, 4, TargetEntryID, ENC_BIG_ENDIAN ),
rec( 13, 1, ForkIndicator ),
])
pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0xa201])
# 2222/2307, 35/07
pkt = NCP(0x2307, "AFP Rename", 'afp')
pkt.Request((21, 529), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, MacSourceBaseID, ENC_BIG_ENDIAN ),
rec( 15, 4, MacDestinationBaseID, ENC_BIG_ENDIAN ),
rec( 19, (1,255), Path, info_str=(Path, "AFP Rename: %s", ", %s") ),
rec( -1, (1,255), NewFileNameLen ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8301, 0x8401, 0x8800, 0x8b00, 0x8e00,
0x9001, 0x9201, 0x9300, 0x9600, 0x9804, 0x9900,
0x9c03, 0x9e00, 0xa100, 0xa201, 0xfd00, 0xff0a])
# 2222/2308, 35/08
pkt = NCP(0x2308, "AFP Open File Fork", 'afp')
pkt.Request((18, 272), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, MacBaseDirectoryID ),
rec( 15, 1, ForkIndicator ),
rec( 16, 1, AccessMode ),
rec( 17, (1,255), Path, info_str=(Path, "AFP Open File Fork: %s", ", %s") ),
])
pkt.Reply(22, [
rec( 8, 4, AFPEntryID, ENC_BIG_ENDIAN ),
rec( 12, 4, DataForkLen, ENC_BIG_ENDIAN ),
rec( 16, 6, NetWareAccessHandle ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8301, 0x8800, 0x9300,
0x9400, 0x9600, 0x9804, 0x9900, 0x9c03, 0xa100,
0xa201, 0xfd00, 0xff16])
# 2222/2309, 35/09
pkt = NCP(0x2309, "AFP Set File Information", 'afp')
pkt.Request((64, 318), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, MacBaseDirectoryID ),
rec( 15, 2, RequestBitMap, ENC_BIG_ENDIAN ),
rec( 17, 2, MacAttr, ENC_BIG_ENDIAN ),
rec( 19, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 21, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
rec( 23, 2, ModifiedDate, ENC_BIG_ENDIAN ),
rec( 25, 2, ModifiedTime, ENC_BIG_ENDIAN ),
rec( 27, 2, ArchivedDate, ENC_BIG_ENDIAN ),
rec( 29, 2, ArchivedTime, ENC_BIG_ENDIAN ),
rec( 31, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 35, 4, Reserved4 ),
rec( 39, 2, FinderAttr ),
rec( 41, 2, HorizLocation ),
rec( 43, 2, VertLocation ),
rec( 45, 2, FileDirWindow ),
rec( 47, 16, Reserved16 ),
rec( 63, (1,255), Path, info_str=(Path, "AFP Set File Information: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0104, 0x8301, 0x8800, 0x9300, 0x9400,
0x9500, 0x9600, 0x9804, 0x9c03, 0xa100, 0xa201,
0xfd00, 0xff16])
# 2222/230A, 35/10
pkt = NCP(0x230A, "AFP Scan File Information", 'afp')
pkt.Request((26, 280), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, MacBaseDirectoryID ),
rec( 15, 4, MacLastSeenID, ENC_BIG_ENDIAN ),
rec( 19, 2, DesiredResponseCount, ENC_BIG_ENDIAN ),
rec( 21, 2, SearchBitMap, ENC_BIG_ENDIAN ),
rec( 23, 2, RequestBitMap, ENC_BIG_ENDIAN ),
rec( 25, (1,255), Path, info_str=(Path, "AFP Scan File Information: %s", ", %s") ),
])
pkt.Reply(123, [
rec( 8, 2, ActualResponseCount, ENC_BIG_ENDIAN, var="x" ),
rec( 10, 113, AFP10Struct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804,
0x9c03, 0xa100, 0xa201, 0xfd00, 0xff16])
# 2222/230B, 35/11
pkt = NCP(0x230B, "AFP Alloc Temporary Directory Handle", 'afp')
pkt.Request((16,270), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, MacBaseDirectoryID ),
rec( 15, (1,255), Path, info_str=(Path, "AFP Allocate Temporary Directory Handle: %s", ", %s") ),
])
pkt.Reply(10, [
rec( 8, 1, DirHandle ),
rec( 9, 1, AccessRightsMask ),
])
pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600,
0x9804, 0x9b03, 0x9c03, 0x9d00, 0xa100,
0xa201, 0xfd00, 0xff00])
# 2222/230C, 35/12
pkt = NCP(0x230C, "AFP Get Entry ID From Path Name", 'afp')
pkt.Request((12,266), [
rec( 10, 1, DirHandle ),
rec( 11, (1,255), Path, info_str=(Path, "AFP Get Entry ID from Path Name: %s", ", %s") ),
])
pkt.Reply(12, [
rec( 8, 4, AFPEntryID, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa100, 0xa201,
0xfd00, 0xff00])
# 2222/230D, 35/13
pkt = NCP(0x230D, "AFP 2.0 Create Directory", 'afp')
pkt.Request((55,309), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, BaseDirectoryID ),
rec( 15, 1, Reserved ),
rec( 16, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 20, 4, Reserved4 ),
rec( 24, 2, FinderAttr ),
rec( 26, 2, HorizLocation ),
rec( 28, 2, VertLocation ),
rec( 30, 2, FileDirWindow ),
rec( 32, 16, Reserved16 ),
rec( 48, 6, ProDOSInfo ),
rec( 54, (1,255), Path, info_str=(Path, "AFP 2.0 Create Directory: %s", ", %s") ),
])
pkt.Reply(12, [
rec( 8, 4, NewDirectoryID ),
])
pkt.CompletionCodes([0x0000, 0x8301, 0x8400, 0x8800, 0x9300,
0x9600, 0x9804, 0x9900, 0x9c03, 0x9e00,
0xa100, 0xa201, 0xfd00, 0xff00])
# 2222/230E, 35/14
pkt = NCP(0x230E, "AFP 2.0 Create File", 'afp')
pkt.Request((55,309), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, BaseDirectoryID ),
rec( 15, 1, DeleteExistingFileFlag ),
rec( 16, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 20, 4, Reserved4 ),
rec( 24, 2, FinderAttr ),
rec( 26, 2, HorizLocation ),
rec( 28, 2, VertLocation ),
rec( 30, 2, FileDirWindow ),
rec( 32, 16, Reserved16 ),
rec( 48, 6, ProDOSInfo ),
rec( 54, (1,255), Path, info_str=(Path, "AFP 2.0 Create File: %s", ", %s") ),
])
pkt.Reply(12, [
rec( 8, 4, NewDirectoryID ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8301, 0x8400,
0x8701, 0x8800, 0x8a00, 0x8d00, 0x8e00,
0x8f00, 0x9001, 0x9300, 0x9600, 0x9804,
0x9900, 0x9b03, 0x9c03, 0x9e00, 0xa100,
0xa201, 0xfd00, 0xff00])
# 2222/230F, 35/15
pkt = NCP(0x230F, "AFP 2.0 Get File Or Directory Information", 'afp')
pkt.Request((18,272), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, BaseDirectoryID ),
rec( 15, 2, RequestBitMap, ENC_BIG_ENDIAN ),
rec( 17, (1,255), Path, info_str=(Path, "AFP 2.0 Get Information: %s", ", %s") ),
])
pkt.Reply(128, [
rec( 8, 4, AFPEntryID, ENC_BIG_ENDIAN ),
rec( 12, 4, ParentID, ENC_BIG_ENDIAN ),
rec( 16, 2, AttributesDef16 ),
rec( 18, 4, DataForkLen, ENC_BIG_ENDIAN ),
rec( 22, 4, ResourceForkLen, ENC_BIG_ENDIAN ),
rec( 26, 2, TotalOffspring, ENC_BIG_ENDIAN ),
rec( 28, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 30, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
rec( 32, 2, ModifiedDate, ENC_BIG_ENDIAN ),
rec( 34, 2, ModifiedTime, ENC_BIG_ENDIAN ),
rec( 36, 2, ArchivedDate, ENC_BIG_ENDIAN ),
rec( 38, 2, ArchivedTime, ENC_BIG_ENDIAN ),
rec( 40, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 44, 4, Reserved4 ),
rec( 48, 2, FinderAttr ),
rec( 50, 2, HorizLocation ),
rec( 52, 2, VertLocation ),
rec( 54, 2, FileDirWindow ),
rec( 56, 16, Reserved16 ),
rec( 72, 32, LongName ),
rec( 104, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 108, 12, ShortName ),
rec( 120, 1, AccessPrivileges ),
rec( 121, 1, Reserved ),
rec( 122, 6, ProDOSInfo ),
])
pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804, 0x9c03,
0xa100, 0xa201, 0xfd00, 0xff19])
# 2222/2310, 35/16
pkt = NCP(0x2310, "AFP 2.0 Set File Information", 'afp')
pkt.Request((70, 324), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, MacBaseDirectoryID ),
rec( 15, 2, RequestBitMap, ENC_BIG_ENDIAN ),
rec( 17, 2, AttributesDef16 ),
rec( 19, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 21, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
rec( 23, 2, ModifiedDate, ENC_BIG_ENDIAN ),
rec( 25, 2, ModifiedTime, ENC_BIG_ENDIAN ),
rec( 27, 2, ArchivedDate, ENC_BIG_ENDIAN ),
rec( 29, 2, ArchivedTime, ENC_BIG_ENDIAN ),
rec( 31, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 35, 4, Reserved4 ),
rec( 39, 2, FinderAttr ),
rec( 41, 2, HorizLocation ),
rec( 43, 2, VertLocation ),
rec( 45, 2, FileDirWindow ),
rec( 47, 16, Reserved16 ),
rec( 63, 6, ProDOSInfo ),
rec( 69, (1,255), Path, info_str=(Path, "AFP 2.0 Set File Information: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0104, 0x8301, 0x8800, 0x9300, 0x9400,
0x9500, 0x9600, 0x9804, 0x9c03, 0xa100, 0xa201,
0xfd00, 0xff16])
# 2222/2311, 35/17
pkt = NCP(0x2311, "AFP 2.0 Scan File Information", 'afp')
pkt.Request((26, 280), [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, MacBaseDirectoryID ),
rec( 15, 4, MacLastSeenID, ENC_BIG_ENDIAN ),
rec( 19, 2, DesiredResponseCount, ENC_BIG_ENDIAN ),
rec( 21, 2, SearchBitMap, ENC_BIG_ENDIAN ),
rec( 23, 2, RequestBitMap, ENC_BIG_ENDIAN ),
rec( 25, (1,255), Path, info_str=(Path, "AFP 2.0 Scan File Information: %s", ", %s") ),
])
pkt.Reply(14, [
rec( 8, 2, ActualResponseCount, var="x" ),
rec( 10, 4, AFP20Struct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x8301, 0x8800, 0x9300, 0x9600, 0x9804,
0x9c03, 0xa100, 0xa201, 0xfd00, 0xff16])
# 2222/2312, 35/18
pkt = NCP(0x2312, "AFP Get DOS Name From Entry ID", 'afp')
pkt.Request(15, [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, AFPEntryID, ENC_BIG_ENDIAN ),
])
pkt.Reply((9,263), [
rec( 8, (1,255), Path ),
])
pkt.CompletionCodes([0x0000, 0x8900, 0x9600, 0xbf00])
# 2222/2313, 35/19
pkt = NCP(0x2313, "AFP Get Macintosh Info On Deleted File", 'afp')
pkt.Request(15, [
rec( 10, 1, VolumeNumber ),
rec( 11, 4, DirectoryNumber, ENC_BIG_ENDIAN ),
])
pkt.Reply((51,305), [
rec( 8, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 12, 4, Reserved4 ),
rec( 16, 2, FinderAttr ),
rec( 18, 2, HorizLocation ),
rec( 20, 2, VertLocation ),
rec( 22, 2, FileDirWindow ),
rec( 24, 16, Reserved16 ),
rec( 40, 6, ProDOSInfo ),
rec( 46, 4, ResourceForkSize, ENC_BIG_ENDIAN ),
rec( 50, (1,255), FileName ),
])
pkt.CompletionCodes([0x0000, 0x9c03, 0xbf00])
# 2222/2400, 36/00
pkt = NCP(0x2400, "Get NCP Extension Information", 'extension')
pkt.Request(14, [
rec( 10, 4, NCPextensionNumber, ENC_LITTLE_ENDIAN ),
])
pkt.Reply((16,270), [
rec( 8, 4, NCPextensionNumber ),
rec( 12, 1, NCPextensionMajorVersion ),
rec( 13, 1, NCPextensionMinorVersion ),
rec( 14, 1, NCPextensionRevisionNumber ),
rec( 15, (1, 255), NCPextensionName ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
# 2222/2401, 36/01
pkt = NCP(0x2401, "Get NCP Extension Maximum Data Size", 'extension')
pkt.Request(10)
pkt.Reply(10, [
rec( 8, 2, NCPdataSize ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
# 2222/2402, 36/02
pkt = NCP(0x2402, "Get NCP Extension Information by Name", 'extension')
pkt.Request((11, 265), [
rec( 10, (1,255), NCPextensionName, info_str=(NCPextensionName, "Get NCP Extension Information by Name: %s", ", %s") ),
])
pkt.Reply((16,270), [
rec( 8, 4, NCPextensionNumber ),
rec( 12, 1, NCPextensionMajorVersion ),
rec( 13, 1, NCPextensionMinorVersion ),
rec( 14, 1, NCPextensionRevisionNumber ),
rec( 15, (1, 255), NCPextensionName ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
# 2222/2403, 36/03
pkt = NCP(0x2403, "Get Number of Registered NCP Extensions", 'extension')
pkt.Request(10)
pkt.Reply(12, [
rec( 8, 4, NumberOfNCPExtensions ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
# 2222/2404, 36/04
pkt = NCP(0x2404, "Get NCP Extension Registered Verbs List", 'extension')
pkt.Request(14, [
rec( 10, 4, StartingNumber ),
])
pkt.Reply(20, [
rec( 8, 4, ReturnedListCount, var="x" ),
rec( 12, 4, nextStartingNumber ),
rec( 16, 4, NCPExtensionNumbers, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
# 2222/2405, 36/05
pkt = NCP(0x2405, "Return NCP Extension Information", 'extension')
pkt.Request(14, [
rec( 10, 4, NCPextensionNumber ),
])
pkt.Reply((16,270), [
rec( 8, 4, NCPextensionNumber ),
rec( 12, 1, NCPextensionMajorVersion ),
rec( 13, 1, NCPextensionMinorVersion ),
rec( 14, 1, NCPextensionRevisionNumber ),
rec( 15, (1, 255), NCPextensionName ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
# 2222/2406, 36/06
pkt = NCP(0x2406, "Return NCP Extension Maximum Data Size", 'extension')
pkt.Request(10)
pkt.Reply(12, [
rec( 8, 4, NCPdataSize ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfe00, 0xff20])
# 2222/25, 37
pkt = NCP(0x25, "Execute NCP Extension", 'extension')
pkt.Request(11, [
rec( 7, 4, NCPextensionNumber ),
# The following value is Unicode
        #rec( 13, (1,255), RequestData ),
])
pkt.Reply(8)
# The following value is Unicode
#rec( 8, (1, 255), ReplyBuffer ),
pkt.CompletionCodes([0x0000, 0x7e01, 0xf000, 0x9c00, 0xd504, 0xee00, 0xfe00, 0xff20])
# 2222/3B, 59
pkt = NCP(0x3B, "Commit File", 'file', has_length=0 )
pkt.Request(14, [
rec( 7, 1, Reserved ),
rec( 8, 6, FileHandle, info_str=(FileHandle, "Commit File - 0x%s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8800, 0x9804, 0xff00])
# 2222/3D, 61
pkt = NCP(0x3D, "Commit File", 'file', has_length=0 )
pkt.Request(14, [
rec( 7, 1, Reserved ),
rec( 8, 6, FileHandle, info_str=(FileHandle, "Commit File - 0x%s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8800, 0x9804, 0xff00])
# 2222/3E, 62
pkt = NCP(0x3E, "File Search Initialize", 'file', has_length=0 )
pkt.Request((9, 263), [
rec( 7, 1, DirHandle ),
rec( 8, (1,255), Path, info_str=(Path, "Initialize File Search: %s", ", %s") ),
])
pkt.Reply(14, [
rec( 8, 1, VolumeNumber ),
rec( 9, 2, DirectoryID ),
rec( 11, 2, SequenceNumber, ENC_BIG_ENDIAN ),
rec( 13, 1, AccessRightsMask ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0x9804, 0x9b03, 0x9c03, 0xa100,
0xfd00, 0xff16])
# 2222/3F, 63
pkt = NCP(0x3F, "File Search Continue", 'file', has_length=0 )
pkt.Request((14, 268), [
rec( 7, 1, VolumeNumber ),
rec( 8, 2, DirectoryID ),
rec( 10, 2, SequenceNumber, ENC_BIG_ENDIAN ),
rec( 12, 1, SearchAttributes ),
rec( 13, (1,255), Path, info_str=(Path, "File Search Continue: %s", ", %s") ),
])
pkt.Reply( NO_LENGTH_CHECK, [
#
# XXX - don't show this if we got back a non-zero
# completion code? For example, 255 means "No
# matching files or directories were found", so
        # presumably there is no matching file or directory
        # instance to show - the reply appears to just leave
        # stale data in that space.
#
srec( DirectoryInstance, req_cond="ncp.sattr_sub==TRUE"),
srec( FileInstance, req_cond="ncp.sattr_sub!=TRUE"),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0xff16])
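# Conditional records: each srec() carries a req_cond display-filter
# expression, and - assuming the generator emits a matching runtime test -
# only the record whose condition holds is decoded. Here that means
# DirectoryInstance when the search attributes select subdirectories and
# FileInstance otherwise. ReqCondSizeVariable() marks the reply as having
# no fixed length, since its size depends on which records appear.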
# 2222/40, 64
pkt = NCP(0x40, "Search for a File", 'file')
pkt.Request((12, 266), [
rec( 7, 2, SequenceNumber, ENC_BIG_ENDIAN ),
rec( 9, 1, DirHandle ),
rec( 10, 1, SearchAttributes ),
rec( 11, (1,255), FileName, info_str=(FileName, "Search for File: %s", ", %s") ),
])
pkt.Reply(40, [
rec( 8, 2, SequenceNumber, ENC_BIG_ENDIAN ),
rec( 10, 2, Reserved2 ),
rec( 12, 14, FileName14 ),
rec( 26, 1, AttributesDef ),
rec( 27, 1, FileExecuteType ),
rec( 28, 4, FileSize ),
rec( 32, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 34, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
rec( 36, 2, ModifiedDate, ENC_BIG_ENDIAN ),
rec( 38, 2, ModifiedTime, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x8900, 0x9600, 0x9804, 0x9b03,
0x9c03, 0xa100, 0xfd00, 0xff16])
# 2222/41, 65
pkt = NCP(0x41, "Open File", 'file')
pkt.Request((10, 264), [
rec( 7, 1, DirHandle ),
rec( 8, 1, SearchAttributes ),
rec( 9, (1,255), FileName, info_str=(FileName, "Open File: %s", ", %s") ),
])
pkt.Reply(44, [
rec( 8, 6, FileHandle ),
rec( 14, 2, Reserved2 ),
rec( 16, 14, FileName14 ),
rec( 30, 1, AttributesDef ),
rec( 31, 1, FileExecuteType ),
rec( 32, 4, FileSize, ENC_BIG_ENDIAN ),
rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ),
rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8200, 0x9400,
0x9600, 0x9804, 0x9c03, 0xa100, 0xfd00,
0xff16])
# 2222/42, 66
pkt = NCP(0x42, "Close File", 'file')
pkt.Request(14, [
rec( 7, 1, Reserved ),
rec( 8, 6, FileHandle, info_str=(FileHandle, "Close File - 0x%s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8800, 0xff1a])
pkt.MakeExpert("ncp42_request")
# 2222/43, 67
pkt = NCP(0x43, "Create File", 'file')
pkt.Request((10, 264), [
rec( 7, 1, DirHandle ),
rec( 8, 1, AttributesDef ),
rec( 9, (1,255), FileName, info_str=(FileName, "Create File: %s", ", %s") ),
])
pkt.Reply(44, [
rec( 8, 6, FileHandle ),
rec( 14, 2, Reserved2 ),
rec( 16, 14, FileName14 ),
rec( 30, 1, AttributesDef ),
rec( 31, 1, FileExecuteType ),
rec( 32, 4, FileSize, ENC_BIG_ENDIAN ),
rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ),
rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9900, 0x9b03, 0x9c03, 0xfd00,
0xff00])
# 2222/44, 68
pkt = NCP(0x44, "Erase File", 'file')
pkt.Request((10, 264), [
rec( 7, 1, DirHandle ),
rec( 8, 1, SearchAttributes ),
rec( 9, (1,255), FileName, info_str=(FileName, "Erase File: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8a00, 0x8d00, 0x8e00, 0x8f00,
0x9001, 0x9600, 0x9804, 0x9b03, 0x9c03,
0xa100, 0xfd00, 0xff00])
# 2222/45, 69
pkt = NCP(0x45, "Rename File", 'file')
pkt.Request((12, 520), [
rec( 7, 1, DirHandle ),
rec( 8, 1, SearchAttributes ),
rec( 9, (1,255), FileName, info_str=(FileName, "Rename File: %s", ", %s") ),
rec( -1, 1, TargetDirHandle ),
rec( -1, (1, 255), NewFileNameLen ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8701, 0x8b00, 0x8d00, 0x8e00,
0x8f00, 0x9001, 0x9101, 0x9201, 0x9600,
0x9804, 0x9a00, 0x9b03, 0x9c03, 0xa100,
0xfd00, 0xff16])
# 2222/46, 70
pkt = NCP(0x46, "Set File Attributes", 'file')
pkt.Request((11, 265), [
rec( 7, 1, AttributesDef ),
rec( 8, 1, DirHandle ),
rec( 9, 1, SearchAttributes ),
rec( 10, (1,255), FileName, info_str=(FileName, "Set File Attributes: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8c00, 0x8d00, 0x8e00, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa100, 0xfd00,
0xff16])
# 2222/47, 71
pkt = NCP(0x47, "Get Current Size of File", 'file')
pkt.Request(14, [
        rec( 7, 1, Reserved ),
rec( 8, 6, FileHandle, info_str=(FileHandle, "Get Current Size of File - 0x%s", ", %s") ),
])
pkt.Reply(12, [
rec( 8, 4, FileSize, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x8800])
# 2222/48, 72
pkt = NCP(0x48, "Read From A File", 'file')
pkt.Request(20, [
rec( 7, 1, Reserved ),
rec( 8, 6, FileHandle, info_str=(FileHandle, "Read From File - 0x%s", ", %s") ),
rec( 14, 4, FileOffset, ENC_BIG_ENDIAN ),
rec( 18, 2, MaxBytes, ENC_BIG_ENDIAN ),
])
pkt.Reply(10, [
rec( 8, 2, NumBytes, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x8300, 0x8800, 0x9300, 0xff1b])
# 2222/49, 73
pkt = NCP(0x49, "Write to a File", 'file')
pkt.Request(20, [
rec( 7, 1, Reserved ),
rec( 8, 6, FileHandle, info_str=(FileHandle, "Write to a File - 0x%s", ", %s") ),
rec( 14, 4, FileOffset, ENC_BIG_ENDIAN ),
rec( 18, 2, MaxBytes, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0104, 0x8300, 0x8800, 0x9400, 0x9500, 0xa201, 0xff1b])
# 2222/4A, 74
pkt = NCP(0x4A, "Copy from One File to Another", 'file')
pkt.Request(30, [
rec( 7, 1, Reserved ),
rec( 8, 6, FileHandle ),
rec( 14, 6, TargetFileHandle ),
rec( 20, 4, FileOffset, ENC_BIG_ENDIAN ),
rec( 24, 4, TargetFileOffset, ENC_BIG_ENDIAN ),
rec( 28, 2, BytesToCopy, ENC_BIG_ENDIAN ),
])
pkt.Reply(12, [
rec( 8, 4, BytesActuallyTransferred, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x0104, 0x8300, 0x8800, 0x9300, 0x9400,
0x9500, 0x9600, 0xa201, 0xff1b])
# 2222/4B, 75
pkt = NCP(0x4B, "Set File Time Date Stamp", 'file')
pkt.Request(18, [
rec( 7, 1, Reserved ),
rec( 8, 6, FileHandle, info_str=(FileHandle, "Set Time and Date Stamp for File - 0x%s", ", %s") ),
rec( 14, 2, FileTime, ENC_BIG_ENDIAN ),
rec( 16, 2, FileDate, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8800, 0x9400, 0x9600, 0xfb08])
# 2222/4C, 76
pkt = NCP(0x4C, "Open File", 'file')
pkt.Request((11, 265), [
rec( 7, 1, DirHandle ),
rec( 8, 1, SearchAttributes ),
rec( 9, 1, AccessRightsMask ),
rec( 10, (1,255), FileName, info_str=(FileName, "Open File: %s", ", %s") ),
])
pkt.Reply(44, [
rec( 8, 6, FileHandle ),
rec( 14, 2, Reserved2 ),
rec( 16, 14, FileName14 ),
rec( 30, 1, AttributesDef ),
rec( 31, 1, FileExecuteType ),
rec( 32, 4, FileSize, ENC_BIG_ENDIAN ),
rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ),
rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8200, 0x9400,
0x9600, 0x9804, 0x9c03, 0xa100, 0xfd00,
0xff16])
# 2222/4D, 77
pkt = NCP(0x4D, "Create File", 'file')
pkt.Request((10, 264), [
rec( 7, 1, DirHandle ),
rec( 8, 1, AttributesDef ),
rec( 9, (1,255), FileName, info_str=(FileName, "Create File: %s", ", %s") ),
])
pkt.Reply(44, [
rec( 8, 6, FileHandle ),
rec( 14, 2, Reserved2 ),
rec( 16, 14, FileName14 ),
rec( 30, 1, AttributesDef ),
rec( 31, 1, FileExecuteType ),
rec( 32, 4, FileSize, ENC_BIG_ENDIAN ),
rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ),
rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9900, 0x9b03, 0x9c03, 0xfd00,
0xff00])
# 2222/4F, 79
pkt = NCP(0x4F, "Set File Extended Attributes", 'file')
pkt.Request((11, 265), [
rec( 7, 1, AttributesDef ),
rec( 8, 1, DirHandle ),
rec( 9, 1, AccessRightsMask ),
rec( 10, (1,255), FileName, info_str=(FileName, "Set File Extended Attributes: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8c00, 0x8d00, 0x8e00, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa100, 0xfd00,
0xff16])
# 2222/54, 84
pkt = NCP(0x54, "Open/Create File", 'file')
pkt.Request((12, 266), [
rec( 7, 1, DirHandle ),
rec( 8, 1, AttributesDef ),
rec( 9, 1, AccessRightsMask ),
rec( 10, 1, ActionFlag ),
rec( 11, (1,255), FileName, info_str=(FileName, "Open/Create File: %s", ", %s") ),
])
pkt.Reply(44, [
rec( 8, 6, FileHandle ),
rec( 14, 2, Reserved2 ),
rec( 16, 14, FileName14 ),
rec( 30, 1, AttributesDef ),
rec( 31, 1, FileExecuteType ),
rec( 32, 4, FileSize, ENC_BIG_ENDIAN ),
rec( 36, 2, CreationDate, ENC_BIG_ENDIAN ),
rec( 38, 2, LastAccessedDate, ENC_BIG_ENDIAN ),
rec( 40, 2, ModifiedDate, ENC_BIG_ENDIAN ),
rec( 42, 2, ModifiedTime, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16])
# 2222/55, 85
pkt = NCP(0x55, "Get Sparse File Data Block Bit Map", 'file', has_length=1)
pkt.Request(19, [
rec( 7, 2, SubFuncStrucLen, ENC_BIG_ENDIAN ),
rec( 9, 6, FileHandle, info_str=(FileHandle, "Get Sparse File Data Block Bitmap for File - 0x%s", ", %s") ),
rec( 15, 4, FileOffset ),
])
pkt.Reply(528, [
rec( 8, 4, AllocationBlockSize ),
rec( 12, 4, Reserved4 ),
rec( 16, 512, BitMap ),
])
pkt.CompletionCodes([0x0000, 0x8800])
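# Note that 0x55 is declared with has_length=1 while the 0x56xx/0x57xx
# calls below use has_length=0; presumably (not confirmed here) the flag
# records whether the request carries a subfunction length word, which
# would be consistent with SubFuncStrucLen sitting at offset 7 above.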
# 2222/5601, 86/01
pkt = NCP(0x5601, "Close Extended Attribute Handle", 'extended', has_length=0 )
pkt.Request(14, [
rec( 8, 2, Reserved2 ),
rec( 10, 4, EAHandle ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xcf00, 0xd301])
# 2222/5602, 86/02
pkt = NCP(0x5602, "Write Extended Attribute", 'extended', has_length=0 )
pkt.Request((35,97), [
rec( 8, 2, EAFlags ),
rec( 10, 4, EAHandleOrNetWareHandleOrVolume, ENC_BIG_ENDIAN ),
rec( 14, 4, ReservedOrDirectoryNumber ),
rec( 18, 4, TtlWriteDataSize ),
rec( 22, 4, FileOffset ),
rec( 26, 4, EAAccessFlag ),
rec( 30, 2, EAValueLength, var='x' ),
rec( 32, (2,64), EAKey, info_str=(EAKey, "Write Extended Attribute: %s", ", %s") ),
rec( -1, 1, EAValueRep, repeat='x' ),
])
pkt.Reply(20, [
rec( 8, 4, EAErrorCodes ),
rec( 12, 4, EABytesWritten ),
rec( 16, 4, NewEAHandle ),
])
pkt.CompletionCodes([0x0000, 0xc800, 0xc900, 0xcb00, 0xce00, 0xcf00, 0xd101,
0xd203, 0xd301, 0xd402, 0xda02, 0xdc01, 0xef00, 0xff00])
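# An offset of -1 in rec(), as with EAValueRep above, appears to mean
# "starts wherever the preceding variable-length field ended": no fixed
# offset can be stated after a (min,max)-sized field such as EAKey. The
# same convention appears in the AFP rename request above and the trustee
# requests below.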
# 2222/5603, 86/03
pkt = NCP(0x5603, "Read Extended Attribute", 'extended', has_length=0 )
pkt.Request((28,538), [
rec( 8, 2, EAFlags ),
rec( 10, 4, EAHandleOrNetWareHandleOrVolume ),
rec( 14, 4, ReservedOrDirectoryNumber ),
rec( 18, 4, FileOffset ),
rec( 22, 4, InspectSize ),
rec( 26, (2,512), EAKey, info_str=(EAKey, "Read Extended Attribute: %s", ", %s") ),
])
pkt.Reply((26,536), [
rec( 8, 4, EAErrorCodes ),
rec( 12, 4, TtlValuesLength ),
rec( 16, 4, NewEAHandle ),
rec( 20, 4, EAAccessFlag ),
rec( 24, (2,512), EAValue ),
])
pkt.CompletionCodes([0x0000, 0x8800, 0x9c03, 0xc900, 0xce00, 0xcf00, 0xd101,
0xd301, 0xd503])
# 2222/5604, 86/04
pkt = NCP(0x5604, "Enumerate Extended Attribute", 'extended', has_length=0 )
pkt.Request((26,536), [
rec( 8, 2, EAFlags ),
rec( 10, 4, EAHandleOrNetWareHandleOrVolume ),
rec( 14, 4, ReservedOrDirectoryNumber ),
rec( 18, 4, InspectSize ),
rec( 22, 2, SequenceNumber ),
rec( 24, (2,512), EAKey, info_str=(EAKey, "Enumerate Extended Attribute: %s", ", %s") ),
])
pkt.Reply(28, [
rec( 8, 4, EAErrorCodes ),
rec( 12, 4, TtlEAs ),
rec( 16, 4, TtlEAsDataSize ),
rec( 20, 4, TtlEAsKeySize ),
rec( 24, 4, NewEAHandle ),
])
pkt.CompletionCodes([0x0000, 0x8800, 0x8c01, 0xc800, 0xc900, 0xce00, 0xcf00, 0xd101,
0xd301, 0xd503, 0xfb08, 0xff00])
# 2222/5605, 86/05
pkt = NCP(0x5605, "Duplicate Extended Attributes", 'extended', has_length=0 )
pkt.Request(28, [
rec( 8, 2, EAFlags ),
rec( 10, 2, DstEAFlags ),
rec( 12, 4, EAHandleOrNetWareHandleOrVolume ),
rec( 16, 4, ReservedOrDirectoryNumber ),
rec( 20, 4, EAHandleOrNetWareHandleOrVolume ),
rec( 24, 4, ReservedOrDirectoryNumber ),
])
pkt.Reply(20, [
rec( 8, 4, EADuplicateCount ),
rec( 12, 4, EADataSizeDuplicated ),
rec( 16, 4, EAKeySizeDuplicated ),
])
pkt.CompletionCodes([0x0000, 0x8800, 0xd101])
# 2222/5701, 87/01
pkt = NCP(0x5701, "Open/Create File or Subdirectory", 'file', has_length=0)
pkt.Request((30, 284), [
rec( 8, 1, NameSpace ),
rec( 9, 1, OpenCreateMode ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 4, AttributesDef32 ),
rec( 20, 2, DesiredAccessRights ),
rec( 22, 1, VolumeNumber ),
rec( 23, 4, DirectoryBase ),
rec( 27, 1, HandleFlag ),
rec( 28, 1, PathCount, var="x" ),
rec( 29, (1,255), Path, repeat="x", info_str=(Path, "Open or Create: %s", "/%s") ),
])
pkt.Reply( NO_LENGTH_CHECK, [
rec( 8, 4, FileHandle ),
rec( 12, 1, OpenCreateAction ),
rec( 13, 1, Reserved ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
        rec( -1, 4, DataStreamsCount, var="x", req_cond="ncp.ret_info_mask_actual == 1" ),
        srec( DStreamActual, repeat="x", req_cond="ncp.ret_info_mask_actual == 1" ),
        rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
        srec( DStreamLogical, repeat="y", req_cond="ncp.ret_info_mask_logical == 1" ),
srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ),
srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x0102, 0x7f00, 0x8001, 0x8101, 0x8401, 0x8501,
0x8701, 0x8900, 0x8d00, 0x8f00, 0x9001, 0x9400, 0x9600,
0x9804, 0x9900, 0x9b03, 0x9c03, 0xa500, 0xa802, 0xa901, 0xbf00, 0xfd00, 0xff16])
pkt.MakeExpert("file_rights")
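# The 0x57xx replies come in two layouts, distinguished by
# ncp.ext_info_newstyle: in the old style (== 0) every ReturnInfoMask bit
# contributes either its info structure or an explicit Pad* placeholder,
# keeping the reply a predictable shape, while in the new style (== 1)
# cleared bits are simply omitted. The trailing srec() entries
# (ReferenceIDStruct onward) are gated on individual mask and
# extended-info bits in both layouts.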
# 2222/5702, 87/02
pkt = NCP(0x5702, "Initialize Search", 'file', has_length=0)
pkt.Request((18,272), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 1, VolumeNumber ),
rec( 11, 4, DirectoryBase ),
rec( 15, 1, HandleFlag ),
rec( 16, 1, PathCount, var="x" ),
rec( 17, (1,255), Path, repeat="x", info_str=(Path, "Set Search Pointer to: %s", "/%s") ),
])
pkt.Reply(17, [
rec( 8, 1, VolumeNumber ),
rec( 9, 4, DirectoryNumber ),
rec( 13, 4, DirectoryEntryNumber ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16])
# 2222/5703, 87/03
pkt = NCP(0x5703, "Search for File or Subdirectory", 'file', has_length=0)
pkt.Request((26, 280), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 9, SeachSequenceStruct ),
rec( 25, (1,255), SearchPattern, info_str=(SearchPattern, "Search for: %s", "/%s") ),
])
pkt.Reply( NO_LENGTH_CHECK, [
rec( 8, 9, SeachSequenceStruct ),
rec( 17, 1, Reserved ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
srec( DStreamActual, req_cond="ncp.ret_info_mask_actual == 1" ),
srec( DStreamLogical, req_cond="ncp.ret_info_mask_logical == 1" ),
srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ),
srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16])
# 2222/5704, 87/04
pkt = NCP(0x5704, "Rename Or Move a File or Subdirectory", 'file', has_length=0)
pkt.Request((28, 536), [
rec( 8, 1, NameSpace ),
rec( 9, 1, RenameFlag ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 1, VolumeNumber ),
rec( 13, 4, DirectoryBase ),
rec( 17, 1, HandleFlag ),
rec( 18, 1, PathCount, var="x" ),
rec( 19, 1, VolumeNumber ),
rec( 20, 4, DirectoryBase ),
rec( 24, 1, HandleFlag ),
rec( 25, 1, PathCount, var="y" ),
rec( 26, (1, 255), Path, repeat="x", info_str=(Path, "Rename or Move: %s", "/%s") ),
rec( -1, (1,255), DestPath, repeat="y" ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9100, 0x9200, 0x9600,
0x9804, 0x9a00, 0x9b03, 0x9c03, 0x9e00, 0xa901, 0xbf00, 0xfd00, 0xff16])
# 2222/5705, 87/05
pkt = NCP(0x5705, "Scan File or Subdirectory for Trustees", 'file', has_length=0)
pkt.Request((24, 278), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 4, SequenceNumber ),
rec( 16, 1, VolumeNumber ),
rec( 17, 4, DirectoryBase ),
rec( 21, 1, HandleFlag ),
rec( 22, 1, PathCount, var="x" ),
rec( 23, (1, 255), Path, repeat="x", info_str=(Path, "Scan Trustees for: %s", "/%s") ),
])
pkt.Reply(20, [
rec( 8, 4, SequenceNumber ),
rec( 12, 2, ObjectIDCount, var="x" ),
rec( 14, 6, TrusteeStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c04, 0xa901, 0xbf00, 0xfd00, 0xff16])
# 2222/5706, 87/06
pkt = NCP(0x5706, "Obtain File or SubDirectory Information", 'file', has_length=0)
pkt.Request((24,278), [
rec( 10, 1, SrcNameSpace ),
rec( 11, 1, DestNameSpace ),
rec( 12, 2, SearchAttributesLow ),
rec( 14, 2, ReturnInfoMask, ENC_LITTLE_ENDIAN ),
rec( 16, 2, ExtendedInfo ),
rec( 18, 1, VolumeNumber ),
rec( 19, 4, DirectoryBase ),
rec( 23, 1, HandleFlag ),
rec( 24, 1, PathCount, var="x" ),
rec( 25, (1,255), Path, repeat="x", info_str=(Path, "Obtain Info for: %s", "/%s")),
])
pkt.Reply(NO_LENGTH_CHECK, [
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
        rec( -1, 4, DataStreamsCount, var="x", req_cond="ncp.ret_info_mask_actual == 1" ),
        srec( DStreamActual, repeat="x", req_cond="ncp.ret_info_mask_actual == 1" ),
        rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
        srec( DStreamLogical, repeat="y", req_cond="ncp.ret_info_mask_logical == 1" ),
srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ),
srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8700, 0x8900, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9802, 0x9b03, 0x9c03, 0xa802, 0xa901, 0xbf00, 0xfd00, 0xff16])
# 2222/5707, 87/07
pkt = NCP(0x5707, "Modify File or Subdirectory DOS Information", 'file', has_length=0)
pkt.Request((62,316), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ModifyDOSInfoMask ),
rec( 14, 2, Reserved2 ),
rec( 16, 2, AttributesDef16 ),
rec( 18, 1, FileMode ),
rec( 19, 1, FileExtendedAttributes ),
rec( 20, 2, CreationDate ),
rec( 22, 2, CreationTime ),
rec( 24, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 28, 2, ModifiedDate ),
rec( 30, 2, ModifiedTime ),
rec( 32, 4, ModifierID, ENC_BIG_ENDIAN ),
rec( 36, 2, ArchivedDate ),
rec( 38, 2, ArchivedTime ),
rec( 40, 4, ArchiverID, ENC_BIG_ENDIAN ),
rec( 44, 2, LastAccessedDate ),
rec( 46, 2, InheritedRightsMask ),
rec( 48, 2, InheritanceRevokeMask ),
rec( 50, 4, MaxSpace ),
rec( 54, 1, VolumeNumber ),
rec( 55, 4, DirectoryBase ),
rec( 59, 1, HandleFlag ),
rec( 60, 1, PathCount, var="x" ),
rec( 61, (1,255), Path, repeat="x", info_str=(Path, "Modify DOS Information for: %s", "/%s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8c01, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16])
# 2222/5708, 87/08
pkt = NCP(0x5708, "Delete a File or Subdirectory", 'file', has_length=0)
pkt.Request((20,274), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 1, VolumeNumber ),
rec( 13, 4, DirectoryBase ),
rec( 17, 1, HandleFlag ),
rec( 18, 1, PathCount, var="x" ),
rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Delete a File or Subdirectory: %s", "/%s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8900, 0x8a00, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16])
# 2222/5709, 87/09
pkt = NCP(0x5709, "Set Short Directory Handle", 'file', has_length=0)
pkt.Request((20,274), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 1, DestDirHandle ),
rec( 11, 1, Reserved ),
rec( 12, 1, VolumeNumber ),
rec( 13, 4, DirectoryBase ),
rec( 17, 1, HandleFlag ),
rec( 18, 1, PathCount, var="x" ),
rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Set Short Directory Handle to: %s", "/%s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16])
# 2222/570A, 87/10
pkt = NCP(0x570A, "Add Trustee Set to File or Subdirectory", 'file', has_length=0)
pkt.Request((31,285), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, AccessRightsMaskWord ),
rec( 14, 2, ObjectIDCount, var="y" ),
rec( 16, 1, VolumeNumber ),
rec( 17, 4, DirectoryBase ),
rec( 21, 1, HandleFlag ),
rec( 22, 1, PathCount, var="x" ),
rec( 23, (1,255), Path, repeat="x", info_str=(Path, "Add Trustee Set to: %s", "/%s") ),
rec( -1, 7, TrusteeStruct, repeat="y" ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8c01, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa802, 0xa901, 0xbf00, 0xfc01, 0xfd00, 0xff16])
# 2222/570B, 87/11
pkt = NCP(0x570B, "Delete Trustee Set from File or Subdirectory", 'file', has_length=0)
pkt.Request((27,281), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, ObjectIDCount, var="y" ),
rec( 12, 1, VolumeNumber ),
rec( 13, 4, DirectoryBase ),
rec( 17, 1, HandleFlag ),
rec( 18, 1, PathCount, var="x" ),
rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Delete Trustee Set from: %s", "/%s") ),
rec( -1, 7, TrusteeStruct, repeat="y" ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8c01, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/570C, 87/12
pkt = NCP(0x570C, "Allocate Short Directory Handle", 'file', has_length=0)
pkt.Request((20,274), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, AllocateMode ),
rec( 12, 1, VolumeNumber ),
rec( 13, 4, DirectoryBase ),
rec( 17, 1, HandleFlag ),
rec( 18, 1, PathCount, var="x" ),
rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Allocate Short Directory Handle to: %s", "/%s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
srec( ReplyLevel2Struct, req_cond="ncp.alloc_reply_lvl2 == TRUE" ),
srec( ReplyLevel1Struct, req_cond="ncp.alloc_reply_lvl2 == FALSE" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8900, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0x9d00, 0xa901, 0xbf00, 0xfd00, 0xff16])
# 2222/5710, 87/16
pkt = NCP(0x5710, "Scan Salvageable Files", 'file', has_length=0)
pkt.Request((26,280), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 2, ReturnInfoMask ),
rec( 12, 2, ExtendedInfo ),
rec( 14, 4, SequenceNumber ),
rec( 18, 1, VolumeNumber ),
rec( 19, 4, DirectoryBase ),
rec( 23, 1, HandleFlag ),
rec( 24, 1, PathCount, var="x" ),
rec( 25, (1,255), Path, repeat="x", info_str=(Path, "Scan for Deleted Files in: %s", "/%s") ),
])
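# The shape of this reply depends on the ReturnInfoMask/ExtendedInfo bits
# sent in the request: NO_LENGTH_CHECK suppresses the usual reply-length
# check, each srec() is decoded only when its req_cond display filter
# matches, the Pad* structs cover the padding left for unset old-style
# bits, and ReqCondSizeVariable() records that the size is variable.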
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 4, SequenceNumber ),
rec( 12, 2, DeletedTime ),
rec( 14, 2, DeletedDate ),
rec( 16, 4, DeletedID, ENC_BIG_ENDIAN ),
rec( 20, 4, VolumeID ),
rec( 24, 4, DirectoryBase ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/5711, 87/17
pkt = NCP(0x5711, "Recover Salvageable File", 'file', has_length=0)
pkt.Request((23,277), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 4, SequenceNumber ),
rec( 14, 4, VolumeID ),
rec( 18, 4, DirectoryBase ),
rec( 22, (1,255), FileName, info_str=(FileName, "Recover Deleted File: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa802, 0xbf00, 0xfe02, 0xfd00, 0xff16])
# 2222/5712, 87/18
pkt = NCP(0x5712, "Purge Salvageable Files", 'file', has_length=0)
pkt.Request(22, [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 4, SequenceNumber ),
rec( 14, 4, VolumeID ),
rec( 18, 4, DirectoryBase ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x010a, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/5713, 87/19
pkt = NCP(0x5713, "Get Name Space Information", 'file', has_length=0)
pkt.Request(18, [
rec( 8, 1, SrcNameSpace ),
rec( 9, 1, DestNameSpace ),
rec( 10, 1, Reserved ),
rec( 11, 1, VolumeNumber ),
rec( 12, 4, DirectoryBase ),
rec( 16, 2, NamesSpaceInfoMask ),
])
pkt.Reply(NO_LENGTH_CHECK, [
srec( FileNameStruct, req_cond="ncp.ns_info_mask_modify == TRUE" ),
srec( FileAttributesStruct, req_cond="ncp.ns_info_mask_fatt == TRUE" ),
srec( CreationDateStruct, req_cond="ncp.ns_info_mask_cdate == TRUE" ),
srec( CreationTimeStruct, req_cond="ncp.ns_info_mask_ctime == TRUE" ),
srec( OwnerIDStruct, req_cond="ncp.ns_info_mask_owner == TRUE" ),
srec( ArchiveDateStruct, req_cond="ncp.ns_info_mask_adate == TRUE" ),
srec( ArchiveTimeStruct, req_cond="ncp.ns_info_mask_atime == TRUE" ),
srec( ArchiveIdStruct, req_cond="ncp.ns_info_mask_aid == TRUE" ),
srec( UpdateDateStruct, req_cond="ncp.ns_info_mask_udate == TRUE" ),
srec( UpdateTimeStruct, req_cond="ncp.ns_info_mask_utime == TRUE" ),
srec( UpdateIDStruct, req_cond="ncp.ns_info_mask_uid == TRUE" ),
srec( LastAccessStruct, req_cond="ncp.ns_info_mask_acc_date == TRUE" ),
srec( RightsInfoStruct, req_cond="ncp.ns_info_mask_max_acc_mask == TRUE" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/5714, 87/20
pkt = NCP(0x5714, "Search for File or Subdirectory Set", 'file', has_length=0)
pkt.Request((27, 27), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 2, ReturnInfoCount ),
rec( 18, 9, SeachSequenceStruct ),
# rec( 27, (1,255), SearchPattern ),
])
# The reply packet is dissected in packet-ncp2222.inc
pkt.Reply(NO_LENGTH_CHECK, [
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xbf00, 0xfd00, 0xff16])
# 2222/5715, 87/21
pkt = NCP(0x5715, "Get Path String from Short Directory Handle", 'file', has_length=0)
pkt.Request(10, [
rec( 8, 1, NameSpace ),
rec( 9, 1, DirHandle ),
])
pkt.Reply((9,263), [
rec( 8, (1,255), Path ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16])
# 2222/5716, 87/22
pkt = NCP(0x5716, "Generate Directory Base and Volume Number", 'file', has_length=0)
pkt.Request((20,274), [
rec( 8, 1, SrcNameSpace ),
rec( 9, 1, DestNameSpace ),
rec( 10, 2, dstNSIndicator ),
rec( 12, 1, VolumeNumber ),
rec( 13, 4, DirectoryBase ),
rec( 17, 1, HandleFlag ),
rec( 18, 1, PathCount, var="x" ),
rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Get Volume and Directory Base from: %s", "/%s") ),
])
pkt.Reply(17, [
rec( 8, 4, DirectoryBase ),
rec( 12, 4, DOSDirectoryBase ),
rec( 16, 1, VolumeNumber ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/5717, 87/23
pkt = NCP(0x5717, "Query Name Space Information Format", 'file', has_length=0)
pkt.Request(10, [
rec( 8, 1, NameSpace ),
rec( 9, 1, VolumeNumber ),
])
pkt.Reply(58, [
rec( 8, 4, FixedBitMask ),
rec( 12, 4, VariableBitMask ),
rec( 16, 4, HugeBitMask ),
rec( 20, 2, FixedBitsDefined ),
rec( 22, 2, VariableBitsDefined ),
rec( 24, 2, HugeBitsDefined ),
rec( 26, 32, FieldsLenTable ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/5718, 87/24
pkt = NCP(0x5718, "Get Name Spaces Loaded List from Volume Number", 'file', has_length=0)
pkt.Request(11, [
rec( 8, 2, Reserved2 ),
rec( 10, 1, VolumeNumber, info_str=(VolumeNumber, "Get Name Spaces Loaded List from Vol: %d", "/%d") ),
])
pkt.Reply(11, [
rec( 8, 2, NumberOfNSLoaded, var="x" ),
rec( 10, 1, NameSpace, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/5719, 87/25
pkt = NCP(0x5719, "Set Name Space Information", 'file', has_length=0)
pkt.Request(531, [
rec( 8, 1, SrcNameSpace ),
rec( 9, 1, DestNameSpace ),
rec( 10, 1, VolumeNumber ),
rec( 11, 4, DirectoryBase ),
rec( 15, 2, NamesSpaceInfoMask ),
rec( 17, 2, Reserved2 ),
rec( 19, 512, NSSpecificInfo ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001,
0x9600, 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00,
0xff16])
# 2222/571A, 87/26
pkt = NCP(0x571A, "Get Huge Name Space Information", 'file', has_length=0)
pkt.Request(34, [
rec( 8, 1, NameSpace ),
rec( 9, 1, VolumeNumber ),
rec( 10, 4, DirectoryBase ),
rec( 14, 4, HugeBitMask ),
rec( 18, 16, HugeStateInfo ),
])
pkt.Reply((25,279), [
rec( 8, 16, NextHugeStateInfo ),
rec( 24, (1,255), HugeData ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001,
0x9600, 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00,
0xff16])
# 2222/571B, 87/27
pkt = NCP(0x571B, "Set Huge Name Space Information", 'file', has_length=0)
pkt.Request((35,289), [
rec( 8, 1, NameSpace ),
rec( 9, 1, VolumeNumber ),
rec( 10, 4, DirectoryBase ),
rec( 14, 4, HugeBitMask ),
rec( 18, 16, HugeStateInfo ),
rec( 34, (1,255), HugeData ),
])
pkt.Reply(28, [
rec( 8, 16, NextHugeStateInfo ),
rec( 24, 4, HugeDataUsed ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001,
0x9600, 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00,
0xff16])
# 2222/571C, 87/28
pkt = NCP(0x571C, "Get Full Path String", 'file', has_length=0)
pkt.Request((28,282), [
rec( 8, 1, SrcNameSpace ),
rec( 9, 1, DestNameSpace ),
rec( 10, 2, PathCookieFlags ),
rec( 12, 4, Cookie1 ),
rec( 16, 4, Cookie2 ),
rec( 20, 1, VolumeNumber ),
rec( 21, 4, DirectoryBase ),
rec( 25, 1, HandleFlag ),
rec( 26, 1, PathCount, var="x" ),
rec( 27, (1,255), Path, repeat="x", info_str=(Path, "Get Full Path from: %s", "/%s") ),
])
pkt.Reply((23,277), [
rec( 8, 2, PathCookieFlags ),
rec( 10, 4, Cookie1 ),
rec( 14, 4, Cookie2 ),
rec( 18, 2, PathComponentSize ),
rec( 20, 2, PathComponentCount, var='x' ),
rec( 22, (1,255), Path, repeat='x' ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001,
0x9600, 0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00,
0xff16])
# 2222/571D, 87/29
pkt = NCP(0x571D, "Get Effective Directory Rights", 'file', has_length=0)
pkt.Request((24, 278), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DestNameSpace ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 1, VolumeNumber ),
rec( 17, 4, DirectoryBase ),
rec( 21, 1, HandleFlag ),
rec( 22, 1, PathCount, var="x" ),
rec( 23, (1,255), Path, repeat="x", info_str=(Path, "Get Effective Rights for: %s", "/%s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 2, EffectiveRights ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/571E, 87/30
pkt = NCP(0x571E, "Open/Create File or Subdirectory", 'file', has_length=0)
pkt.Request((34, 288), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 1, OpenCreateMode ),
rec( 11, 1, Reserved ),
rec( 12, 2, SearchAttributesLow ),
rec( 14, 2, Reserved2 ),
rec( 16, 2, ReturnInfoMask ),
rec( 18, 2, ExtendedInfo ),
rec( 20, 4, AttributesDef32 ),
rec( 24, 2, DesiredAccessRights ),
rec( 26, 1, VolumeNumber ),
rec( 27, 4, DirectoryBase ),
rec( 31, 1, HandleFlag ),
rec( 32, 1, PathCount, var="x" ),
rec( 33, (1,255), Path, repeat="x", info_str=(Path, "Open or Create File: %s", "/%s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
rec( 12, 1, OpenCreateAction ),
rec( 13, 1, Reserved ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbe00, 0xbf00, 0xfd00, 0xff16])
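# MakeExpert() hands this packet pair to the named handler in
# packet-ncp2222.inc; here the "file_rights" routine, which tracks the
# rights granted on open/create replies.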
pkt.MakeExpert("file_rights")
# 2222/571F, 87/31
pkt = NCP(0x571F, "Get File Information", 'file', has_length=0)
pkt.Request(15, [
rec( 8, 6, FileHandle, info_str=(FileHandle, "Get File Information - 0x%s", ", %s") ),
rec( 14, 1, HandleInfoLevel ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 4, VolumeNumberLong ),
rec( 12, 4, DirectoryBase ),
srec(HandleInfoLevel0, req_cond="ncp.handle_info_level==0x00" ),
srec(HandleInfoLevel1, req_cond="ncp.handle_info_level==0x01" ),
srec(HandleInfoLevel2, req_cond="ncp.handle_info_level==0x02" ),
srec(HandleInfoLevel3, req_cond="ncp.handle_info_level==0x03" ),
srec(HandleInfoLevel4, req_cond="ncp.handle_info_level==0x04" ),
srec(HandleInfoLevel5, req_cond="ncp.handle_info_level==0x05" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
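# 0x5720-0x5722 are the op-lock ("callback") variants of open/create:
# OCRetFlags in the reply carries the op-lock outcome, and 0x5722 below is
# the callback-control verb the client uses to answer an op-lock break.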
# 2222/5720, 87/32
pkt = NCP(0x5720, "Open/Create File or Subdirectory with Callback", 'file', has_length=0)
pkt.Request((30, 284), [
rec( 8, 1, NameSpace ),
rec( 9, 1, OpenCreateMode ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 4, AttributesDef32 ),
rec( 20, 2, DesiredAccessRights ),
rec( 22, 1, VolumeNumber ),
rec( 23, 4, DirectoryBase ),
rec( 27, 1, HandleFlag ),
rec( 28, 1, PathCount, var="x" ),
rec( 29, (1,255), Path, repeat="x", info_str=(Path, "Open or Create with Op-Lock: %s", "/%s") ),
])
pkt.Reply( NO_LENGTH_CHECK, [
rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
rec( 12, 1, OpenCreateAction ),
rec( 13, 1, OCRetFlags ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ),
srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x0102, 0x7f00, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
pkt.MakeExpert("file_rights")
# 2222/5721, 87/33
pkt = NCP(0x5721, "Open/Create File or Subdirectory II with Callback", 'file', has_length=0)
pkt.Request((34, 288), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 1, OpenCreateMode ),
rec( 11, 1, Reserved ),
rec( 12, 2, SearchAttributesLow ),
rec( 14, 2, Reserved2 ),
rec( 16, 2, ReturnInfoMask ),
rec( 18, 2, ExtendedInfo ),
rec( 20, 4, AttributesDef32 ),
rec( 24, 2, DesiredAccessRights ),
rec( 26, 1, VolumeNumber ),
rec( 27, 4, DirectoryBase ),
rec( 31, 1, HandleFlag ),
rec( 32, 1, PathCount, var="x" ),
rec( 33, (1,255), Path, repeat="x", info_str=(Path, "Open or Create II with Op-Lock: %s", "/%s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 4, FileHandle ),
rec( 12, 1, OpenCreateAction ),
rec( 13, 1, OCRetFlags ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
srec( DOSNameStruct, req_cond="ncp.ext_info_dos_name == 1" ),
srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
srec( FileNameStruct, req_cond="ncp.ret_info_mask_fname == 1" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbe00, 0xbf00, 0xfd00, 0xff16])
pkt.MakeExpert("file_rights")
# 2222/5722, 87/34
pkt = NCP(0x5722, "Open Callback Control (Op-Lock)", 'file', has_length=0)
pkt.Request(13, [
rec( 10, 4, CCFileHandle, ENC_BIG_ENDIAN ),
rec( 14, 1, CCFunction ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8800, 0xff16])
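# The remaining dissection of this request (note that its fields start at
# offset 10, not 8) is done by the "ncp5722_request" routine in
# packet-ncp2222.inc.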
pkt.MakeExpert("ncp5722_request")
# 2222/5723, 87/35
pkt = NCP(0x5723, "Modify DOS Attributes on a File or Subdirectory", 'file', has_length=0)
pkt.Request((28, 282), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Flags ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 4, AttributesDef32 ),
rec( 20, 1, VolumeNumber ),
rec( 21, 4, DirectoryBase ),
rec( 25, 1, HandleFlag ),
rec( 26, 1, PathCount, var="x" ),
rec( 27, (1,255), Path, repeat="x", info_str=(Path, "Modify DOS Attributes for: %s", "/%s") ),
])
pkt.Reply(24, [
rec( 8, 4, ItemsChecked ),
rec( 12, 4, ItemsChanged ),
rec( 16, 4, AttributeValidFlag ),
rec( 20, 4, AttributesDef32 ),
])
pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/5724, 87/36
pkt = NCP(0x5724, "Log File", 'sync', has_length=0)
pkt.Request((28, 282), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, Reserved2 ),
rec( 12, 1, LogFileFlagLow ),
rec( 13, 1, LogFileFlagHigh ),
rec( 14, 2, Reserved2 ),
rec( 16, 4, WaitTime ),
rec( 20, 1, VolumeNumber ),
rec( 21, 4, DirectoryBase ),
rec( 25, 1, HandleFlag ),
rec( 26, 1, PathCount, var="x" ),
rec( 27, (1,255), Path, repeat="x", info_str=(Path, "Lock File: %s", "/%s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/5725, 87/37
pkt = NCP(0x5725, "Release File", 'sync', has_length=0)
pkt.Request((20, 274), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, Reserved2 ),
rec( 12, 1, VolumeNumber ),
rec( 13, 4, DirectoryBase ),
rec( 17, 1, HandleFlag ),
rec( 18, 1, PathCount, var="x" ),
rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Release Lock on: %s", "/%s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/5726, 87/38
pkt = NCP(0x5726, "Clear File", 'sync', has_length=0)
pkt.Request((20, 274), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, Reserved2 ),
rec( 12, 1, VolumeNumber ),
rec( 13, 4, DirectoryBase ),
rec( 17, 1, HandleFlag ),
rec( 18, 1, PathCount, var="x" ),
rec( 19, (1,255), Path, repeat="x", info_str=(Path, "Clear File: %s", "/%s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/5727, 87/39
pkt = NCP(0x5727, "Get Directory Disk Space Restriction", 'file', has_length=0)
pkt.Request((19, 273), [
rec( 8, 1, NameSpace ),
rec( 9, 2, Reserved2 ),
rec( 11, 1, VolumeNumber ),
rec( 12, 4, DirectoryBase ),
rec( 16, 1, HandleFlag ),
rec( 17, 1, PathCount, var="x" ),
rec( 18, (1,255), Path, repeat="x", info_str=(Path, "Get Disk Space Restriction for: %s", "/%s") ),
])
pkt.Reply(18, [
rec( 8, 1, NumberOfEntries, var="x" ),
rec( 9, 9, SpaceStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00,
0xff16])
# 2222/5728, 87/40
pkt = NCP(0x5728, "Search for File or Subdirectory Set (Extended Errors)", 'file', has_length=0)
pkt.Request((28, 282), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 2, ReturnInfoCount ),
rec( 18, 9, SeachSequenceStruct ),
rec( 27, (1,255), SearchPattern, info_str=(SearchPattern, "Search for: %s", ", %s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 9, SeachSequenceStruct ),
rec( 17, 1, MoreFlag ),
rec( 18, 2, InfoCount ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( FileNameStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/5729, 87/41
pkt = NCP(0x5729, "Scan Salvageable Files", 'file', has_length=0)
pkt.Request((24,278), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, CtrlFlags, ENC_LITTLE_ENDIAN ),
rec( 12, 4, SequenceNumber ),
rec( 16, 1, VolumeNumber ),
rec( 17, 4, DirectoryBase ),
rec( 21, 1, HandleFlag ),
rec( 22, 1, PathCount, var="x" ),
rec( 23, (1,255), Path, repeat="x", info_str=(Path, "Scan Deleted Files: %s", "/%s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 4, SequenceNumber ),
rec( 12, 4, DirectoryBase ),
rec( 16, 4, ScanItems, var="x" ),
srec(ScanInfoFileName, req_cond="ncp.ctrl_flags==0x0001", repeat="x" ),
srec(ScanInfoFileNoName, req_cond="ncp.ctrl_flags==0x0000", repeat="x" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/572A, 87/42
pkt = NCP(0x572A, "Purge Salvageable File List", 'file', has_length=0)
pkt.Request(28, [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, PurgeFlags ),
rec( 12, 4, VolumeNumberLong ),
rec( 16, 4, DirectoryBase ),
rec( 20, 4, PurgeCount, var="x" ),
rec( 24, 4, PurgeList, repeat="x" ),
])
pkt.Reply(16, [
rec( 8, 4, PurgeCount, var="x" ),
rec( 12, 4, PurgeCcode, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/572B, 87/43
pkt = NCP(0x572B, "Revoke File Handle Rights", 'file', has_length=0)
pkt.Request(17, [
rec( 8, 3, Reserved3 ),
rec( 11, 1, RevQueryFlag ),
rec( 12, 4, FileHandle ),
rec( 16, 1, RemoveOpenRights ),
])
pkt.Reply(13, [
rec( 8, 4, FileHandle ),
rec( 12, 1, OpenRights ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
# 2222/572C, 87/44
pkt = NCP(0x572C, "Update File Handle Rights", 'file', has_length=0)
pkt.Request(24, [
rec( 8, 2, Reserved2 ),
rec( 10, 1, VolumeNumber ),
rec( 11, 1, NameSpace ),
rec( 12, 4, DirectoryNumber ),
rec( 16, 2, AccessRightsMaskWord ),
rec( 18, 2, NewAccessRights ),
rec( 20, 4, FileHandle, ENC_BIG_ENDIAN ),
])
pkt.Reply(16, [
rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
rec( 12, 4, EffectiveRights, ENC_LITTLE_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff16])
pkt.MakeExpert("ncp572c")
# 2222/5740, 87/64
pkt = NCP(0x5740, "Read from File", 'file', has_length=0)
pkt.Request(22, [
rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
rec( 12, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
rec( 20, 2, NumBytes, ENC_BIG_ENDIAN ),
])
pkt.Reply(10, [
rec( 8, 2, NumBytes, ENC_BIG_ENDIAN),
])
pkt.CompletionCodes([0x0000, 0x8300, 0x8800, 0x9300, 0x9500, 0xa201, 0xfd00, 0xff1b])
# 2222/5741, 87/65
pkt = NCP(0x5741, "Write to File", 'file', has_length=0)
pkt.Request(22, [
rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
rec( 12, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
rec( 20, 2, NumBytes, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x8300, 0x8800, 0x9400, 0x9500, 0xa201, 0xfd00, 0xff1b])
# 2222/5742, 87/66
pkt = NCP(0x5742, "Get Current Size of File", 'file', has_length=0)
pkt.Request(12, [
rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
])
pkt.Reply(16, [
rec( 8, 8, FileSize64bit),
])
pkt.CompletionCodes([0x0000, 0x7f00, 0x8800, 0x9600, 0xfd02, 0xff01])
# 2222/5743, 87/67
pkt = NCP(0x5743, "Log Physical Record", 'file', has_length=0)
pkt.Request(36, [
rec( 8, 4, LockFlag, ENC_BIG_ENDIAN ),
rec(12, 4, FileHandle, ENC_BIG_ENDIAN ),
rec(16, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
rec(24, 8, Length64bit, ENC_BIG_ENDIAN ),
rec(32, 4, LockTimeout, ENC_BIG_ENDIAN),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7f00, 0x8800, 0x9600, 0xfb08, 0xfd02, 0xff01])
# 2222/5744, 87/68
pkt = NCP(0x5744, "Release Physical Record", 'file', has_length=0)
pkt.Request(28, [
rec(8, 4, FileHandle, ENC_BIG_ENDIAN ),
rec(12, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
rec(20, 8, Length64bit, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff1a])
# 2222/5745, 87/69
pkt = NCP(0x5745, "Clear Physical Record", 'file', has_length=0)
pkt.Request(28, [
rec(8, 4, FileHandle, ENC_BIG_ENDIAN ),
rec(12, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
rec(20, 8, Length64bit, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xbf00, 0xfd00, 0xff1a])
# 2222/5746, 87/70
pkt = NCP(0x5746, "Copy from One File to Another (64-bit Offset Capable)", 'file', has_length=0)
pkt.Request(44, [
rec(8, 6, SourceFileHandle, ENC_BIG_ENDIAN ),
rec(14, 6, TargetFileHandle, ENC_BIG_ENDIAN ),
rec(20, 8, SourceFileOffset, ENC_BIG_ENDIAN ),
rec(28, 8, TargetFileOffset64bit, ENC_BIG_ENDIAN ),
rec(36, 8, BytesToCopy64bit, ENC_BIG_ENDIAN ),
])
pkt.Reply(16, [
rec( 8, 8, BytesActuallyTransferred64bit, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000, 0x0104, 0x8301, 0x8800, 0x9300, 0x9400,
0x9500, 0x9600, 0xa201])
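# The reply below selects one of three extent-list layouts according to the
# format echoed back from the request (ncp.ext_lst_format 0, 1 or 2).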
# 2222/5747, 87/71
pkt = NCP(0x5747, "Get Sparse File Data Block Bit Map", 'file', has_length=0)
pkt.Request(23, [
rec(8, 6, SourceFileHandle, ENC_BIG_ENDIAN ),
rec(14, 8, SourceFileOffset, ENC_BIG_ENDIAN ),
rec(22, 1, ExtentListFormat, ENC_BIG_ENDIAN ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 1, ExtentListFormat ),
rec( 9, 1, RetExtentListCount, var="x" ),
rec( 10, 8, EndingOffset ),
srec(zFileMap_Allocation, req_cond="ncp.ext_lst_format==0", repeat="x" ),
srec(zFileMap_Logical, req_cond="ncp.ext_lst_format==1", repeat="x" ),
srec(zFileMap_Physical, req_cond="ncp.ext_lst_format==2", repeat="x" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8800, 0xff00])
# 2222/5748, 87/72
pkt = NCP(0x5748, "Read a File", 'file', has_length=0)
pkt.Request(24, [
rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
rec( 12, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
rec( 20, 4, NumBytesLong, ENC_BIG_ENDIAN ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 4, NumBytesLong, ENC_BIG_ENDIAN),
rec( 12, PROTO_LENGTH_UNKNOWN, Data64),
# decoded in packet-ncp2222.inc
# rec( NumBytesLong, 4, BytesActuallyTransferred64bit, ENC_BIG_ENDIAN),
])
pkt.CompletionCodes([0x0000, 0x8300, 0x8800, 0x9300, 0x9500, 0xa201, 0xfd00, 0xff1b])
# 2222/5749, 87/73
pkt = NCP(0x5749, "Write to a File", 'file', has_length=0)
pkt.Request(24, [
rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
rec( 12, 8, StartOffset64bit, ENC_BIG_ENDIAN ),
rec( 20, 4, NumBytesLong, ENC_BIG_ENDIAN ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x8300, 0x8800, 0x9400, 0x9500, 0xa201, 0xfd00, 0xff1b])
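# 0x58xx is the auditing family. Most of these verbs are defined
# header-only (Request(8)/Reply(8)): completion codes are decoded, but the
# request and reply payloads are not dissected further.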
# 2222/5801, 88/01
pkt = NCP(0x5801, "Query Volume Audit Status", "auditing", has_length=0)
pkt.Request(12, [
rec( 8, 4, ConnectionNumber ),
])
pkt.Reply(40, [
rec(8, 32, NWAuditStatus ),
])
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5802, 88/02
pkt = NCP(0x5802, "Add User Audit Property", "auditing", has_length=0)
pkt.Request(25, [
rec(8, 4, AuditIDType ),
rec(12, 4, AuditID ),
rec(16, 4, AuditHandle ),
rec(20, 4, ObjectID ),
rec(24, 1, AuditFlag ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5803, 88/03
pkt = NCP(0x5803, "Add Auditor Access", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xde00, 0xfd00, 0xff16])
# 2222/5804, 88/04
pkt = NCP(0x5804, "Change Auditor Volume Password", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5805, 88/05
pkt = NCP(0x5805, "Check Auditor Access", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5806, 88/06
pkt = NCP(0x5806, "Delete User Audit Property", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff21])
# 2222/5807, 88/07
pkt = NCP(0x5807, "Disable Auditing On A Volume", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5808, 88/08
pkt = NCP(0x5808, "Enable Auditing On A Volume", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xde00, 0xfd00, 0xff16])
# 2222/5809, 88/09
pkt = NCP(0x5809, "Query User Being Audited", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/580A, 88/10
pkt = NCP(0x580A, "Read Audit Bit Map", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/580B, 88/11
pkt = NCP(0x580B, "Read Audit File Configuration Header", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/580D, 88/13
pkt = NCP(0x580D, "Remove Auditor Access", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/580E, 88/14
pkt = NCP(0x580E, "Reset Audit File", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/580F, 88/15
pkt = NCP(0x580F, "Auditing NCP", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfb00, 0xfd00, 0xff16])
# 2222/5810, 88/16
pkt = NCP(0x5810, "Write Audit Bit Map", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5811, 88/17
pkt = NCP(0x5811, "Write Audit File Configuration Header", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5812, 88/18
pkt = NCP(0x5812, "Change Auditor Volume Password2", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5813, 88/19
pkt = NCP(0x5813, "Return Audit Flags", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5814, 88/20
pkt = NCP(0x5814, "Close Old Audit File", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5816, 88/22
pkt = NCP(0x5816, "Check Level Two Access", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xde00, 0xfd00, 0xff16])
# 2222/5817, 88/23
pkt = NCP(0x5817, "Return Old Audit File List", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5818, 88/24
pkt = NCP(0x5818, "Init Audit File Reads", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5819, 88/25
pkt = NCP(0x5819, "Read Auditing File", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/581A, 88,26
pkt = NCP(0x581A, "Delete Old Audit File", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/581E, 88,30
pkt = NCP(0x581E, "Restart Volume Auditing", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/581F, 88,31
pkt = NCP(0x581F, "Set Volume Password", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0106, 0x7300, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa600, 0xa700, 0xa801, 0xbe00, 0xfd00, 0xff16])
# 2222/5901, 89,01
pkt = NCP(0x5901, "Open/Create File or Subdirectory", 'enhanced', has_length=0)
pkt.Request((37,290), [
rec( 8, 1, NameSpace ),
rec( 9, 1, OpenCreateMode ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 4, AttributesDef32 ),
rec( 20, 2, DesiredAccessRights ),
rec( 22, 4, DirectoryBase ),
rec( 26, 1, VolumeNumber ),
rec( 27, 1, HandleFlag ),
rec( 28, 1, DataTypeFlag ),
rec( 29, 5, Reserved5 ),
rec( 34, 1, PathCount, var="x" ),
rec( 35, (2,255), Path16, repeat="x", info_str=(Path16, "Open or Create File or Subdirectory: %s", "/%s") ),
])
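# Conditional reply: each srec below is included only when its req_cond
# display-filter expression holds, i.e. when the matching bit of the request's
# ReturnInfoMask/ExtendedInfo is set, so the reply layout and length vary per
# request (hence NO_LENGTH_CHECK here and pkt.ReqCondSizeVariable() below).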
pkt.Reply( NO_LENGTH_CHECK, [
rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
rec( 12, 1, OpenCreateAction ),
rec( 13, 1, Reserved ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ),
srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x0102, 0x7f00, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
                      0x9804, 0x9900, 0x9b03, 0x9c03, 0xa500, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
pkt.MakeExpert("file_rights")
# 2222/5902, 89/02
pkt = NCP(0x5902, "Initialize Search", 'enhanced', has_length=0)
pkt.Request( (25,278), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 4, DirectoryBase ),
rec( 14, 1, VolumeNumber ),
rec( 15, 1, HandleFlag ),
rec( 16, 1, DataTypeFlag ),
rec( 17, 5, Reserved5 ),
rec( 22, 1, PathCount, var="x" ),
rec( 23, (2,255), Path16, repeat="x", info_str=(Path16, "Set Search Pointer to: %s", "/%s") ),
])
pkt.Reply(17, [
rec( 8, 1, VolumeNumber ),
rec( 9, 4, DirectoryNumber ),
rec( 13, 4, DirectoryEntryNumber ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5903, 89/03
pkt = NCP(0x5903, "Search for File or Subdirectory", 'enhanced', has_length=0)
pkt.Request(26, [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 9, SeachSequenceStruct ),
rec( 25, 1, DataTypeFlag ),
        # The next field is dissected in packet-ncp2222.inc.
#rec( 26, (2,255), SearchPattern16, info_str=(SearchPattern16, "Search for: %s", "/%s") ),
])
pkt.Reply( NO_LENGTH_CHECK, [
rec( 8, 9, SeachSequenceStruct ),
rec( 17, 1, Reserved ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ),
srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5904, 89/04
pkt = NCP(0x5904, "Rename Or Move a File or Subdirectory", 'enhanced', has_length=0)
pkt.Request((42, 548), [
rec( 8, 1, NameSpace ),
rec( 9, 1, RenameFlag ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 12, SrcEnhNWHandlePathS1 ),
rec( 24, 1, PathCount, var="x" ),
rec( 25, 12, DstEnhNWHandlePathS1 ),
rec( 37, 1, PathCount, var="y" ),
rec( 38, (2, 255), Path16, repeat="x", info_str=(Path16, "Rename or Move: %s", "/%s") ),
rec( -1, (2,255), DestPath16, repeat="y" ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9200, 0x9600,
0x9804, 0x9a00, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5905, 89/05
pkt = NCP(0x5905, "Scan File or Subdirectory for Trustees", 'enhanced', has_length=0)
pkt.Request((31, 284), [
rec( 8, 1, NameSpace ),
rec( 9, 1, MaxReplyObjectIDCount ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 4, SequenceNumber ),
rec( 16, 4, DirectoryBase ),
rec( 20, 1, VolumeNumber ),
rec( 21, 1, HandleFlag ),
rec( 22, 1, DataTypeFlag ),
rec( 23, 5, Reserved5 ),
rec( 28, 1, PathCount, var="x" ),
rec( 29, (2, 255), Path16, repeat="x", info_str=(Path16, "Scan Trustees for: %s", "/%s") ),
])
pkt.Reply(20, [
rec( 8, 4, SequenceNumber ),
rec( 12, 2, ObjectIDCount, var="x" ),
rec( 14, 6, TrusteeStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5906, 89/06
pkt = NCP(0x5906, "Obtain File or Subdirectory Information", 'enhanced', has_length=0)
pkt.Request(22, [
rec( 8, 1, SrcNameSpace ),
rec( 9, 1, DestNameSpace ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask, ENC_LITTLE_ENDIAN ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 4, DirectoryBase ),
rec( 20, 1, VolumeNumber ),
rec( 21, 1, HandleFlag ),
        #
        # Moved to packet-ncp2222.inc.
        # The data type flag indicates whether the path is represented as ASCII
        # or UTF8: ASCII uses a one-byte count field, whereas UTF8 uses a
        # two-byte count field (see the sketch after this request definition).
        #
        #rec( 22, 1, DataTypeFlag ),
        #rec( 23, 5, Reserved5 ),
        #rec( 28, 1, PathCount, var="x" ),
        #rec( 29, (2,255), Path16, repeat="x", info_str=(Path16, "Obtain Info for: %s", "/%s")),
])
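# Illustrative sketch only (not used by the generator): one plausible way to
# decode the flagged path layout described above. Assumptions, not confirmed
# here: a DataTypeFlag of 0 selects ASCII (one-byte count) and any other value
# selects UTF8 (two-byte count, assumed little-endian); the helper name is
# hypothetical.
def _sketch_decode_flagged_path(data, offset, data_type_flag):
    """Return (path_bytes, next_offset) for one flagged path component."""
    if data_type_flag == 0:
        # ASCII: a single count byte precedes the path bytes.
        count = data[offset]
        start = offset + 1
    else:
        # UTF8: a two-byte count (assumed little-endian) precedes the bytes.
        count = data[offset] | (data[offset + 1] << 8)
        start = offset + 2
    return bytes(data[start:start + count]), start + count
# Example: _sketch_decode_flagged_path(b"\x03abc", 0, 0) returns (b"abc", 4).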
pkt.Reply(NO_LENGTH_CHECK, [
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ),
srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8700, 0x8900, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa802, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5907, 89/07
pkt = NCP(0x5907, "Modify File or Subdirectory DOS Information", 'enhanced', has_length=0)
pkt.Request((69,322), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ModifyDOSInfoMask ),
rec( 14, 2, Reserved2 ),
rec( 16, 2, AttributesDef16 ),
rec( 18, 1, FileMode ),
rec( 19, 1, FileExtendedAttributes ),
rec( 20, 2, CreationDate ),
rec( 22, 2, CreationTime ),
rec( 24, 4, CreatorID, ENC_BIG_ENDIAN ),
rec( 28, 2, ModifiedDate ),
rec( 30, 2, ModifiedTime ),
rec( 32, 4, ModifierID, ENC_BIG_ENDIAN ),
rec( 36, 2, ArchivedDate ),
rec( 38, 2, ArchivedTime ),
rec( 40, 4, ArchiverID, ENC_BIG_ENDIAN ),
rec( 44, 2, LastAccessedDate ),
rec( 46, 2, InheritedRightsMask ),
rec( 48, 2, InheritanceRevokeMask ),
rec( 50, 4, MaxSpace ),
rec( 54, 4, DirectoryBase ),
rec( 58, 1, VolumeNumber ),
rec( 59, 1, HandleFlag ),
rec( 60, 1, DataTypeFlag ),
rec( 61, 5, Reserved5 ),
rec( 66, 1, PathCount, var="x" ),
rec( 67, (2,255), Path16, repeat="x", info_str=(Path16, "Modify DOS Information for: %s", "/%s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x7902, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8c01, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5908, 89/08
pkt = NCP(0x5908, "Delete a File or Subdirectory", 'enhanced', has_length=0)
pkt.Request((27,280), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 4, DirectoryBase ),
rec( 16, 1, VolumeNumber ),
rec( 17, 1, HandleFlag ),
rec( 18, 1, DataTypeFlag ),
rec( 19, 5, Reserved5 ),
rec( 24, 1, PathCount, var="x" ),
rec( 25, (2,255), Path16, repeat="x", info_str=(Path16, "Delete a File or Subdirectory: %s", "/%s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8900, 0x8a00, 0x8d00, 0x8e00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5909, 89/09
pkt = NCP(0x5909, "Set Short Directory Handle", 'enhanced', has_length=0)
pkt.Request((27,280), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 1, DestDirHandle ),
rec( 11, 1, Reserved ),
rec( 12, 4, DirectoryBase ),
rec( 16, 1, VolumeNumber ),
rec( 17, 1, HandleFlag ),
rec( 18, 1, DataTypeFlag ),
rec( 19, 5, Reserved5 ),
rec( 24, 1, PathCount, var="x" ),
rec( 25, (2,255), Path16, repeat="x", info_str=(Path16, "Set Short Directory Handle to: %s", "/%s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/590A, 89/10
pkt = NCP(0x590A, "Add Trustee Set to File or Subdirectory", 'enhanced', has_length=0)
pkt.Request((37,290), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, AccessRightsMaskWord ),
rec( 14, 2, ObjectIDCount, var="y" ),
rec( -1, 6, TrusteeStruct, repeat="y" ),
rec( -1, 4, DirectoryBase ),
rec( -1, 1, VolumeNumber ),
rec( -1, 1, HandleFlag ),
rec( -1, 1, DataTypeFlag ),
rec( -1, 5, Reserved5 ),
rec( -1, 1, PathCount, var="x" ),
rec( -1, (2,255), Path16, repeat="x", info_str=(Path16, "Add Trustee Set to: %s", "/%s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8c01, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfc01, 0xfd00, 0xff16])
# 2222/590B, 89/11
pkt = NCP(0x590B, "Delete Trustee Set from File or Subdirectory", 'enhanced', has_length=0)
pkt.Request((34,287), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 2, ObjectIDCount, var="y" ),
rec( 12, 7, TrusteeStruct, repeat="y" ),
rec( 19, 4, DirectoryBase ),
rec( 23, 1, VolumeNumber ),
rec( 24, 1, HandleFlag ),
rec( 25, 1, DataTypeFlag ),
rec( 26, 5, Reserved5 ),
rec( 31, 1, PathCount, var="x" ),
rec( 32, (2,255), Path16, repeat="x", info_str=(Path16, "Delete Trustee Set from: %s", "/%s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8c01, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/590C, 89/12
pkt = NCP(0x590C, "Allocate Short Directory Handle", 'enhanced', has_length=0)
pkt.Request((27,280), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DestNameSpace ),
rec( 10, 2, AllocateMode ),
rec( 12, 4, DirectoryBase ),
rec( 16, 1, VolumeNumber ),
rec( 17, 1, HandleFlag ),
rec( 18, 1, DataTypeFlag ),
rec( 19, 5, Reserved5 ),
rec( 24, 1, PathCount, var="x" ),
rec( 25, (2,255), Path16, repeat="x", info_str=(Path16, "Allocate Short Directory Handle to: %s", "/%s") ),
])
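# The reply is either a level 1 or a level 2 structure; the generated
# ncp.alloc_reply_lvl2 flag appears to track the reply level requested via
# AllocateMode in the request (an inference from the field names, not
# confirmed here).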
pkt.Reply(NO_LENGTH_CHECK, [
srec( ReplyLevel2Struct, req_cond="ncp.alloc_reply_lvl2 == TRUE" ),
srec( ReplyLevel1Struct, req_cond="ncp.alloc_reply_lvl2 == FALSE" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5910, 89/16
pkt = NCP(0x5910, "Scan Salvageable Files", 'enhanced', has_length=0)
pkt.Request((33,286), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 2, ReturnInfoMask ),
rec( 12, 2, ExtendedInfo ),
rec( 14, 4, SequenceNumber ),
rec( 18, 4, DirectoryBase ),
rec( 22, 1, VolumeNumber ),
rec( 23, 1, HandleFlag ),
rec( 24, 1, DataTypeFlag ),
rec( 25, 5, Reserved5 ),
rec( 30, 1, PathCount, var="x" ),
rec( 31, (2,255), Path16, repeat="x", info_str=(Path16, "Scan for Deleted Files in: %s", "/%s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 4, SequenceNumber ),
rec( 12, 2, DeletedTime ),
rec( 14, 2, DeletedDate ),
rec( 16, 4, DeletedID, ENC_BIG_ENDIAN ),
rec( 20, 4, VolumeID ),
rec( 24, 4, DirectoryBase ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( ReferenceIDStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_id == 1)" ),
srec( NSAttributeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns_attr == 1)" ),
rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ),
srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5911, 89/17
pkt = NCP(0x5911, "Recover Salvageable File", 'enhanced', has_length=0)
pkt.Request((24,278), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 4, SequenceNumber ),
rec( 14, 4, VolumeID ),
rec( 18, 4, DirectoryBase ),
rec( 22, 1, DataTypeFlag ),
rec( 23, (1,255), FileName16, info_str=(FileName16, "Recover Deleted File: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5913, 89/19
pkt = NCP(0x5913, "Get Name Space Information", 'enhanced', has_length=0)
pkt.Request(18, [
rec( 8, 1, SrcNameSpace ),
rec( 9, 1, DestNameSpace ),
rec( 10, 1, DataTypeFlag ),
rec( 11, 1, VolumeNumber ),
rec( 12, 4, DirectoryBase ),
rec( 16, 2, NamesSpaceInfoMask ),
])
pkt.Reply(NO_LENGTH_CHECK, [
srec( FileName16Struct, req_cond="ncp.ns_info_mask_modify == TRUE" ),
srec( FileAttributesStruct, req_cond="ncp.ns_info_mask_fatt == TRUE" ),
srec( CreationDateStruct, req_cond="ncp.ns_info_mask_cdate == TRUE" ),
srec( CreationTimeStruct, req_cond="ncp.ns_info_mask_ctime == TRUE" ),
srec( OwnerIDStruct, req_cond="ncp.ns_info_mask_owner == TRUE" ),
srec( ArchiveDateStruct, req_cond="ncp.ns_info_mask_adate == TRUE" ),
srec( ArchiveTimeStruct, req_cond="ncp.ns_info_mask_atime == TRUE" ),
srec( ArchiveIdStruct, req_cond="ncp.ns_info_mask_aid == TRUE" ),
srec( UpdateDateStruct, req_cond="ncp.ns_info_mask_udate == TRUE" ),
srec( UpdateTimeStruct, req_cond="ncp.ns_info_mask_utime == TRUE" ),
srec( UpdateIDStruct, req_cond="ncp.ns_info_mask_uid == TRUE" ),
srec( LastAccessStruct, req_cond="ncp.ns_info_mask_acc_date == TRUE" ),
srec( RightsInfoStruct, req_cond="ncp.ns_info_mask_max_acc_mask == TRUE" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5914, 89/20
pkt = NCP(0x5914, "Search for File or Subdirectory Set", 'enhanced', has_length=0)
pkt.Request((28, 28), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 2, ReturnInfoCount ),
rec( 18, 9, SeachSequenceStruct ),
rec( 27, 1, DataTypeFlag ),
        # The next field is dissected in packet-ncp2222.inc.
#rec( 28, (2,255), SearchPattern16 ),
])
# The reply packet is dissected in packet-ncp2222.inc
pkt.Reply(NO_LENGTH_CHECK, [
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5916, 89/22
pkt = NCP(0x5916, "Generate Directory Base and Volume Number", 'enhanced', has_length=0)
pkt.Request((27,280), [
rec( 8, 1, SrcNameSpace ),
rec( 9, 1, DestNameSpace ),
rec( 10, 2, dstNSIndicator ),
rec( 12, 4, DirectoryBase ),
rec( 16, 1, VolumeNumber ),
rec( 17, 1, HandleFlag ),
rec( 18, 1, DataTypeFlag ),
rec( 19, 5, Reserved5 ),
rec( 24, 1, PathCount, var="x" ),
rec( 25, (2,255), Path16, repeat="x", info_str=(Path16, "Get Volume and Directory Base from: %s", "/%s") ),
])
pkt.Reply(17, [
rec( 8, 4, DirectoryBase ),
rec( 12, 4, DOSDirectoryBase ),
rec( 16, 1, VolumeNumber ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5919, 89/25
pkt = NCP(0x5919, "Set Name Space Information", 'enhanced', has_length=0)
pkt.Request(530, [
rec( 8, 1, SrcNameSpace ),
rec( 9, 1, DestNameSpace ),
rec( 10, 1, VolumeNumber ),
rec( 11, 4, DirectoryBase ),
rec( 15, 2, NamesSpaceInfoMask ),
rec( 17, 1, DataTypeFlag ),
rec( 18, 512, NSSpecificInfo ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001,
0x9600, 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00,
0xff16])
# 2222/591C, 89/28
pkt = NCP(0x591C, "Get Full Path String", 'enhanced', has_length=0)
pkt.Request((35,288), [
rec( 8, 1, SrcNameSpace ),
rec( 9, 1, DestNameSpace ),
rec( 10, 2, PathCookieFlags ),
rec( 12, 4, Cookie1 ),
rec( 16, 4, Cookie2 ),
rec( 20, 4, DirectoryBase ),
rec( 24, 1, VolumeNumber ),
rec( 25, 1, HandleFlag ),
rec( 26, 1, DataTypeFlag ),
rec( 27, 5, Reserved5 ),
rec( 32, 1, PathCount, var="x" ),
rec( 33, (2,255), Path16, repeat="x", info_str=(Path16, "Get Full Path from: %s", "/%s") ),
])
pkt.Reply((24,277), [
rec( 8, 2, PathCookieFlags ),
rec( 10, 4, Cookie1 ),
rec( 14, 4, Cookie2 ),
rec( 18, 2, PathComponentSize ),
        rec( 20, 2, PathComponentCount, var="x" ),
        rec( 22, (2,255), Path16, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8b00, 0x8d00, 0x8f00, 0x9001,
0x9600, 0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00,
0xff16])
# 2222/591D, 89/29
pkt = NCP(0x591D, "Get Effective Directory Rights", 'enhanced', has_length=0)
pkt.Request((31, 284), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DestNameSpace ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 4, DirectoryBase ),
rec( 20, 1, VolumeNumber ),
rec( 21, 1, HandleFlag ),
rec( 22, 1, DataTypeFlag ),
rec( 23, 5, Reserved5 ),
rec( 28, 1, PathCount, var="x" ),
rec( 29, (2,255), Path16, repeat="x", info_str=(Path16, "Get Effective Rights for: %s", "/%s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 2, EffectiveRights, ENC_LITTLE_ENDIAN ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( FileSize64bitStruct, req_cond="(ncp.ext_info_64_bit_fs == 1) && (ncp.ret_info_mask_fname == 1)" ),
srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/591E, 89/30
pkt = NCP(0x591E, "Open/Create File or Subdirectory", 'enhanced', has_length=0)
pkt.Request((41, 294), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 1, OpenCreateMode ),
rec( 11, 1, Reserved ),
rec( 12, 2, SearchAttributesLow ),
rec( 14, 2, Reserved2 ),
rec( 16, 2, ReturnInfoMask ),
rec( 18, 2, ExtendedInfo ),
rec( 20, 4, AttributesDef32 ),
rec( 24, 2, DesiredAccessRights ),
rec( 26, 4, DirectoryBase ),
rec( 30, 1, VolumeNumber ),
rec( 31, 1, HandleFlag ),
rec( 32, 1, DataTypeFlag ),
rec( 33, 5, Reserved5 ),
rec( 38, 1, PathCount, var="x" ),
rec( 39, (2,255), Path16, repeat="x", info_str=(Path16, "Open or Create File: %s", "/%s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
rec( 12, 1, OpenCreateAction ),
rec( 13, 1, Reserved ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( FileSize64bitStruct, req_cond="(ncp.ext_info_64_bit_fs == 1) && (ncp.ret_info_mask_fname == 1)" ),
srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
pkt.MakeExpert("file_rights")
# 2222/5920, 89/32
pkt = NCP(0x5920, "Open/Create File or Subdirectory with Callback", 'enhanced', has_length=0)
pkt.Request((37, 290), [
rec( 8, 1, NameSpace ),
rec( 9, 1, OpenCreateMode ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 4, AttributesDef32 ),
rec( 20, 2, DesiredAccessRights ),
rec( 22, 4, DirectoryBase ),
rec( 26, 1, VolumeNumber ),
rec( 27, 1, HandleFlag ),
rec( 28, 1, DataTypeFlag ),
rec( 29, 5, Reserved5 ),
rec( 34, 1, PathCount, var="x" ),
rec( 35, (2,255), Path16, repeat="x", info_str=(Path16, "Open or Create with Op-Lock: %s", "/%s") ),
])
pkt.Reply( NO_LENGTH_CHECK, [
rec( 8, 4, FileHandle, ENC_BIG_ENDIAN ),
rec( 12, 1, OpenCreateAction ),
rec( 13, 1, OCRetFlags ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ),
srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x0102, 0x7f00, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9400, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
pkt.MakeExpert("file_rights")
# 2222/5921, 89/33
pkt = NCP(0x5921, "Open/Create File or Subdirectory II with Callback", 'enhanced', has_length=0)
pkt.Request((41, 294), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 1, OpenCreateMode ),
rec( 11, 1, Reserved ),
rec( 12, 2, SearchAttributesLow ),
rec( 14, 2, Reserved2 ),
rec( 16, 2, ReturnInfoMask ),
rec( 18, 2, ExtendedInfo ),
rec( 20, 4, AttributesDef32 ),
rec( 24, 2, DesiredAccessRights ),
rec( 26, 4, DirectoryBase ),
rec( 30, 1, VolumeNumber ),
rec( 31, 1, HandleFlag ),
rec( 32, 1, DataTypeFlag ),
rec( 33, 5, Reserved5 ),
rec( 38, 1, PathCount, var="x" ),
rec( 39, (2,255), Path16, repeat="x", info_str=(Path16, "Open or Create II with Op-Lock: %s", "/%s") ),
])
pkt.Reply( NO_LENGTH_CHECK, [
rec( 8, 4, FileHandle ),
rec( 12, 1, OpenCreateAction ),
rec( 13, 1, OCRetFlags ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ret_info_mask != 0x0000) && (ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( ReferenceIDStruct, req_cond="ncp.ret_info_mask_id == 1" ),
srec( NSAttributeStruct, req_cond="ncp.ret_info_mask_ns_attr == 1" ),
rec( -1, 4, DataStreamsCount, var="x" , req_cond="ncp.ret_info_mask_actual == 1" ),
srec( DStreamActual, repeat = "x" , req_cond="ncp.ret_info_mask_actual == 1" ),
rec( -1, 4, DataStreamsCount, var="y", req_cond="ncp.ret_info_mask_logical == 1" ),
srec( DStreamLogical, repeat="y" , req_cond="ncp.ret_info_mask_logical == 1" ),
srec( LastUpdatedInSecondsStruct, req_cond="ncp.ext_info_update == 1" ),
srec( DOSName16Struct, req_cond="ncp.ext_info_dos_name == 1" ),
srec( FlushTimeStruct, req_cond="ncp.ext_info_flush == 1" ),
srec( ParentBaseIDStruct, req_cond="ncp.ext_info_parental == 1" ),
srec( MacFinderInfoStruct, req_cond="ncp.ext_info_mac_finder == 1" ),
srec( SiblingCountStruct, req_cond="ncp.ext_info_sibling == 1" ),
srec( EffectiveRightsStruct, req_cond="ncp.ext_info_effective == 1" ),
srec( MacTimeStruct, req_cond="ncp.ext_info_mac_date == 1" ),
srec( LastAccessedTimeStruct, req_cond="ncp.ext_info_access == 1" ),
srec( FileSize64bitStruct, req_cond="ncp.ext_info_64_bit_fs == 1" ),
srec( FileName16Struct, req_cond="ncp.ret_info_mask_fname == 1" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
pkt.MakeExpert("file_rights")
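# The srec/req_cond entries above are conditional reply records: the
# dissector emits a given struct only when the display-filter style
# condition (bits saved from the request's ReturnInfoMask/ExtendedInfo)
# evaluates true. A minimal illustrative sketch, not part of the
# generator -- the argument names are hypothetical stand-ins for the
# dissector's saved request state:
def _example_pick_alloc_record(ext_info_newstyle, ret_info_mask_alloc):
    # Mirrors the DSSpaceAllocateStruct/PadDSSpaceAllocate pairs: an
    # old-style reply carries a pad struct when the mask bit is clear,
    # while a new-style reply omits the record entirely.
    if ext_info_newstyle == 0:
        return "DSSpaceAllocateStruct" if ret_info_mask_alloc else "PadDSSpaceAllocate"
    return "DSSpaceAllocateStruct" if ret_info_mask_alloc else None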
# 2222/5923, 89/35
pkt = NCP(0x5923, "Modify DOS Attributes on a File or Subdirectory", 'enhanced', has_length=0)
pkt.Request((35, 288), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Flags ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 4, AttributesDef32 ),
rec( 20, 4, DirectoryBase ),
rec( 24, 1, VolumeNumber ),
rec( 25, 1, HandleFlag ),
rec( 26, 1, DataTypeFlag ),
rec( 27, 5, Reserved5 ),
rec( 32, 1, PathCount, var="x" ),
rec( 33, (2,255), Path16, repeat="x", info_str=(Path16, "Modify DOS Attributes for: %s", "/%s") ),
])
pkt.Reply(24, [
rec( 8, 4, ItemsChecked ),
rec( 12, 4, ItemsChanged ),
rec( 16, 4, AttributeValidFlag ),
rec( 20, 4, AttributesDef32 ),
])
pkt.CompletionCodes([0x0000, 0x0102, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5927, 89/39
pkt = NCP(0x5927, "Get Directory Disk Space Restriction", 'enhanced', has_length=0)
pkt.Request((26, 279), [
rec( 8, 1, NameSpace ),
rec( 9, 2, Reserved2 ),
rec( 11, 4, DirectoryBase ),
rec( 15, 1, VolumeNumber ),
rec( 16, 1, HandleFlag ),
rec( 17, 1, DataTypeFlag ),
rec( 18, 5, Reserved5 ),
rec( 23, 1, PathCount, var="x" ),
rec( 24, (2,255), Path16, repeat="x", info_str=(Path16, "Get Disk Space Restriction for: %s", "/%s") ),
])
pkt.Reply(18, [
rec( 8, 1, NumberOfEntries, var="x" ),
rec( 9, 9, SpaceStruct, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00,
0xff16])
# 2222/5928, 89/40
pkt = NCP(0x5928, "Search for File or Subdirectory Set (Extended Errors)", 'enhanced', has_length=0)
pkt.Request((30, 283), [
rec( 8, 1, NameSpace ),
rec( 9, 1, DataStream ),
rec( 10, 2, SearchAttributesLow ),
rec( 12, 2, ReturnInfoMask ),
rec( 14, 2, ExtendedInfo ),
rec( 16, 2, ReturnInfoCount ),
rec( 18, 9, SeachSequenceStruct ),
rec( 27, 1, DataTypeFlag ),
rec( 28, (2,255), SearchPattern16, info_str=(SearchPattern16, "Search for: %s", ", %s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( 8, 9, SeachSequenceStruct ),
rec( 17, 1, MoreFlag ),
rec( 18, 2, InfoCount ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 1)" ),
srec( PadDSSpaceAllocate, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_alloc == 0)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 1)" ),
srec( PadAttributes, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_attr == 0)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 1)" ),
srec( PadDataStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_size == 0)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 1)" ),
srec( PadTotalStreamSize, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_tspace == 0)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 1)" ),
srec( PadCreationInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_create == 0)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 1)" ),
srec( PadModifyInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_mod == 0)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 1)" ),
srec( PadArchiveInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_arch == 0)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 1)" ),
srec( PadRightsInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_rights == 0)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 1)" ),
srec( PadDirEntry, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_dir == 0)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 1)" ),
srec( PadEAInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_eattr == 0)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 1)" ),
srec( PadNSInfo, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_ns == 0)" ),
srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 0) && (ncp.ret_info_mask_fname == 1)" ),
srec( DSSpaceAllocateStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_alloc == 1)" ),
srec( AttributesStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_attr == 1)" ),
srec( DataStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_size == 1)" ),
srec( TotalStreamSizeStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_tspace == 1)" ),
srec( CreationInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_create == 1)" ),
srec( ModifyInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_mod == 1)" ),
srec( ArchiveInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_arch == 1)" ),
srec( RightsInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_rights == 1)" ),
srec( DirEntryStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_dir == 1)" ),
srec( EAInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_eattr == 1)" ),
srec( NSInfoStruct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_ns == 1)" ),
srec( FileSize64bitStruct, req_cond="(ncp.ext_info_64_bit_fs == 1) && (ncp.ret_info_mask_fname == 1)" ),
srec( FileName16Struct, req_cond="(ncp.ext_info_newstyle == 1) && (ncp.ret_info_mask_fname == 1)" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5929, 89/41
pkt = NCP(0x5929, "Get Directory Disk Space Restriction 64 Bit Aware", 'enhanced', has_length=0)
pkt.Request((26, 279), [
rec( 8, 1, NameSpace ),
rec( 9, 1, Reserved ),
rec( 10, 1, InfoLevelNumber),
rec( 11, 4, DirectoryBase ),
rec( 15, 1, VolumeNumber ),
rec( 16, 1, HandleFlag ),
rec( 17, 1, DataTypeFlag ),
rec( 18, 5, Reserved5 ),
rec( 23, 1, PathCount, var="x" ),
rec( 24, (2,255), Path16, repeat="x", info_str=(Path16, "Get Disk Space Restriction for: %s", "/%s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec( -1, 8, MaxSpace64, req_cond = "(ncp.info_level_num == 0)" ),
rec( -1, 8, MinSpaceLeft64, req_cond = "(ncp.info_level_num == 0)" ),
rec( -1, 1, NumberOfEntries, var="x", req_cond = "(ncp.info_level_num == 1)" ),
srec( DirDiskSpaceRest64bit, repeat="x", req_cond = "(ncp.info_level_num == 1)" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00,
0xff16])
# 2222/5932, 89/50
pkt = NCP(0x5932, "Get Object Effective Rights", "enhanced", has_length=0)
pkt.Request(25, [
rec( 8, 1, NameSpace ),
rec( 9, 4, ObjectID ),
rec( 13, 4, DirectoryBase ),
rec( 17, 1, VolumeNumber ),
rec( 18, 1, HandleFlag ),
rec( 19, 1, DataTypeFlag ),
rec( 20, 5, Reserved5 ),
])
pkt.Reply( 10, [
rec( 8, 2, TrusteeRights ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0x9b00, 0x9c03, 0xa901, 0xaa00])
# 2222/5934, 89/52
pkt = NCP(0x5934, "Write Extended Attribute", 'enhanced', has_length=0 )
pkt.Request((36,98), [
rec( 8, 2, EAFlags ),
rec( 10, 4, EAHandleOrNetWareHandleOrVolume ),
rec( 14, 4, ReservedOrDirectoryNumber ),
rec( 18, 4, TtlWriteDataSize ),
rec( 22, 4, FileOffset ),
rec( 26, 4, EAAccessFlag ),
rec( 30, 1, DataTypeFlag ),
rec( 31, 2, EAValueLength, var='x' ),
rec( 33, (2,64), EAKey, info_str=(EAKey, "Write Extended Attribute: %s", ", %s") ),
rec( -1, 1, EAValueRep, repeat='x' ),
])
pkt.Reply(20, [
rec( 8, 4, EAErrorCodes ),
rec( 12, 4, EABytesWritten ),
rec( 16, 4, NewEAHandle ),
])
pkt.CompletionCodes([0x0000, 0xc800, 0xc900, 0xcb00, 0xce00, 0xcf00, 0xd101,
0xd203, 0xa901, 0xaa00, 0xd301, 0xd402])
# 2222/5935, 89/53
pkt = NCP(0x5935, "Read Extended Attribute", 'enhanced', has_length=0 )
pkt.Request((31,541), [
rec( 8, 2, EAFlags ),
rec( 10, 4, EAHandleOrNetWareHandleOrVolume ),
rec( 14, 4, ReservedOrDirectoryNumber ),
rec( 18, 4, FileOffset ),
rec( 22, 4, InspectSize ),
rec( 26, 1, DataTypeFlag ),
rec( 27, 2, MaxReadDataReplySize ),
rec( 29, (2,512), EAKey, info_str=(EAKey, "Read Extended Attribute: %s", ", %s") ),
])
pkt.Reply((26,536), [
rec( 8, 4, EAErrorCodes ),
rec( 12, 4, TtlValuesLength ),
rec( 16, 4, NewEAHandle ),
rec( 20, 4, EAAccessFlag ),
rec( 24, (2,512), EAValue ),
])
pkt.CompletionCodes([0x0000, 0xa901, 0xaa00, 0xc900, 0xce00, 0xcf00, 0xd101,
0xd301])
# 2222/5936, 89/54
pkt = NCP(0x5936, "Enumerate Extended Attribute", 'enhanced', has_length=0 )
pkt.Request((27,537), [
rec( 8, 2, EAFlags ),
rec( 10, 4, EAHandleOrNetWareHandleOrVolume ),
rec( 14, 4, ReservedOrDirectoryNumber ),
rec( 18, 4, InspectSize ),
rec( 22, 2, SequenceNumber ),
rec( 24, 1, DataTypeFlag ),
rec( 25, (2,512), EAKey, info_str=(EAKey, "Enumerate Extended Attribute: %s", ", %s") ),
])
pkt.Reply(28, [
rec( 8, 4, EAErrorCodes ),
rec( 12, 4, TtlEAs ),
rec( 16, 4, TtlEAsDataSize ),
rec( 20, 4, TtlEAsKeySize ),
rec( 24, 4, NewEAHandle ),
])
pkt.CompletionCodes([0x0000, 0x8800, 0xa901, 0xaa00, 0xc900, 0xce00, 0xcf00, 0xd101,
0xd301])
# 2222/5947, 89/71
pkt = NCP(0x5947, "Scan Volume Trustee Object Paths", 'enhanced', has_length=0)
pkt.Request(21, [
rec( 8, 4, VolumeID ),
rec( 12, 4, ObjectID ),
rec( 16, 4, SequenceNumber ),
rec( 20, 1, DataTypeFlag ),
])
pkt.Reply((20,273), [
rec( 8, 4, SequenceNumber ),
rec( 12, 4, ObjectID ),
rec( 16, 1, TrusteeAccessMask ),
rec( 17, 1, PathCount, var="x" ),
rec( 18, (2,255), Path16, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa901, 0xaa00, 0xbf00, 0xfd00, 0xff16])
# 2222/5A00, 90/00
pkt = NCP(0x5A00, "Parse Tree", 'file')
pkt.Request(46, [
rec( 10, 4, InfoMask ),
rec( 14, 4, Reserved4 ),
rec( 18, 4, Reserved4 ),
rec( 22, 4, limbCount ),
rec( 26, 4, limbFlags ),
rec( 30, 4, VolumeNumberLong ),
rec( 34, 4, DirectoryBase ),
rec( 38, 4, limbScanNum ),
rec( 42, 4, NameSpace ),
])
pkt.Reply(32, [
rec( 8, 4, limbCount ),
rec( 12, 4, ItemsCount ),
rec( 16, 4, nextLimbScanNum ),
rec( 20, 4, CompletionCode ),
rec( 24, 1, FolderFlag ),
rec( 25, 3, Reserved ),
rec( 28, 4, DirectoryBase ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16])
# 2222/5A0A, 90/10
pkt = NCP(0x5A0A, "Get Reference Count from Dir Entry Number", 'file')
pkt.Request(19, [
rec( 10, 4, VolumeNumberLong ),
rec( 14, 4, DirectoryBase ),
rec( 18, 1, NameSpace ),
])
pkt.Reply(12, [
rec( 8, 4, ReferenceCount ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16])
# 2222/5A0B, 90/11
pkt = NCP(0x5A0B, "Get Reference Count from Dir Handle", 'file')
pkt.Request(14, [
rec( 10, 4, DirHandle ),
])
pkt.Reply(12, [
rec( 8, 4, ReferenceCount ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16])
# 2222/5A0C, 90/12
pkt = NCP(0x5A0C, "Set Compressed File Size", 'file')
pkt.Request(20, [
rec( 10, 6, FileHandle ),
rec( 16, 4, SuggestedFileSize ),
])
pkt.Reply(16, [
rec( 8, 4, OldFileSize ),
rec( 12, 4, NewFileSize ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xfd00, 0xff16])
# 2222/5A80, 90/128
pkt = NCP(0x5A80, "Move File Data To Data Migration", 'migration')
pkt.Request(27, [
rec( 10, 4, VolumeNumberLong ),
rec( 14, 4, DirectoryEntryNumber ),
rec( 18, 1, NameSpace ),
rec( 19, 3, Reserved ),
rec( 22, 4, SupportModuleID ),
rec( 26, 1, DMFlags ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5A81, 90/129
pkt = NCP(0x5A81, "Data Migration File Information", 'migration')
pkt.Request(19, [
rec( 10, 4, VolumeNumberLong ),
rec( 14, 4, DirectoryEntryNumber ),
rec( 18, 1, NameSpace ),
])
pkt.Reply(24, [
rec( 8, 4, SupportModuleID ),
rec( 12, 4, RestoreTime ),
rec( 16, 4, DMInfoEntries, var="x" ),
rec( 20, 4, DataSize, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5A82, 90/130
pkt = NCP(0x5A82, "Volume Data Migration Status", 'migration')
pkt.Request(18, [
rec( 10, 4, VolumeNumberLong ),
rec( 14, 4, SupportModuleID ),
])
pkt.Reply(32, [
rec( 8, 4, NumOfFilesMigrated ),
rec( 12, 4, TtlMigratedSize ),
rec( 16, 4, SpaceUsed ),
rec( 20, 4, LimboUsed ),
rec( 24, 4, SpaceMigrated ),
rec( 28, 4, FileLimbo ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5A83, 90/131
pkt = NCP(0x5A83, "Migrator Status Info", 'migration')
pkt.Request(10)
pkt.Reply(20, [
rec( 8, 1, DMPresentFlag ),
rec( 9, 3, Reserved3 ),
rec( 12, 4, DMmajorVersion ),
rec( 16, 4, DMminorVersion ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5A84, 90/132
pkt = NCP(0x5A84, "Data Migration Support Module Information", 'migration')
pkt.Request(18, [
rec( 10, 1, DMInfoLevel ),
rec( 11, 3, Reserved3),
rec( 14, 4, SupportModuleID ),
])
pkt.Reply(NO_LENGTH_CHECK, [
srec( DMInfoLevel0, req_cond="ncp.dm_info_level == 0x00" ),
srec( DMInfoLevel1, req_cond="ncp.dm_info_level == 0x01" ),
srec( DMInfoLevel2, req_cond="ncp.dm_info_level == 0x02" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5A85, 90/133
pkt = NCP(0x5A85, "Move File Data From Data Migration", 'migration')
pkt.Request(19, [
rec( 10, 4, VolumeNumberLong ),
rec( 14, 4, DirectoryEntryNumber ),
rec( 18, 1, NameSpace ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5A86, 90/134
pkt = NCP(0x5A86, "Get/Set Default Read-Write Support Module ID", 'migration')
pkt.Request(18, [
rec( 10, 1, GetSetFlag ),
rec( 11, 3, Reserved3 ),
rec( 14, 4, SupportModuleID ),
])
pkt.Reply(12, [
rec( 8, 4, SupportModuleID ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5A87, 90/135
pkt = NCP(0x5A87, "Data Migration Support Module Capacity Request", 'migration')
pkt.Request(22, [
rec( 10, 4, SupportModuleID ),
rec( 14, 4, VolumeNumberLong ),
rec( 18, 4, DirectoryBase ),
])
pkt.Reply(20, [
rec( 8, 4, BlockSizeInSectors ),
rec( 12, 4, TotalBlocks ),
rec( 16, 4, UsedBlocks ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5A88, 90/136
pkt = NCP(0x5A88, "RTDM Request", 'migration')
pkt.Request(15, [
rec( 10, 4, Verb ),
rec( 14, 1, VerbData ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5A96, 90/150
pkt = NCP(0x5A96, "File Migration Request", 'file')
pkt.Request(22, [
rec( 10, 4, VolumeNumberLong ),
rec( 14, 4, DirectoryBase ),
rec( 18, 4, FileMigrationState ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfb00, 0xff16])
# 2222/5B, 91
pkt = NCP(0x5B, "NMAS Graded Authentication", 'nmas')
#Need info on this packet structure
pkt.Request(7)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# SecretStore data is dissected by packet-ncp-sss.c
# 2222/5C01, 92/01
pkt = NCP(0x5C01, "SecretStore Services (Ping Server)", 'sss', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5C02, 92/02
pkt = NCP(0x5C02, "SecretStore Services (Fragment)", 'sss', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5C03, 92/03
pkt = NCP(0x5C03, "SecretStore Services (Write App Secrets)", 'sss', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5C04, 92/04
pkt = NCP(0x5C04, "SecretStore Services (Add Secret ID)", 'sss', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5C05, 92/05
pkt = NCP(0x5C05, "SecretStore Services (Remove Secret ID)", 'sss', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5C06, 92/06
pkt = NCP(0x5C06, "SecretStore Services (Remove SecretStore)", 'sss', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5C07, 92/07
pkt = NCP(0x5C07, "SecretStore Services (Enumerate Secret IDs)", 'sss', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5C08, 92/08
pkt = NCP(0x5C08, "SecretStore Services (Unlock Store)", 'sss', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5C09, 92/09
pkt = NCP(0x5C09, "SecretStore Services (Set Master Password)", 'sss', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# 2222/5C0A, 92/10
pkt = NCP(0x5C0A, "SecretStore Services (Get Service Information)", 'sss', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e01, 0x8000, 0x8101, 0x8401, 0x8501,
0x8701, 0x8800, 0x8d00, 0x8f00, 0x9001, 0x9600, 0xfb0b,
0x9804, 0x9b03, 0x9c03, 0xa800, 0xfd00, 0xff16])
# NMAS packets are dissected in packet-ncp-nmas.c
# 2222/5E01, 94/01
pkt = NCP(0x5E01, "NMAS Communications Packet (Ping)", 'nmas', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xfb09, 0xff08])
# 2222/5E02, 94/02
pkt = NCP(0x5E02, "NMAS Communications Packet (Fragment)", 'nmas', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xfb09, 0xff08])
# 2222/5E03, 94/03
pkt = NCP(0x5E03, "NMAS Communications Packet (Abort)", 'nmas', 0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xfb09, 0xff08])
# 2222/61, 97
pkt = NCP(0x61, "Get Big Packet NCP Max Packet Size", 'connection')
pkt.Request(10, [
rec( 7, 2, ProposedMaxSize, ENC_BIG_ENDIAN, info_str=(ProposedMaxSize, "Get Big Max Packet Size - %d", ", %d") ),
rec( 9, 1, SecurityFlag ),
])
pkt.Reply(13, [
rec( 8, 2, AcceptedMaxSize, ENC_BIG_ENDIAN ),
rec( 10, 2, EchoSocket, ENC_BIG_ENDIAN ),
rec( 12, 1, SecurityFlag ),
])
pkt.CompletionCodes([0x0000])
# 2222/62, 98
pkt = NCP(0x62, "Negotiate NDS connection buffer size", 'connection')
pkt.Request(15, [
rec( 7, 8, ProposedMaxSize64, ENC_BIG_ENDIAN, info_str=(ProposedMaxSize64, "Negotiate NDS connection - %d", ", %d")),
])
pkt.Reply(18, [
rec( 8, 8, AcceptedMaxSize64, ENC_BIG_ENDIAN ),
rec( 16, 2, EchoSocket, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000])
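# Hedged reading of the info_str convention used throughout this file:
# the tuple names the field echoed into the Info column, the format for
# its first occurrence, and the format appended for each repeat. A
# self-contained sketch (the helper below is illustrative only, not the
# dissector's real mechanism):
def _example_info_column(first_fmt, repeat_fmt, values):
    out = first_fmt % values[0]
    for v in values[1:]:
        out += repeat_fmt % v
    return out

# e.g. _example_info_column("Negotiate NDS connection - %d", ", %d",
#      [4096, 8192]) -> "Negotiate NDS connection - 4096, 8192"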
# 2222/63, 99
pkt = NCP(0x63, "Undocumented Packet Burst", 'pburst')
pkt.Request(7)
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/64, 100
pkt = NCP(0x64, "Undocumented Packet Burst", 'pburst')
pkt.Request(7)
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/65, 101
pkt = NCP(0x65, "Packet Burst Connection Request", 'pburst')
pkt.Request(25, [
rec( 7, 4, LocalConnectionID, ENC_BIG_ENDIAN ),
rec( 11, 4, LocalMaxPacketSize, ENC_BIG_ENDIAN ),
rec( 15, 2, LocalTargetSocket, ENC_BIG_ENDIAN ),
rec( 17, 4, LocalMaxSendSize, ENC_BIG_ENDIAN ),
rec( 21, 4, LocalMaxRecvSize, ENC_BIG_ENDIAN ),
])
pkt.Reply(16, [
rec( 8, 4, RemoteTargetID, ENC_BIG_ENDIAN ),
rec( 12, 4, RemoteMaxPacketSize, ENC_BIG_ENDIAN ),
])
pkt.CompletionCodes([0x0000])
# 2222/66, 102
pkt = NCP(0x66, "Undocumented Packet Burst", 'pburst')
pkt.Request(7)
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/67, 103
pkt = NCP(0x67, "Undocumented Packet Burst", 'pburst')
pkt.Request(7)
pkt.Reply(8)
pkt.CompletionCodes([0x0000])
# 2222/6801, 104/01
pkt = NCP(0x6801, "Ping for NDS NCP", "nds", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x8100, 0xfb04, 0xfe0c])
# 2222/6802, 104/02
#
# XXX - if FraggerHandle is not 0xffffffff, this is not the
# first fragment, so we can only dissect this by reassembling;
# the fields after "Fragment Handle" are bogus for non-0xffffffff
# fragments, so we shouldn't dissect them. This is all handled in
# packet-ncp2222.inc; a hedged sketch of the first-fragment test
# follows this definition.
#
pkt = NCP(0x6802, "Send NDS Fragmented Request/Reply", "nds", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0xac00, 0xfd01])
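# A hedged sketch of the reassembly rule in the comment above: only a
# FraggerHandle of 0xffffffff marks the first fragment; for any other
# value the fields following the handle belong to a continuation and
# must wait for reassembly (handled for real in packet-ncp2222.inc).
def _example_is_first_nds_fragment(fragger_handle):
    return fragger_handle == 0xffffffff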
# 2222/6803, 104/03
pkt = NCP(0x6803, "Fragment Close", "nds", has_length=0)
pkt.Request(12, [
rec( 8, 4, FraggerHandle ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xff00])
# 2222/6804, 104/04
pkt = NCP(0x6804, "Return Bindery Context", "nds", has_length=0)
pkt.Request(8)
pkt.Reply((9, 263), [
rec( 8, (1,255), binderyContext ),
])
pkt.CompletionCodes([0x0000, 0xfe0c, 0xff00])
# 2222/6805, 104/05
pkt = NCP(0x6805, "Monitor NDS Connection", "nds", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7700, 0xfb00, 0xfe0c, 0xff00])
# 2222/6806, 104/06
pkt = NCP(0x6806, "Return NDS Statistics", "nds", has_length=0)
pkt.Request(10, [
rec( 8, 2, NDSRequestFlags ),
])
pkt.Reply(8)
#Need to investigate how to decode Statistics Return Value
pkt.CompletionCodes([0x0000, 0xfb00, 0xfe0c, 0xff00])
# 2222/6807, 104/07
pkt = NCP(0x6807, "Clear NDS Statistics", "nds", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xfb00, 0xfe0c, 0xff00])
# 2222/6808, 104/08
pkt = NCP(0x6808, "Reload NDS Software", "nds", has_length=0)
pkt.Request(8)
pkt.Reply(12, [
rec( 8, 4, NDSStatus ),
])
pkt.CompletionCodes([0x0000, 0xfb00, 0xfe0c, 0xff00])
# 2222/68C8, 104/200
pkt = NCP(0x68C8, "Query Container Audit Status", "auditing", has_length=0)
pkt.Request(12, [
rec( 8, 4, ConnectionNumber ),
])
pkt.Reply(40, [
rec(8, 32, NWAuditStatus ),
])
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68CA, 104/202
pkt = NCP(0x68CA, "Add Auditor Access", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68CB, 104/203
pkt = NCP(0x68CB, "Change Auditor Container Password", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68CC, 104/204
pkt = NCP(0x68CC, "Check Auditor Access", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68CE, 104/206
pkt = NCP(0x68CE, "Disable Container Auditing", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68CF, 104/207
pkt = NCP(0x68CF, "Enable Container Auditing", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68D1, 104/209
pkt = NCP(0x68D1, "Read Audit File Header", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68D3, 104/211
pkt = NCP(0x68D3, "Remove Auditor Access", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68D4, 104/212
pkt = NCP(0x68D4, "Reset Audit File", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68D6, 104/214
pkt = NCP(0x68D6, "Write Audit File Configuration Header", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68D7, 104/215
pkt = NCP(0x68D7, "Change Auditor Container Password2", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68D8, 104/216
pkt = NCP(0x68D8, "Return Audit Flags", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68D9, 104/217
pkt = NCP(0x68D9, "Close Old Audit File", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68DB, 104/219
pkt = NCP(0x68DB, "Check Level Two Access", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68DC, 104/220
pkt = NCP(0x68DC, "Check Object Audited", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68DD, 104/221
pkt = NCP(0x68DD, "Change Object Audited", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68DE, 104/222
pkt = NCP(0x68DE, "Return Old Audit File List", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68DF, 104/223
pkt = NCP(0x68DF, "Init Audit File Reads", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68E0, 104/224
pkt = NCP(0x68E0, "Read Auditing File", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68E1, 104/225
pkt = NCP(0x68E1, "Delete Old Audit File", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68E5, 104/229
pkt = NCP(0x68E5, "Set Audit Password", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/68E7, 104/231
pkt = NCP(0x68E7, "External Audit Append To File", "auditing", has_length=0)
pkt.Request(8)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0xa700, 0xfb00, 0xfe0c, 0xff00])
# 2222/69, 105
pkt = NCP(0x69, "Log File", 'sync')
pkt.Request( (12, 267), [
rec( 7, 1, DirHandle ),
rec( 8, 1, LockFlag ),
rec( 9, 2, TimeoutLimit ),
rec( 11, (1, 256), FilePath, info_str=(FilePath, "Log File: %s", "/%s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7f00, 0x8200, 0x9600, 0xfe0d, 0xff01])
# 2222/6A, 106
pkt = NCP(0x6A, "Lock File Set", 'sync')
pkt.Request( 9, [
rec( 7, 2, TimeoutLimit ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7f00, 0x8200, 0x9600, 0xfe0d, 0xff01])
# 2222/6B, 107
pkt = NCP(0x6B, "Log Logical Record", 'sync')
pkt.Request( (11, 266), [
rec( 7, 1, LockFlag ),
rec( 8, 2, TimeoutLimit ),
rec( 10, (1, 256), SynchName, info_str=(SynchName, "Log Logical Record: %s", ", %s") ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7f00, 0x9600, 0xfe0d, 0xff01])
# 2222/6C, 108
pkt = NCP(0x6C, "Lock Logical Record Set", 'sync')
pkt.Request( 10, [
rec( 7, 1, LockFlag ),
rec( 8, 2, TimeoutLimit ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7f00, 0x9600, 0xfe0d, 0xff01])
# 2222/6D, 109
pkt = NCP(0x6D, "Log Physical Record", 'sync')
pkt.Request(24, [
rec( 7, 1, LockFlag ),
rec( 8, 6, FileHandle ),
rec( 14, 4, LockAreasStartOffset ),
rec( 18, 4, LockAreaLen ),
rec( 22, 2, LockTimeout ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7f00, 0x8200, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff01])
# 2222/6E, 110
pkt = NCP(0x6E, "Lock Physical Record Set", 'sync')
pkt.Request(10, [
rec( 7, 1, LockFlag ),
rec( 8, 2, LockTimeout ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7f00, 0x8200, 0x8800, 0x9600, 0xfd02, 0xfe04, 0xff01])
# 2222/6F00, 111/00
pkt = NCP(0x6F00, "Open/Create a Semaphore", 'sync', has_length=0)
pkt.Request((10,521), [
rec( 8, 1, InitialSemaphoreValue ),
rec( 9, (1, 512), SemaphoreName, info_str=(SemaphoreName, "Open/Create Semaphore: %s", ", %s") ),
])
pkt.Reply(13, [
rec( 8, 4, SemaphoreHandle ),
rec( 12, 1, SemaphoreOpenCount ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
# 2222/6F01, 111/01
pkt = NCP(0x6F01, "Examine Semaphore", 'sync', has_length=0)
pkt.Request(12, [
rec( 8, 4, SemaphoreHandle ),
])
pkt.Reply(10, [
rec( 8, 1, SemaphoreValue ),
rec( 9, 1, SemaphoreOpenCount ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xff01])
# 2222/6F02, 111/02
pkt = NCP(0x6F02, "Wait On (P) Semaphore", 'sync', has_length=0)
pkt.Request(14, [
rec( 8, 4, SemaphoreHandle ),
rec( 12, 2, LockTimeout ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xfe04, 0xff01])
# 2222/6F03, 111/03
pkt = NCP(0x6F03, "Signal (V) Semaphore", 'sync', has_length=0)
pkt.Request(12, [
rec( 8, 4, SemaphoreHandle ),
])
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9600, 0xfe04, 0xff01])
# 2222/6F04, 111/04
pkt = NCP(0x6F04, "Close Semaphore", 'sync', has_length=0)
pkt.Request(12, [
rec( 8, 4, SemaphoreHandle ),
])
pkt.Reply(10, [
rec( 8, 1, SemaphoreOpenCount ),
rec( 9, 1, SemaphoreShareCount ),
])
pkt.CompletionCodes([0x0000, 0x9600, 0xfe04, 0xff01])
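# The five verbs above (111/00-111/04) expose classic counting
# semaphores. A hedged sketch of the usual client call sequence;
# 'ncp_call' is a hypothetical transport helper, not a real API:
def _example_semaphore_session(ncp_call):
    handle = ncp_call(0x6F00, initial_value=1, name="MYSEM")  # Open/Create
    ncp_call(0x6F02, handle=handle, timeout=0)  # Wait (P): decrement or block
    ncp_call(0x6F03, handle=handle)             # Signal (V): increment/release
    ncp_call(0x6F04, handle=handle)             # Close when done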
# 2222/70, 112
pkt = NCP(0x70, "Clear and Get Waiting Lock Completion", 'sync')
pkt.Request(7)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x9b00, 0x9c03, 0xff1a])
# 2222/7201, 114/01
pkt = NCP(0x7201, "Timesync Get Time", 'tsync')
pkt.Request(10)
pkt.Reply(32,[
rec( 8, 12, theTimeStruct ),
rec(20, 8, eventOffset ),
rec(28, 4, eventTime ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
# 2222/7202, 114/02
pkt = NCP(0x7202, "Timesync Exchange Time", 'tsync')
pkt.Request((63,112), [
rec( 10, 4, protocolFlags ),
rec( 14, 4, nodeFlags ),
rec( 18, 8, sourceOriginateTime ),
rec( 26, 8, targetReceiveTime ),
rec( 34, 8, targetTransmitTime ),
rec( 42, 8, sourceReturnTime ),
rec( 50, 8, eventOffset ),
rec( 58, 4, eventTime ),
rec( 62, (1,50), ServerNameLen, info_str=(ServerNameLen, "Timesync Exchange Time: %s", ", %s") ),
])
pkt.Reply((64,113), [
rec( 8, 3, Reserved3 ),
rec( 11, 4, protocolFlags ),
rec( 15, 4, nodeFlags ),
rec( 19, 8, sourceOriginateTime ),
rec( 27, 8, targetReceiveTime ),
rec( 35, 8, targetTransmitTime ),
rec( 43, 8, sourceReturnTime ),
rec( 51, 8, eventOffset ),
rec( 59, 4, eventTime ),
rec( 63, (1,50), ServerNameLen ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
# 2222/7205, 114/05
pkt = NCP(0x7205, "Timesync Get Server List", 'tsync')
pkt.Request(14, [
rec( 10, 4, StartNumber ),
])
pkt.Reply(66, [
rec( 8, 4, nameType ),
rec( 12, 48, ServerName ),
rec( 60, 4, serverListFlags ),
rec( 64, 2, startNumberFlag ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
# 2222/7206, 114/06
pkt = NCP(0x7206, "Timesync Set Server List", 'tsync')
pkt.Request(14, [
rec( 10, 4, StartNumber ),
])
pkt.Reply(66, [
rec( 8, 4, nameType ),
rec( 12, 48, ServerName ),
rec( 60, 4, serverListFlags ),
rec( 64, 2, startNumberFlag ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
# 2222/720C, 114/12
pkt = NCP(0x720C, "Timesync Get Version", 'tsync')
pkt.Request(10)
pkt.Reply(12, [
rec( 8, 4, version ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
# 2222/7B01, 123/01
pkt = NCP(0x7B01, "Get Cache Information", 'stats')
pkt.Request(10)
pkt.Reply(288, [
rec(8, 4, CurrentServerTime, ENC_LITTLE_ENDIAN),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 104, Counters ),
rec(120, 40, ExtraCacheCntrs ),
rec(160, 40, MemoryCounters ),
rec(200, 48, TrendCounters ),
rec(248, 40, CacheInfo ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xff00])
# 2222/7B02, 123/02
pkt = NCP(0x7B02, "Get File Server Information", 'stats')
pkt.Request(10)
pkt.Reply(150, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, NCPStaInUseCnt ),
rec(20, 4, NCPPeakStaInUse ),
rec(24, 4, NumOfNCPReqs ),
rec(28, 4, ServerUtilization ),
rec(32, 96, ServerInfo ),
rec(128, 22, FileServerCounters ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B03, 123/03
pkt = NCP(0x7B03, "NetWare File System Information", 'stats')
pkt.Request(11, [
rec(10, 1, FileSystemID ),
])
pkt.Reply(68, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 52, FileSystemInfo ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B04, 123/04
pkt = NCP(0x7B04, "User Information", 'stats')
pkt.Request(14, [
rec(10, 4, ConnectionNumber, ENC_LITTLE_ENDIAN ),
])
pkt.Reply((85, 132), [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 68, UserInformation ),
rec(84, (1, 48), UserName ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B05, 123/05
pkt = NCP(0x7B05, "Packet Burst Information", 'stats')
pkt.Request(10)
pkt.Reply(216, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 200, PacketBurstInformation ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B06, 123/06
pkt = NCP(0x7B06, "IPX SPX Information", 'stats')
pkt.Request(10)
pkt.Reply(94, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 34, IPXInformation ),
rec(50, 44, SPXInformation ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B07, 123/07
pkt = NCP(0x7B07, "Garbage Collection Information", 'stats')
pkt.Request(10)
pkt.Reply(40, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, FailedAllocReqCnt ),
rec(20, 4, NumberOfAllocs ),
rec(24, 4, NoMoreMemAvlCnt ),
rec(28, 4, NumOfGarbageColl ),
rec(32, 4, FoundSomeMem ),
rec(36, 4, NumOfChecks ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B08, 123/08
pkt = NCP(0x7B08, "CPU Information", 'stats')
pkt.Request(14, [
rec(10, 4, CPUNumber ),
])
pkt.Reply(51, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, NumberOfCPUs ),
rec(20, 31, CPUInformation ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B09, 123/09
pkt = NCP(0x7B09, "Volume Switch Information", 'stats')
pkt.Request(14, [
rec(10, 4, StartNumber )
])
pkt.Reply(28, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, TotalLFSCounters ),
rec(20, 4, CurrentLFSCounters, var="x"),
rec(24, 4, LFSCounters, repeat="x"),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B0A, 123/10
pkt = NCP(0x7B0A, "Get NLM Loaded List", 'stats')
pkt.Request(14, [
rec(10, 4, StartNumber )
])
pkt.Reply(28, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, NLMcount ),
rec(20, 4, NLMsInList, var="x" ),
rec(24, 4, NLMNumbers, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
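# Hedged note on the var/repeat convention seen in replies like the one
# above: a rec() tagged var="x" carries an element count, and the
# following rec() tagged repeat="x" is decoded that many times (here,
# NLMsInList followed by NLMNumbers). A trivial sketch of the resulting
# counted-array decode, with 'decode_one' a hypothetical per-element reader:
def _example_decode_counted_array(count, decode_one):
    return [decode_one(i) for i in range(count)]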
# 2222/7B0B, 123/11
pkt = NCP(0x7B0B, "NLM Information", 'stats')
pkt.Request(14, [
rec(10, 4, NLMNumber ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 60, NLMInformation ),
# The remainder of this dissection is manually decoded in packet-ncp2222.inc
#rec(-1, (1,255), FileName ),
#rec(-1, (1,255), Name ),
#rec(-1, (1,255), Copyright ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B0C, 123/12
pkt = NCP(0x7B0C, "Get Directory Cache Information", 'stats')
pkt.Request(10)
pkt.Reply(72, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 56, DirCacheInfo ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B0D, 123/13
pkt = NCP(0x7B0D, "Get Operating System Version Information", 'stats')
pkt.Request(10)
pkt.Reply(70, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 1, OSMajorVersion ),
rec(17, 1, OSMinorVersion ),
rec(18, 1, OSRevision ),
rec(19, 1, AccountVersion ),
rec(20, 1, VAPVersion ),
rec(21, 1, QueueingVersion ),
rec(22, 1, SecurityRestrictionVersion ),
rec(23, 1, InternetBridgeVersion ),
rec(24, 4, MaxNumOfVol ),
rec(28, 4, MaxNumOfConn ),
rec(32, 4, MaxNumOfUsers ),
rec(36, 4, MaxNumOfNmeSps ),
rec(40, 4, MaxNumOfLANS ),
rec(44, 4, MaxNumOfMedias ),
rec(48, 4, MaxNumOfStacks ),
rec(52, 4, MaxDirDepth ),
rec(56, 4, MaxDataStreams ),
rec(60, 4, MaxNumOfSpoolPr ),
rec(64, 4, ServerSerialNumber ),
rec(68, 2, ServerAppNumber ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B0E, 123/14
pkt = NCP(0x7B0E, "Get Active Connection List by Type", 'stats')
pkt.Request(15, [
rec(10, 4, StartConnNumber ),
rec(14, 1, ConnectionType ),
])
pkt.Reply(528, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 512, ActiveConnBitList ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfd01, 0xff00])
# 2222/7B0F, 123/15
pkt = NCP(0x7B0F, "Get NLM Resource Tag List", 'stats')
pkt.Request(18, [
rec(10, 4, NLMNumber ),
rec(14, 4, NLMStartNumber ),
])
pkt.Reply(37, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, TtlNumOfRTags ),
rec(20, 4, CurNumOfRTags ),
rec(24, 13, RTagStructure ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B10, 123/16
pkt = NCP(0x7B10, "Enumerate Connection Information from Connection List", 'stats')
pkt.Request(22, [
rec(10, 1, EnumInfoMask),
rec(11, 3, Reserved3),
rec(14, 4, itemsInList, var="x"),
rec(18, 4, connList, repeat="x"),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, ItemsInPacket ),
srec(netAddr, req_cond="ncp.enum_info_transport==TRUE"),
srec(timeInfo, req_cond="ncp.enum_info_time==TRUE"),
srec(nameInfo, req_cond="ncp.enum_info_name==TRUE"),
srec(lockInfo, req_cond="ncp.enum_info_lock==TRUE"),
srec(printInfo, req_cond="ncp.enum_info_print==TRUE"),
srec(statsInfo, req_cond="ncp.enum_info_stats==TRUE"),
srec(acctngInfo, req_cond="ncp.enum_info_account==TRUE"),
srec(authInfo, req_cond="ncp.enum_info_auth==TRUE"),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B11, 123/17
pkt = NCP(0x7B11, "Enumerate NCP Service Network Addresses", 'stats')
pkt.Request(14, [
rec(10, 4, SearchNumber ),
])
pkt.Reply(36, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, ServerInfoFlags ),
rec(16, 16, GUID ),
rec(32, 4, NextSearchNum ),
# The following two items are dissected in packet-ncp2222.inc
#rec(36, 4, ItemsInPacket, var="x"),
#rec(40, 20, NCPNetworkAddress, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb01, 0xff00])
# 2222/7B14, 123/20
pkt = NCP(0x7B14, "Active LAN Board List", 'stats')
pkt.Request(14, [
rec(10, 4, StartNumber ),
])
pkt.Reply(28, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, MaxNumOfLANS ),
rec(20, 4, ItemsInPacket, var="x"),
rec(24, 4, BoardNumbers, repeat="x"),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B15, 123/21
pkt = NCP(0x7B15, "LAN Configuration Information", 'stats')
pkt.Request(14, [
rec(10, 4, BoardNumber ),
])
pkt.Reply(152, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16,136, LANConfigInfo ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B16, 123/22
pkt = NCP(0x7B16, "LAN Common Counters Information", 'stats')
pkt.Request(18, [
rec(10, 4, BoardNumber ),
rec(14, 4, BlockNumber ),
])
pkt.Reply(86, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 1, StatMajorVersion ),
rec(15, 1, StatMinorVersion ),
rec(16, 4, TotalCommonCnts ),
rec(20, 4, TotalCntBlocks ),
rec(24, 4, CustomCounters ),
rec(28, 4, NextCntBlock ),
rec(32, 54, CommonLanStruc ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B17, 123/23
pkt = NCP(0x7B17, "LAN Custom Counters Information", 'stats')
pkt.Request(18, [
rec(10, 4, BoardNumber ),
rec(14, 4, StartNumber ),
])
pkt.Reply(25, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, NumOfCCinPkt, var="x"),
rec(20, 5, CustomCntsInfo, repeat="x"),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B18, 123/24
pkt = NCP(0x7B18, "LAN Name Information", 'stats')
pkt.Request(14, [
rec(10, 4, BoardNumber ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, PROTO_LENGTH_UNKNOWN, DriverBoardName ),
rec(-1, PROTO_LENGTH_UNKNOWN, DriverShortName ),
rec(-1, PROTO_LENGTH_UNKNOWN, DriverLogicalName ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
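# Hedged reading of the sentinels used in the reply above: an offset of
# -1 means "wherever the previous field ended", and PROTO_LENGTH_UNKNOWN
# means the field length is only discovered at dissection time (here,
# NUL-terminated driver names), hence NO_LENGTH_CHECK plus
# pkt.ReqCondSizeVariable(). A self-contained sketch of such a read:
def _example_read_stringz(buf, pos):
    # Returns the string starting at pos and the offset just past its NUL.
    end = buf.index(b"\x00", pos)
    return buf[pos:end].decode("ascii", "replace"), end + 1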
# 2222/7B19, 123/25
pkt = NCP(0x7B19, "LSL Information", 'stats')
pkt.Request(10)
pkt.Reply(90, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 74, LSLInformation ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B1A, 123/26
pkt = NCP(0x7B1A, "LSL Logical Board Statistics", 'stats')
pkt.Request(14, [
rec(10, 4, BoardNumber ),
])
pkt.Reply(28, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, LogTtlTxPkts ),
rec(20, 4, LogTtlRxPkts ),
rec(24, 4, UnclaimedPkts ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B1B, 123/27
pkt = NCP(0x7B1B, "MLID Board Information", 'stats')
pkt.Request(14, [
rec(10, 4, BoardNumber ),
])
pkt.Reply(44, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 1, Reserved ),
rec(15, 1, NumberOfProtocols ),
rec(16, 28, MLIDBoardInfo ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B1E, 123/30
pkt = NCP(0x7B1E, "Get Media Manager Object Information", 'stats')
pkt.Request(14, [
rec(10, 4, ObjectNumber ),
])
pkt.Reply(212, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 196, GenericInfoDef ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B1F, 123/31
pkt = NCP(0x7B1F, "Get Media Manager Objects List", 'stats')
pkt.Request(15, [
rec(10, 4, StartNumber ),
rec(14, 1, MediaObjectType ),
])
pkt.Reply(28, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, nextStartingNumber ),
rec(20, 4, ObjectCount, var="x"),
rec(24, 4, ObjectID, repeat="x"),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B20, 123/32
pkt = NCP(0x7B20, "Get Media Manager Object Childrens List", 'stats')
pkt.Request(22, [
rec(10, 4, StartNumber ),
rec(14, 1, MediaObjectType ),
rec(15, 3, Reserved3 ),
rec(18, 4, ParentObjectNumber ),
])
pkt.Reply(28, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, nextStartingNumber ),
rec(20, 4, ObjectCount, var="x" ),
rec(24, 4, ObjectID, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B21, 123/33
pkt = NCP(0x7B21, "Get Volume Segment List", 'stats')
pkt.Request(14, [
rec(10, 4, VolumeNumberLong ),
])
pkt.Reply(32, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, NumOfSegments, var="x" ),
rec(20, 12, Segments, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0x9801, 0xfb06, 0xff00])
# 2222/7B22, 123/34
pkt = NCP(0x7B22, "Get Volume Information by Level", 'stats')
pkt.Request(15, [
rec(10, 4, VolumeNumberLong ),
rec(14, 1, InfoLevelNumber ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 1, InfoLevelNumber ),
rec(17, 3, Reserved3 ),
srec(VolInfoStructure, req_cond="ncp.info_level_num==0x01"),
srec(VolInfo2Struct, req_cond="ncp.info_level_num==0x02"),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B23, 123/35
pkt = NCP(0x7B23, "Get Volume Information by Level 64 Bit Aware", 'stats')
pkt.Request(22, [
rec(10, 4, InpInfotype ),
rec(14, 4, Inpld ),
rec(18, 4, VolInfoReturnInfoMask),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, VolInfoReturnInfoMask),
srec(VolInfoStructure64, req_cond="ncp.vinfo_info64==0x00000001"),
rec( -1, (1,255), VolumeNameLen, req_cond="ncp.vinfo_volname==0x00000002" ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B28, 123/40
pkt = NCP(0x7B28, "Active Protocol Stacks", 'stats')
pkt.Request(14, [
rec(10, 4, StartNumber ),
])
pkt.Reply(48, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, MaxNumOfLANS ),
rec(20, 4, StackCount, var="x" ),
rec(24, 4, nextStartingNumber ),
rec(28, 20, StackInfo, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B29, 123/41
pkt = NCP(0x7B29, "Get Protocol Stack Configuration Information", 'stats')
pkt.Request(14, [
rec(10, 4, StackNumber ),
])
pkt.Reply((37,164), [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 1, ConfigMajorVN ),
rec(17, 1, ConfigMinorVN ),
rec(18, 1, StackMajorVN ),
rec(19, 1, StackMinorVN ),
rec(20, 16, ShortStkName ),
rec(36, (1,128), StackFullNameStr ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B2A, 123/42
pkt = NCP(0x7B2A, "Get Protocol Stack Statistics Information", 'stats')
pkt.Request(14, [
rec(10, 4, StackNumber ),
])
pkt.Reply(38, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 1, StatMajorVersion ),
rec(17, 1, StatMinorVersion ),
rec(18, 2, ComCnts ),
rec(20, 4, CounterMask ),
rec(24, 4, TotalTxPkts ),
rec(28, 4, TotalRxPkts ),
rec(32, 4, IgnoredRxPkts ),
rec(36, 2, CustomCnts ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B2B, 123/43
pkt = NCP(0x7B2B, "Get Protocol Stack Custom Information", 'stats')
pkt.Request(18, [
rec(10, 4, StackNumber ),
rec(14, 4, StartNumber ),
])
pkt.Reply(25, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, CustomCount, var="x" ),
rec(20, 5, CustomCntsInfo, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B2C, 123/44
pkt = NCP(0x7B2C, "Get Protocol Stack Numbers by Media Number", 'stats')
pkt.Request(14, [
rec(10, 4, MediaNumber ),
])
pkt.Reply(24, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, StackCount, var="x" ),
rec(20, 4, StackNumber, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B2D, 123/45
pkt = NCP(0x7B2D, "Get Protocol Stack Numbers by LAN Board Number", 'stats')
pkt.Request(14, [
rec(10, 4, BoardNumber ),
])
pkt.Reply(24, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, StackCount, var="x" ),
rec(20, 4, StackNumber, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B2E, 123/46
pkt = NCP(0x7B2E, "Get Media Name by Media Number", 'stats')
pkt.Request(14, [
rec(10, 4, MediaNumber ),
])
pkt.Reply((17,144), [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, (1,128), MediaName ),
])
pkt.CompletionCodes([0x0000, 0x7900, 0x7e01, 0xfb06, 0xff00])
# 2222/7B2F, 123/47
pkt = NCP(0x7B2F, "Get Loaded Media Number", 'stats')
pkt.Request(10)
pkt.Reply(28, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, MaxNumOfMedias ),
rec(20, 4, MediaListCount, var="x" ),
rec(24, 4, MediaList, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
# 2222/7B32, 123/50
pkt = NCP(0x7B32, "Get General Router and SAP Information", 'stats')
pkt.Request(10)
pkt.Reply(37, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 2, RIPSocketNumber ),
rec(18, 2, Reserved2 ),
rec(20, 1, RouterDownFlag ),
rec(21, 3, Reserved3 ),
rec(24, 1, TrackOnFlag ),
rec(25, 3, Reserved3 ),
rec(28, 1, ExtRouterActiveFlag ),
rec(29, 3, Reserved3 ),
rec(32, 2, SAPSocketNumber ),
rec(34, 2, Reserved2 ),
rec(36, 1, RpyNearestSrvFlag ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
# 2222/7B33, 123/51
pkt = NCP(0x7B33, "Get Network Router Information", 'stats')
pkt.Request(14, [
rec(10, 4, NetworkNumber ),
])
pkt.Reply(26, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 10, KnownRoutes ),
])
pkt.CompletionCodes([0x0000, 0x0108, 0x7e01, 0xfb06, 0xff00])
# 2222/7B34, 123/52
pkt = NCP(0x7B34, "Get Network Routers Information", 'stats')
pkt.Request(18, [
rec(10, 4, NetworkNumber),
rec(14, 4, StartNumber ),
])
pkt.Reply(34, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, NumOfEntries, var="x" ),
rec(20, 14, RoutersInfo, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x0108, 0x7e01, 0xfb06, 0xff00])
# 2222/7B35, 123/53
pkt = NCP(0x7B35, "Get Known Networks Information", 'stats')
pkt.Request(14, [
rec(10, 4, StartNumber ),
])
pkt.Reply(30, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, NumOfEntries, var="x" ),
rec(20, 10, KnownRoutes, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
# 2222/7B36, 123/54
pkt = NCP(0x7B36, "Get Server Information", 'stats')
pkt.Request((15,64), [
rec(10, 2, ServerType ),
rec(12, 2, Reserved2 ),
rec(14, (1,50), ServerNameLen, info_str=(ServerNameLen, "Get Server Information: %s", ", %s") ),
])
pkt.Reply(30, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 12, ServerAddress ),
rec(28, 2, HopsToNet ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
# 2222/7B37, 123/55
pkt = NCP(0x7B37, "Get Server Sources Information", 'stats')
pkt.Request((19,68), [
rec(10, 4, StartNumber ),
rec(14, 2, ServerType ),
rec(16, 2, Reserved2 ),
rec(18, (1,50), ServerNameLen, info_str=(ServerNameLen, "Get Server Sources Info: %s", ", %s") ),
])
pkt.Reply(32, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, NumOfEntries, var="x" ),
rec(20, 12, ServersSrcInfo, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x0108, 0x7e01, 0xfb06, 0xff00])
# 2222/7B38, 123/56
pkt = NCP(0x7B38, "Get Known Servers Information", 'stats')
pkt.Request(16, [
rec(10, 4, StartNumber ),
rec(14, 2, ServerType ),
])
pkt.Reply(35, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, NumOfEntries, var="x" ),
rec(20, 15, KnownServStruc, repeat="x" ),
])
pkt.CompletionCodes([0x0000, 0x0108, 0x7e01, 0xfb06, 0xff00])
# 2222/7B3C, 123/60
pkt = NCP(0x7B3C, "Get Server Set Commands Information", 'stats')
pkt.Request(14, [
rec(10, 4, StartNumber ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, TtlNumOfSetCmds ),
rec(20, 4, nextStartingNumber ),
rec(24, 1, SetCmdType ),
rec(25, 3, Reserved3 ),
rec(28, 1, SetCmdCategory ),
rec(29, 3, Reserved3 ),
rec(32, 1, SetCmdFlags ),
rec(33, 3, Reserved3 ),
rec(36, PROTO_LENGTH_UNKNOWN, SetCmdName ),
rec(-1, 4, SetCmdValueNum ),
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
# 2222/7B3D, 123/61
pkt = NCP(0x7B3D, "Get Server Set Categories", 'stats')
pkt.Request(14, [
rec(10, 4, StartNumber ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, NumberOfSetCategories ),
rec(20, 4, nextStartingNumber ),
rec(24, PROTO_LENGTH_UNKNOWN, CategoryName ),
])
pkt.CompletionCodes([0x0000, 0x7e01, 0xfb06, 0xff00])
# 2222/7B3E, 123/62
pkt = NCP(0x7B3E, "Get Server Set Commands Information By Name", 'stats')
pkt.Request(NO_LENGTH_CHECK, [
rec(10, PROTO_LENGTH_UNKNOWN, SetParmName, info_str=(SetParmName, "Get Server Set Command Info for: %s", ", %s") ),
])
pkt.Reply(NO_LENGTH_CHECK, [
rec(8, 4, CurrentServerTime ),
rec(12, 1, VConsoleVersion ),
rec(13, 1, VConsoleRevision ),
rec(14, 2, Reserved2 ),
rec(16, 4, TtlNumOfSetCmds ),
rec(20, 4, nextStartingNumber ),
rec(24, 1, SetCmdType ),
rec(25, 3, Reserved3 ),
rec(28, 1, SetCmdCategory ),
rec(29, 3, Reserved3 ),
rec(32, 1, SetCmdFlags ),
rec(33, 3, Reserved3 ),
rec(36, PROTO_LENGTH_UNKNOWN, SetCmdName ),
# The value of the set command is decoded in packet-ncp2222.inc
])
pkt.ReqCondSizeVariable()
pkt.CompletionCodes([0x0000, 0x7e01, 0xc600, 0xfb06, 0xff22])
# 2222/7B46, 123/70
pkt = NCP(0x7B46, "Get Current Compressing File", 'stats')
pkt.Request(14, [
rec(10, 4, VolumeNumberLong ),
])
pkt.Reply(56, [
rec(8, 4, ParentID ),
rec(12, 4, DirectoryEntryNumber ),
rec(16, 4, compressionStage ),
rec(20, 4, ttlIntermediateBlks ),
rec(24, 4, ttlCompBlks ),
rec(28, 4, curIntermediateBlks ),
rec(32, 4, curCompBlks ),
rec(36, 4, curInitialBlks ),
rec(40, 4, fileFlags ),
rec(44, 4, projectedCompSize ),
rec(48, 4, originalSize ),
rec(52, 4, compressVolume ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0x7901, 0x9801, 0xfb06, 0xff00])
# 2222/7B47, 123/71
pkt = NCP(0x7B47, "Get Current DeCompressing File Info List", 'stats')
pkt.Request(14, [
rec(10, 4, VolumeNumberLong ),
])
pkt.Reply(24, [
#rec(8, 4, FileListCount ),
rec(8, 16, FileInfoStruct ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0x9801, 0xfb06, 0xff00])
# 2222/7B48, 123/72
pkt = NCP(0x7B48, "Get Compression and Decompression Time and Counts", 'stats')
pkt.Request(14, [
rec(10, 4, VolumeNumberLong ),
])
pkt.Reply(64, [
rec(8, 56, CompDeCompStat ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0x9801, 0xfb06, 0xff00])
# 2222/7BF9, 123/249
pkt = NCP(0x7BF9, "Set Alert Notification", 'stats')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
# 2222/7BFB, 123/251
pkt = NCP(0x7BFB, "Get Item Configuration Information", 'stats')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
# 2222/7BFC, 123/252
pkt = NCP(0x7BFC, "Get Subject Item ID List", 'stats')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
# 2222/7BFD, 123/253
pkt = NCP(0x7BFD, "Get Subject Item List Count", 'stats')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
# 2222/7BFE, 123/254
pkt = NCP(0x7BFE, "Get Subject ID List", 'stats')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
# 2222/7BFF, 123/255
pkt = NCP(0x7BFF, "Get Number of NetMan Subjects", 'stats')
pkt.Request(10)
pkt.Reply(8)
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb06, 0xff00])
# 2222/8301, 131/01
pkt = NCP(0x8301, "RPC Load an NLM", 'remote')
pkt.Request(NO_LENGTH_CHECK, [
rec(10, 4, NLMLoadOptions ),
rec(14, 16, Reserved16 ),
rec(30, PROTO_LENGTH_UNKNOWN, PathAndName, info_str=(PathAndName, "RPC Load NLM: %s", ", %s") ),
])
pkt.Reply(12, [
rec(8, 4, RPCccode ),
])
pkt.CompletionCodes([0x0000, 0x7c00, 0x7e00, 0xfb07, 0xff00])
# 2222/8302, 131/02
pkt = NCP(0x8302, "RPC Unload an NLM", 'remote')
pkt.Request(NO_LENGTH_CHECK, [
rec(10, 20, Reserved20 ),
rec(30, PROTO_LENGTH_UNKNOWN, NLMName, info_str=(NLMName, "RPC Unload NLM: %s", ", %s") ),
])
pkt.Reply(12, [
rec(8, 4, RPCccode ),
])
pkt.CompletionCodes([0x0000, 0x7c00, 0x7e00, 0xfb07, 0xff00])
# 2222/8303, 131/03
pkt = NCP(0x8303, "RPC Mount Volume", 'remote')
pkt.Request(NO_LENGTH_CHECK, [
rec(10, 20, Reserved20 ),
rec(30, PROTO_LENGTH_UNKNOWN, VolumeNameStringz, info_str=(VolumeNameStringz, "RPC Mount Volume: %s", ", %s") ),
])
pkt.Reply(32, [
rec(8, 4, RPCccode),
rec(12, 16, Reserved16 ),
rec(28, 4, VolumeNumberLong ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
# 2222/8304, 131/04
pkt = NCP(0x8304, "RPC Dismount Volume", 'remote')
pkt.Request(NO_LENGTH_CHECK, [
rec(10, 20, Reserved20 ),
rec(30, PROTO_LENGTH_UNKNOWN, VolumeNameStringz, info_str=(VolumeNameStringz, "RPC Dismount Volume: %s", ", %s") ),
])
pkt.Reply(12, [
rec(8, 4, RPCccode ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
# 2222/8305, 131/05
pkt = NCP(0x8305, "RPC Add Name Space To Volume", 'remote')
pkt.Request(NO_LENGTH_CHECK, [
rec(10, 20, Reserved20 ),
rec(30, PROTO_LENGTH_UNKNOWN, AddNameSpaceAndVol, info_str=(AddNameSpaceAndVol, "RPC Add Name Space to Volume: %s", ", %s") ),
])
pkt.Reply(12, [
rec(8, 4, RPCccode ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
# 2222/8306, 131/06
pkt = NCP(0x8306, "RPC Set Command Value", 'remote')
pkt.Request(NO_LENGTH_CHECK, [
rec(10, 1, SetCmdType ),
rec(11, 3, Reserved3 ),
rec(14, 4, SetCmdValueNum ),
rec(18, 12, Reserved12 ),
rec(30, PROTO_LENGTH_UNKNOWN, SetCmdName, info_str=(SetCmdName, "RPC Set Command Value: %s", ", %s") ),
#
# XXX - optional string, if SetCmdType is 0
#
])
pkt.Reply(12, [
rec(8, 4, RPCccode ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
# 2222/8307, 131/07
pkt = NCP(0x8307, "RPC Execute NCF File", 'remote')
pkt.Request(NO_LENGTH_CHECK, [
rec(10, 20, Reserved20 ),
rec(30, PROTO_LENGTH_UNKNOWN, PathAndName, info_str=(PathAndName, "RPC Execute NCF File: %s", ", %s") ),
])
pkt.Reply(12, [
rec(8, 4, RPCccode ),
])
pkt.CompletionCodes([0x0000, 0x7e00, 0xfb07, 0xff00])
if __name__ == '__main__':
# import profile
# filename = "ncp.pstats"
# profile.run("main()", filename)
#
# import pstats
# sys.stdout = msg
# p = pstats.Stats(filename)
#
# print "Stats sorted by cumulative time"
# p.strip_dirs().sort_stats('cumulative').print_stats()
#
# print "Function callees"
# p.print_callees()
main()
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
# |
Python | wireshark/tools/netscreen2dump.py | #!/usr/bin/env python3
"""
Converts netscreen snoop hex-dumps to a hex-dump that text2pcap can read.
Copyright (c) 2004 by Gilbert Ramirez <[email protected]>
SPDX-License-Identifier: GPL-2.0-or-later
"""
import sys
import re
import os
import stat
import time
class OutputFile:
TIMER_MAX = 99999.9
def __init__(self, name, base_time):
try:
self.fh = open(name, "w")
        except IOError as err:
sys.exit(err)
self.base_time = base_time
self.prev_timestamp = 0.0
def PrintPacket(self, timestamp, datalines):
        # What to do with the timestamp? I need more data about what
# the netscreen timestamp is, then I can generate one for the text file.
# print("TS:", timestamp.group("time"))
try:
timestamp = float(timestamp.group("time"))
except ValueError:
sys.exit("Unable to convert '%s' to floating point." %
(timestamp,))
        # Did we wrap around the timer max?
if timestamp < self.prev_timestamp:
self.base_time += self.TIMER_MAX
self.prev_timestamp = timestamp
packet_timestamp = self.base_time + timestamp
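        # Worked example (illustrative): with TIMER_MAX = 99999.9, a previous
        # timestamp of 99999.8 followed by one of 0.3 means the device timer
        # wrapped, so base_time grows by 99999.9 and this packet is stamped
        # base_time + 0.3, keeping output timestamps monotonic.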
# Determine the time string to print
gmtime = time.gmtime(packet_timestamp)
subsecs = packet_timestamp - int(packet_timestamp)
        assert 0 <= subsecs < 1
subsecs = int(subsecs * 10)
print >> self.fh, "%s.%d" % (time.strftime("%Y-%m-%d %H:%M:%S", gmtime), \
subsecs)
# Print the packet data
offset = 0
for lineno, hexgroup in datalines:
hexline = hexgroup.group("hex")
hexpairs = hexline.split()
print >> self.fh, "%08x %s" % (offset, hexline)
offset += len(hexpairs)
# Blank line
        print(file=self.fh)
# Find a timestamp line
re_timestamp = re.compile(r"^(?P<time>\d+\.\d): [\w/]+\((?P<io>.)\)(:| len=)")
# Find a hex dump line
re_hex_line = re.compile(r"(?P<hex>([0-9a-f]{2} ){1,16})\s+(?P<ascii>.){1,16}")
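# Sample lines these patterns are meant to match (illustrative, not taken
# from a real device):
#
#   re_timestamp: "6.0: ethernet1/0(i) len=60:0010db12f1a0->0010db12f1a1/0800"
#   re_hex_line:  "00 10 db 12 f1 a1 00 10 db 12 f1 a0 08 00 45 00     ..............E."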
def run(input_filename, output_filename):
try:
ifh = open(input_filename, "r")
    except IOError as err:
sys.exit(err)
# Get the file's creation time.
try:
ctime = os.stat(input_filename)[stat.ST_CTIME]
    except OSError as err:
sys.exit(err)
output_file = OutputFile(output_filename, ctime)
timestamp = None
datalines = []
lineno = 0
    for line in ifh:
lineno += 1
# If we have no timestamp yet, look for one
if not timestamp:
m = re_timestamp.search(line)
if m:
timestamp = m
# Otherwise, look for hex dump lines
else:
m = re_hex_line.search(line)
if m:
datalines.append((lineno, m))
else:
# If we have been gathering hex dump lines,
# and this line is not a hex dump line, then the hex dump
# has finished, and so has the packet. So print the packet
# and reset our variables so we can look for the next packet.
if datalines:
output_file.PrintPacket(timestamp, datalines)
timestamp = None
datalines = []
# At the end of the file we may still have hex dump data in memory.
# If so, print the packet
if datalines:
output_file.PrintPacket(timestamp, datalines)
timestamp = None
datalines = []
def usage():
print >> sys.stderr, "Usage: netscreen2dump.py netscreen-dump-file new-dump-file"
sys.exit(1)
def main():
if len(sys.argv) != 3:
usage()
run(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main() |
Python | wireshark/tools/parse_xml2skinny_dissector.py | #
# Wireshark Dissector Generator for SkinnyProtocolOptimized.xml
#
# Author: Diederik de Groot <[email protected]>
# Date: 2014-7-22
# Skinny Protocol Versions: 0 through 22
#
# Heritage:
# xml2obj based on https://code.activestate.com/recipes/149368-xml2obj/
#
# Dependencies:
# python / xml / sax
#
# Called By:
# cog.py + packet-skinny.c.in for inplace code generation
# See: https://nedbatchelder.com/code/cog/
#
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
import re
import xml.sax.handler
indentation = 0
indent_str = ''
fieldsArray = {}
si_fields = {
"callReference" : "si->callId",
"lineInstance": "si->lineId",
"passThroughPartyId" : "si->passThroughPartyId",
"callState" : "si->callState",
"callingParty" : "si->callingParty",
"calledParty" : "si->calledParty",
"mediaReceptionStatus" : "si->mediaReceptionStatus",
"mediaTransmissionStatus" : "si->mediaTransmissionStatus",
"multimediaReceptionStatus" : "si->multimediaReceptionStatus",
"multimediaTransmissionStatus" : "si->multimediaTransmissionStatus",
"multicastReceptionStatus" : "si->multicastReceptionStatus",
}
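# When a field named in si_fields is dissected, the generated C code also
# copies the wire value into the matching member of the skinny conversation
# info struct; e.g. a "callReference" field additionally emits
# "si->callId = tvb_get_letohl(...);" (see Integer.dissect below).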
debug = 0
def xml2obj(src):
"""
    A function that converts XML data into native Python objects.
"""
non_id_char = re.compile('[^_0-9a-zA-Z]')
def _name_mangle(name):
        return non_id_char.sub('_', name)
class DataNode(object):
def __init__(self):
self._attrs = {} # XML attributes and child elements
self.data = None # child text data
self.parent = None
self.basemessage = None
self.intsize = 0
self._children = []
self.declared = []
def __len__(self):
# treat single element as a list of 1
return 1
def __getitem__(self, key):
if isinstance(key, str):
return self._attrs.get(key,None)
else:
return [self][key]
def __contains__(self, name):
return name in self._attrs
def __bool__(self):
return bool(self._attrs or self.data)
def __getattr__(self, name):
if name.startswith('__'):
# need to do this for Python special methods???
raise AttributeError(name)
return self._attrs.get(name,None)
def _add_xml_attr(self, name, value):
if name in self._attrs:
# multiple attribute of the same name are represented by a list
children = self._attrs[name]
if not isinstance(children, list):
children = [children]
self._attrs[name] = children
children.append(value)
else:
self._attrs[name] = value
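            # Illustrative: parsing "<fields><integer/><integer/></fields>"
            # stores both children under one key, so fields.integer becomes a
            # list of two DataNodes, while a lone child is stored directly.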
def _add_child(self, name, value):
#print "adding : %s / %s to %s" %(name,value, self.__class__)
self._children.append(value)
def __str__(self):
return '%s:%s' %(self.__class__,self.name)
def keys(self):
return self._attrs.keys()
def __repr__(self):
            items = sorted(self._attrs.items())
if self.data:
items.append(('data', self.data))
return '{%s}' % ', '.join(['%s:%s' % (k,repr(v)) for k,v in items])
def __setitem__(self, key, value):
self._attrs[key] = value
def getfieldnames(self):
return ''
def get_req_resp_keys(self, req_resp_keys):
return []
def get_req_resp_key(self):
if self.req_resp_key == "1":
return self.name
return None
def declaration(self):
global fieldsArray
if self.name not in fieldsArray:
fieldsArray[self.name] = '/* UNKNOWN { &hf_skinny_%s,\n {\n"%s", "skinny.%s", FT_UINT32, BASE_DEC, NULL, 0x0,\n "%s", HFILL }}, */\n' %(self.name, self.name, self.name, self.comment)
return ''
def dissect(self):
return self.name or ''
def incr_indent(self):
global indentation
global indent_str
indentation += 1
indent_str = ''
for x in range(0, indentation):
indent_str += ' '
def decr_indent(self):
global indentation
global indent_str
indentation -= 1
indent_str = ''
for x in range(0, indentation):
indent_str += ' '
def indent_out(self, string):
return indent_str + string
class Message(DataNode):
''' Message '''
def __str__(self):
return self.name
def gen_handler(self):
if self.fields is None:
# skip whole message and return NULL as handler
return 'NULL'
return 'handle_%s' %self.name
def dissect(self):
ret = ''
declarations = 0
fixed = 0
if (self.fields is not None):
ret += self.indent_out("/*\n")
ret += self.indent_out(" * Message: %s\n" %self.name)
ret += self.indent_out(" * Opcode: %s\n" %self.opcode)
ret += self.indent_out(" * Type: %s\n" %self.type)
ret += self.indent_out(" * Direction: %s\n" %self.direction)
ret += self.indent_out(" * VarLength: %s\n" %self.dynamic)
ret += self.indent_out(" * MsgType: %s\n" %self.msgtype)
if self.comment:
ret += self.indent_out(" * Comment: %s\n" %self.comment)
ret += self.indent_out(" */\n")
ret += self.indent_out("static void\n")
ret += self.indent_out("handle_%s(ptvcursor_t *cursor, packet_info * pinfo _U_, skinny_conv_info_t * skinny_conv _U_)\n" %self.name)
ret += self.indent_out("{\n")
self.incr_indent()
for fields in self.fields:
if fields.size_lt or fields.size_gt:
if self.basemessage.declared is None or "hdr_data_length" not in self.basemessage.declared:
ret += self.indent_out("uint32_t hdr_data_length = tvb_get_letohl(ptvcursor_tvbuff(cursor), 0);\n")
self.basemessage.declared.append("hdr_data_length")
declarations += 1
if fields.fixed == "yes":
fixed = 1
if not declarations or fixed == 1:
for fields in self.fields[1:]:
if self.basemessage.declared is None or "hdr_version" not in self.basemessage.declared:
ret += self.indent_out("uint32_t hdr_version = tvb_get_letohl(ptvcursor_tvbuff(cursor), 4);\n")
self.basemessage.declared.append("hdr_version")
declarations += 1
req_resp_keys = []
for fields in self.fields:
fields.get_req_resp_keys(req_resp_keys)
ret += '%s' %fields.declaration()
declarations += 1
if declarations > 1:
ret += "\n"
if self.fields is not None:
for fields in self.fields:
ret += '%s' %fields.dissect()
# setup request/response
if self.msgtype == "request":
if req_resp_keys and req_resp_keys[0] != '':
ret += self.indent_out('skinny_reqrep_add_request(cursor, pinfo, skinny_conv, %s ^ %s);\n' %(self.opcode, req_resp_keys[0]))
else:
ret += self.indent_out('skinny_reqrep_add_request(cursor, pinfo, skinny_conv, %s);\n' %(self.opcode))
if self.msgtype == "response":
if req_resp_keys and req_resp_keys[0] != '':
ret += self.indent_out('skinny_reqrep_add_response(cursor, pinfo, skinny_conv, %s ^ %s);\n' %(self.request, req_resp_keys[0]))
else:
ret += self.indent_out('skinny_reqrep_add_response(cursor, pinfo, skinny_conv, %s);\n' %(self.request))
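            # Illustrative: a request with opcode 0x0023 and one key field is
            # registered under (0x0023 ^ key); its response is then looked up
            # under the same value, pairing the two messages.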
self.decr_indent()
ret += "}\n\n"
return ret
class Fields(DataNode):
''' Fields '''
size_fieldnames= []
def get_req_resp_keys(self, req_resp):
for field in self._children:
key = field.get_req_resp_key()
                if key is not None and key not in req_resp:
req_resp.append(key)
def declaration(self):
ret = ''
for field in self._children:
ret += '%s' %(field.declaration())
self.intsize += field.intsize
return ret
def dissect(self, lookupguide=""):
ret = ''
ifstarted = 0
#ret += "/* [PARENT: %s, BASEMESSAGE: %s] */\n" %(self.parent.name,self.basemessage.name)
if ((self.beginversion or self.endversion) and (self.beginversion != "0" or self.endversion != "22")):
ifstarted = 1
ret += self.indent_out('if (')
if (self.beginversion and self.beginversion != "0"):
if (not self.endversion or self.endversion == "22"):
ret += 'hdr_version >= V%s_MSG_TYPE) {\n' %self.beginversion
else:
ret += 'hdr_version >= V%s_MSG_TYPE && ' %self.beginversion
if (self.endversion and self.endversion != "22"):
ret += 'hdr_version <= V%s_MSG_TYPE) {\n' %self.endversion
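                # Illustrative output: beginversion="17" with endversion="20"
                # emits the C guard:
                #   if (hdr_version >= V17_MSG_TYPE && hdr_version <= V20_MSG_TYPE) {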
self.incr_indent()
if self.size_lt:
ret += self.indent_out('if (hdr_data_length < %s) {\n' %self.size_lt)
self.incr_indent()
if self.size_gt:
ret += self.indent_out('if (hdr_data_length > %s) {\n' %self.size_gt)
self.incr_indent()
# generate dissection
for field in self._children:
ret += '%s' %(field.dissect())
if self.size_lt:
self.decr_indent()
ret += self.indent_out('}\n')
if self.size_gt:
self.decr_indent()
ret += self.indent_out('}\n')
if ifstarted:
self.decr_indent()
ret += self.indent_out('}\n')
            return ret
class Integer(DataNode):
def __init__(self):
DataNode.__init__(self)
self.intsize = 0
self.endian = "ENC_LITTLE_ENDIAN"
def __str__(self):
return '%s:%s' %(self.__class__,self.name)
def declaration(self):
ret = ''
int_sizes = {'uint32':4,'uint16':2,'uint8':1,'int32':4,'int16':2,'int8':1,'ipport':4}
if self.endianness == "big":
self.endian = "ENC_BIG_ENDIAN"
if self.type in int_sizes:
self.intsize = int_sizes[self.type]
else:
print(("ERROR integer %s with type: %s, could not be found" %(self.name, self.type)))
if self.declare == "yes" or self.make_additional_info == "yes":
if self.basemessage.declared is None or self.name not in self.basemessage.declared:
ret += self.indent_out(f'uint{self.intsize * 8}_t {self.name} = 0;\n')
self.basemessage.declared.append(self.name)
global fieldsArray
if self.name not in fieldsArray:
fieldsArray[self.name] ='{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_UINT%d, BASE_DEC, NULL, 0x0,\n %s, HFILL }},\n' %(self.name, self.comment if (self.comment and self.longcomment) else self.name, self.name.replace("_","."), self.intsize * 8, '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
return ret
def dissect(self):
ret = ''
size = 0
if self.size_fieldname:
if self.basemessage.dynamic == "yes":
size = self.size_fieldname
else:
size = self.maxsize
elif self.size:
size = self.size
if size:
if self.size_fieldname:
ret += self.indent_out('if (%s <= %s) {%s\n' %(self.size_fieldname, size, ' /* tvb integer size guard */' if debug else ''))
else:
ret += self.indent_out('{\n')
self.incr_indent()
variable = 'counter_%d' %indentation
                ret += self.indent_out('uint32_t %s = 0;\n' %(variable))
if self.size_fieldname:
ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [ref:%s = %%d, max:%s]", %s);\n' %(self.name, self.size_fieldname, size, self.size_fieldname))
else:
ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [max:%s]");\n' %(self.name, size))
                ret += self.indent_out('for (%s = 0; %s < %s; %s++) {\n' %(variable, variable, size, variable))
if self.basemessage.dynamic == "no" and self.size_fieldname:
self.incr_indent()
ret += self.indent_out('if (%s < %s) {\n' %(variable,self.size_fieldname))
self.incr_indent()
if self.declare == "yes" or self.make_additional_info == "yes":
if self.endianness == "big":
if (self.intsize == 4):
ret += self.indent_out('%s = tvb_get_ntohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
elif (self.intsize == 2):
ret += self.indent_out('%s = tvb_get_ntohs(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
else:
ret += self.indent_out('%s = tvb_get_guint8(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
else:
if (self.intsize == 4):
ret += self.indent_out('%s = tvb_get_letohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
elif (self.intsize == 2):
ret += self.indent_out('%s = tvb_get_letohs(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
else:
ret += self.indent_out('%s = tvb_get_guint8(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
if self.name in si_fields.keys():
if self.endianness == "big":
ret += self.indent_out('%s = tvb_get_ntohs(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(si_fields[self.name]))
else:
ret += self.indent_out('%s = tvb_get_letohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(si_fields[self.name]))
ret += self.indent_out('ptvcursor_add(cursor, hf_skinny_%s, %d, %s);\n' %(self.name, self.intsize, self.endian))
if size:
if self.basemessage.dynamic == "no" and self.size_fieldname:
self.decr_indent()
ret += self.indent_out('} else {\n')
ret += self.indent_out(' ptvcursor_advance(cursor, %d);\n' %self.intsize)
ret += self.indent_out('}\n')
self.decr_indent()
ret += self.indent_out('}\n')
if debug:
ret += self.indent_out('ptvcursor_pop_subtree(cursor); /* end for loop tree: %s */\n' %(self.name))
else:
ret += self.indent_out('ptvcursor_pop_subtree(cursor);\n')
self.decr_indent()
if self.size_fieldname:
ret += self.indent_out('} else {\n')
self.incr_indent()
ret += self.indent_out('ptvcursor_advance(cursor, (%s * %s));%s\n' %(size, self.intsize, ' /* guard kicked in -> skip the rest */;' if debug else ''))
self.decr_indent()
ret += self.indent_out('}\n')
if self.make_additional_info == "yes":
ret += self.indent_out('srtp_add_address(pinfo, PT_UDP, &%s, %s, 0, "SKINNY", pinfo->num, false, NULL, NULL, NULL);\n' %(self.use_param, self.name))
ret += self.indent_out('%s_str = address_to_display(NULL, &%s);\n' % (self.use_param, self.use_param))
ret += self.indent_out('si->additionalInfo = ws_strdup_printf("%%s:%%d", %s_str, %s);\n' % (self.use_param, self.name))
ret += self.indent_out('wmem_free(NULL, %s_str);\n' % (self.use_param))
return ret
class Enum(DataNode):
def __init__(self):
DataNode.__init__(self)
self.intsize = 0
self.sparse = 0
def __str__(self):
return '%s:%s' %(self.__class__,self.name)
def declaration(self):
ret = ''
prevvalue = 0
enum_sizes = {'uint32':4,'uint16':2,'uint8':1}
if self.type in enum_sizes:
self.intsize = enum_sizes[self.type]
else:
print(("ERROR enum %s with type: %s, could not be found" %(self.name, self.type)))
if self.declare == "yes":
if self.basemessage.declared is None or self.name not in self.basemessage.declared:
ret += self.indent_out('g%s %s = 0;\n' %(self.type, self.name))
self.basemessage.declared.append(self.name)
global fieldsArray
if self.name not in fieldsArray:
fieldsArray[self.name] ='{&hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_UINT%d, BASE_HEX | BASE_EXT_STRING, &%s_ext, 0x0,\n %s, HFILL }},\n' %(self.name, self.comment if (self.comment and self.longcomment) else self.name, self.name.replace("_","."), self.intsize * 8, self.subtype[0].upper() + self.subtype[1:], '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
return ret
def dissect(self):
ret = ''
endian = "ENC_LITTLE_ENDIAN"
size = 0
if self.size_fieldname:
if self.basemessage.dynamic == "yes":
size = self.size_fieldname
else:
size = self.maxsize
elif self.size:
size = self.size
if self.make_additional_info == "yes":
ret += self.indent_out('si->additionalInfo = ws_strdup_printf("\\"%s\\"",\n')
self.incr_indent()
ret += self.indent_out('try_val_to_str_ext(\n')
self.incr_indent()
ret += self.indent_out('tvb_get_letohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor)),\n')
ret += self.indent_out('&%s_ext\n' %(self.subtype[0].upper() + self.subtype[1:]))
self.decr_indent()
ret += self.indent_out(')\n')
self.decr_indent()
ret += self.indent_out(');\n')
if self.make_additional_info_short == "yes":
ret += self.indent_out('si->additionalInfo = ws_strdup_printf("\\"%s\\"",\n')
self.incr_indent()
ret += self.indent_out('try_val_to_str_ext(\n')
self.incr_indent()
ret += self.indent_out('tvb_get_letohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor)),\n')
ret += self.indent_out('&%s_short_ext\n' %(self.subtype[0].upper() + self.subtype[1:]))
self.decr_indent()
ret += self.indent_out(')\n')
self.decr_indent()
ret += self.indent_out(');\n')
if size:
if self.size_fieldname:
ret += self.indent_out('if (%s <= %s) { /* tvb enum size guard */\n' %(self.size_fieldname, self.maxsize))
else:
ret += self.indent_out('{\n')
self.incr_indent()
variable = 'counter_%d' %indentation
                ret += self.indent_out('uint32_t %s = 0;\n' %(variable))
if self.size_fieldname:
ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [ref: %s = %%d, max:%s]", %s);\n' %(self.name, self.size_fieldname, size, self.size_fieldname))
else:
ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [max:%s]");\n' %(self.name, size))
                ret += self.indent_out('for (%s = 0; %s < %s; %s++) {\n' %(variable, variable, size, variable))
if self.basemessage.dynamic == "no" and self.size_fieldname:
self.incr_indent()
ret += self.indent_out('if (%s < %s) {\n' %(variable,self.size_fieldname))
self.incr_indent()
if self.name in si_fields.keys():
ret += self.indent_out('%s = tvb_get_letohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(si_fields[self.name]))
if self.declare == "yes":
if (self.intsize == 4):
ret += self.indent_out('%s = tvb_get_letohl(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
elif (self.intsize == 2):
ret += self.indent_out('%s = tvb_get_letohs(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
else:
ret += self.indent_out('%s = tvb_get_guint8(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor));\n' %(self.name))
ret += self.indent_out('ptvcursor_add(cursor, hf_skinny_%s, %d, %s);\n' %(self.name, self.intsize, endian))
if size:
if self.basemessage.dynamic == "no" and self.size_fieldname:
self.decr_indent()
ret += self.indent_out('} else {\n')
ret += self.indent_out(' ptvcursor_advance(cursor, 4);\n')
ret += self.indent_out('}\n')
self.decr_indent()
ret += self.indent_out('}\n')
if debug:
ret += self.indent_out('ptvcursor_pop_subtree(cursor); /* end for loop tree: %s */\n' %(self.name))
else:
ret += self.indent_out('ptvcursor_pop_subtree(cursor);\n')
self.decr_indent()
if self.size_fieldname:
ret += self.indent_out('} else {\n')
self.incr_indent()
ret += self.indent_out('ptvcursor_advance(cursor, (%s * %s)); /* guard kicked in -> skip the rest */;\n' %(size, self.intsize))
self.decr_indent()
ret += self.indent_out('}\n')
return ret
class String(DataNode):
def __init__(self):
DataNode.__init__(self)
def __str__(self):
return '%s:%s' %(self.__class__,self.name)
def get_req_resp_key(self):
if self.req_resp_key == "1":
return 'wmem_str_hash(%s)' %self.name
return None
def declaration(self):
ret = ''
self.intsize = 0
if self.size:
if self.size=="VariableDirnumSize":
self.intsize = 24
else:
self.intsize = int(self.size)
elif self.maxsize and self.basemessage.dynamic == "no":
self.intsize = int(self.maxsize)
if self.declare == "yes":
if self.size=="VariableDirnumSize":
if self.basemessage.declared is None or "VariableDirnumSize" not in self.basemessage.declared:
if self.basemessage.declared is None or "hdr_version" not in self.basemessage.declared:
#if (self.basemessage.fields is not None and len(self.basemessage.fields) == 1):
ret += self.indent_out('uint32_t hdr_version = tvb_get_letohl(ptvcursor_tvbuff(cursor), 4);\n')
self.basemessage.declared.append("hdr_version")
ret += self.indent_out('uint32_t VariableDirnumSize = (hdr_version >= V18_MSG_TYPE) ? 25 : 24;\n')
self.basemessage.declared.append("VariableDirnumSize")
#else:
# if self.basemessage.declared is None or self.name not in self.basemessage.declared:
# ret += self.indent_out('char *%s = NULL;\n' %self.name)
# self.basemessage.declared.append(self.name)
if self.basemessage.dynamic == "yes" and not self.subtype == "DisplayLabel":
if self.basemessage.declared is None or self.name + '_len' not in self.basemessage.declared:
ret += self.indent_out('uint32_t %s_len = 0;\n' %self.name)
self.basemessage.declared.append(self.name + '_len')
global fieldsArray
if self.name not in fieldsArray:
fieldsArray[self.name] = '{&hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_STRING, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' %(self.name, self.comment if (self.comment and self.longcomment) else self.name, self.name.replace("_","."), '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
return ret
def dissect(self):
ret = ''
if self.declare == "yes" and self.size != "VariableDirnumSize":
ret += self.indent_out('const char * %s = g_strdup(tvb_format_stringzpad(pinfo->pool, ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), %s));\n' %(self.name, self.size))
if self.subtype == "DisplayLabel":
if self.basemessage.dynamic == "yes":
ret += self.indent_out('dissect_skinny_displayLabel(cursor, pinfo, hf_skinny_%s, 0);\n' %(self.name))
elif self.size_fieldname:
ret += self.indent_out('dissect_skinny_displayLabel(cursor, pinfo, hf_skinny_%s, %s);\n' %(self.name, self.size_fieldname))
else:
ret += self.indent_out('dissect_skinny_displayLabel(cursor, pinfo, hf_skinny_%s, %s);\n' %(self.name, self.size))
elif self.basemessage.dynamic == "yes":
ret += self.indent_out('%s_len = tvb_strnlen(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), -1)+1;\n' %self.name)
ret += self.indent_out('if (%s_len > 1) {\n' %self.name)
if self.name in si_fields.keys():
ret += self.indent_out(' %s = g_strdup(tvb_format_stringzpad(pinfo->pool, ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), %s_len));\n' %(si_fields[self.name], self.name))
ret += self.indent_out(' ptvcursor_add(cursor, hf_skinny_%s, %s_len, ENC_ASCII);\n' %(self.name, self.name))
ret += self.indent_out('} else {\n')
ret += self.indent_out(' ptvcursor_advance(cursor, 1);\n')
ret += self.indent_out('}\n')
elif self.size_fieldname:
if self.name in si_fields.keys():
ret += self.indent_out('%s = g_strdup(tvb_format_stringzpad(pinfo->pool, ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), %s));\n' %(si_fields[self.name], self.size_fieldname))
ret += self.indent_out('ptvcursor_add(cursor, hf_skinny_%s, %s, ENC_ASCII);\n' %(self.name, self.size_fieldname))
else:
if self.name in si_fields.keys():
ret += self.indent_out('%s = g_strdup(tvb_format_stringzpad(pinfo->pool, ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), %s));\n' %(si_fields[self.name], self.size))
if self.make_additional_info == "yes":
ret += self.indent_out('uint32_t %s_len;\n' %(self.name))
if self.size=="VariableDirnumSize":
ret += self.indent_out('%s_len = tvb_strnlen(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), VariableDirnumSize)+1;\n' %(self.name))
else:
ret += self.indent_out('%s_len = tvb_strnlen(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), 24)+1;\n' %(self.name))
ret += self.indent_out('if (%s_len > 1) {\n' %(self.name))
self.incr_indent()
ret += self.indent_out('si->additionalInfo = ws_strdup_printf("\\"%%s\\"", tvb_format_stringzpad(pinfo->pool, ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), %s_len));\n' %(self.name))
self.decr_indent()
ret += self.indent_out('}\n')
ret += self.indent_out('ptvcursor_add(cursor, hf_skinny_%s, %s, ENC_ASCII);\n' %(self.name, self.size))
return ret
class Ether(DataNode):
def __init__(self):
DataNode.__init__(self)
def __str__(self):
return '%s:%s' %(self.__class__,self.name)
def declaration(self):
ret = ''
self.intsize = 6
if self.size:
self.intsize = int(self.size)
elif self.maxsize and self.basemessage.dynamic == "no":
self.intsize = int(self.maxsize)
if self.declare == "yes":
if self.basemessage.declared is None or self.name not in self.basemessage.declared:
ret += self.indent_out('uint32_t %s = 0;\n' %self.name)
self.basemessage.declared.append(self.name)
if self.basemessage.dynamic == "yes":
if self.basemessage.declared is None or self.name + '_len' not in self.basemessage.declared:
ret += self.indent_out('uint32_t %s_len = 0;\n' %self.name)
self.basemessage.declared.append(self.name + '_len')
global fieldsArray
if self.name not in fieldsArray:
fieldsArray[self.name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_ETHER, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' %(self.name, self.comment if (self.comment and self.longcomment) else self.name, self.name.replace("_","."), '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
return ret
def dissect(self):
ret = ''
if self.basemessage.dynamic == "yes":
ret += self.indent_out('%s_len = tvb_strnlen(ptvcursor_tvbuff(cursor), ptvcursor_current_offset(cursor), -1)+1;\n' %self.name)
ret += self.indent_out('if (%s_len > 1) {\n' %self.name)
ret += self.indent_out(' ptvcursor_add(cursor, hf_skinny_%s, 6, ENC_NA);\n' %(self.name))
ret += self.indent_out(' ptvcursor_advance(cursor, %s_len - 6);\n' %(self.name))
ret += self.indent_out('} else {\n')
ret += self.indent_out(' ptvcursor_advance(cursor, 1);\n')
ret += self.indent_out('}\n')
elif self.size_fieldname:
ret += self.indent_out('ptvcursor_add(cursor, hf_skinny_%s, 6, ENC_NA);\n' %(self.name))
ret += self.indent_out('ptvcursor_advance(cursor, %s - 6);\n' %(self.size_fieldname))
else:
ret += self.indent_out('ptvcursor_add(cursor, hf_skinny_%s, 6, ENC_NA);\n' %(self.name))
ret += self.indent_out('ptvcursor_advance(cursor, %s - 6);\n' %(self.size))
return ret
class BitField(DataNode):
def __init__(self):
DataNode.__init__(self)
def __str__(self):
return '%s:%s' %(self.__class__,self.name)
def declaration(self):
global fieldsArray
ret = ''
int_sizes = {'uint32':4,'uint16':2,'uint8':1,'int32':4,'int16':2,'int8':1}
self.intsize = 0
if self.size in int_sizes:
self.intsize = int_sizes[self.size]
for entries in self.entries:
for entry in entries.entry:
if entry.name not in fieldsArray:
fieldsArray[entry.name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_BOOLEAN, %d, TFS(&tfs_yes_no), %s,\n %s, HFILL }},\n' %(entry.name, entry.text, entry.name.replace("_","."), self.intsize * 8, entry.value, '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
return ret
def dissect(self):
ret = ''
ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s");\n' %(self.name))
for entries in self.entries:
for entry in entries.entry:
ret += self.indent_out('ptvcursor_add_no_advance(cursor, hf_skinny_%s, %d, ENC_LITTLE_ENDIAN);\n' %(entry.name, self.intsize))
ret += self.indent_out('ptvcursor_advance(cursor, %d);\n' %(self.intsize))
ret += self.indent_out('ptvcursor_pop_subtree(cursor); /* end bitfield: %s */\n' %(self.name))
return ret
class Ip(DataNode):
def __init__(self):
DataNode.__init__(self)
self.intsize = 4
if self.type == "ipv6":
self.intsize = 16
def __str__(self):
return '%s:%s' %(self.__class__,self.name)
def declaration(self):
global fieldsArray
if self.name not in fieldsArray:
if self.type == "ipv4":
fieldsArray[self.name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_IPv4, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' %(self.name, self.comment if (self.comment and self.longcomment) else self.name, self.name.replace("_","."), '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
else:
fieldsArray[self.name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_IPv6, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' %(self.name, self.comment if (self.comment and self.longcomment) else self.name, self.name.replace("_","."), '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
return ''
def dissect(self):
if self.type == "ipv4":
return self.indent_out('ptvcursor_add(cursor, hf_skinny_%s, 4, ENC_BIG_ENDIAN);\n' %self.name)
else:
return self.indent_out('ptvcursor_add(cursor, hf_skinny_%s, 16, ENC_NA);\n' %self.name)
class Ipv4or6(DataNode):
def __init__(self):
DataNode.__init__(self)
self.intsize = 4
if self.endianness is None:
self.intsize += 16
def __str__(self):
return '%s:%s' %(self.__class__,self.name)
def declaration(self):
global fieldsArray
ret = ''
name = self.name + '_ipv4'
if name not in fieldsArray:
fieldsArray[name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_IPv4, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' %(name, self.name + ' IPv4 Address', name.replace("_","."), '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
name = self.name + '_ipv6'
if name not in fieldsArray:
fieldsArray[name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_IPv6, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' %(name, self.name + ' IPv6 Address', name.replace("_","."), '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
if self.make_additional_info == "yes":
if self.basemessage.declared is None or self.name not in self.basemessage.declared:
ret += self.indent_out('address %s;\n' %(self.name))
ret += self.indent_out('char *%s_str = NULL;\n' %(self.name))
self.basemessage.declared.append(self.name)
return ret
def dissect(self):
ret = ''
if self.make_additional_info == "yes":
                ret += self.indent_out('read_skinny_ipv4or6(cursor, &%s);\n' %(self.name))
            ret += self.indent_out('dissect_skinny_ipv4or6(cursor, hf_skinny_%s_ipv4, hf_skinny_%s_ipv6);\n' %(self.name, self.name))
            return ret
class XML(DataNode):
def __init__(self):
DataNode.__init__(self)
self.intsize = 0
def __str__(self):
return '%s:%s' %(self.__class__,self.name)
def declaration(self):
global fieldsArray
if self.size:
self.intsize = int(self.size)
elif self.maxsize:
self.intsize = int(self.maxsize)
if self.name not in fieldsArray:
fieldsArray[self.name] = '{ &hf_skinny_%s,\n {\n "%s", "skinny.%s", FT_STRING, BASE_NONE, NULL, 0x0,\n %s, HFILL }},\n' %(self.name, self.comment if (self.comment and self.longcomment) else self.name, self.name.replace("_","."), '"' + self.longcomment + '"' if self.longcomment else '"' + self.comment + '"' if self.comment else 'NULL')
return ''
def dissect(self):
ret = ''
if self.size_fieldname:
ret += self.indent_out('dissect_skinny_xml(cursor, hf_skinny_%s, pinfo, %s, %d);\n' %(self.name, self.size_fieldname, self.intsize))
else:
ret += self.indent_out('dissect_skinny_xml(cursor, hf_skinny_%s, pinfo, 0, %d);\n' %(self.name, self.intsize))
return ret
class Code(DataNode):
def __init__(self):
DataNode.__init__(self)
def __str__(self):
return '%s:%s' %(self.__class__,self.name)
def declaration(self):
return ''
def dissect(self):
ret = ''
if self.type == "calling_and_called_party":
params = self.use_param.split(',')
ret += self.indent_out('if (si->%s && si->%s) {\n' %(params[0], params[1]))
self.incr_indent()
ret += self.indent_out('si->additionalInfo = ws_strdup_printf("\\"%%s -> %%s\\"", si->%s, si->%s);\n' %(params[0], params[1]))
self.decr_indent()
ret += self.indent_out('}\n')
return ret
class Struct(DataNode):
def __str__(self):
            return '// Struct : %s / %s / %s / %s\n' %(self.name, self.size, self.size_fieldname, self.maxsize)
def declaration(self):
ret = ''
if (self.fields is not None and len(self.fields)):
if (len(self.fields) > 1):
if self.basemessage.declared is None or "hdr_version" not in self.basemessage.declared:
ret += self.indent_out("uint32_t hdr_version = tvb_get_letohl(ptvcursor_tvbuff(cursor), 4);\n")
self.basemessage.declared.append("hdr_version")
for fields in self.fields:
ret += '%s' %fields.declaration()
#self.intsize += fields.intsize
self.intsize = fields.intsize
return ret
def dissect(self):
ret = ''
variable = 'counter_%d' %indentation
size = 0
if self.size_fieldname:
#if self.basemessage.dynamic == "yes":
# size = self.size_fieldname
#else:
# size = self.maxsize
size = self.maxsize
elif self.size:
size = self.size
if size:
if self.size_fieldname:
ret += self.indent_out('if (%s <= %s) {%s\n' %(self.size_fieldname, size, ' /* tvb struct size guard */' if debug else ''))
else:
ret += self.indent_out('{\n')
self.incr_indent()
if debug:
ret += self.indent_out('/* start struct : %s / size: %d */\n' %(self.name, self.intsize))
                ret += self.indent_out('uint32_t %s = 0;\n' %(variable))
if self.size_fieldname:
ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [ref:%s = %%d, max:%s]", %s);\n' %(self.name, self.size_fieldname, self.maxsize, self.size_fieldname))
if self.maxsize:
ret += self.indent_out('if (%s && tvb_get_letohl(ptvcursor_tvbuff(cursor), 0) + 8 >= ptvcursor_current_offset(cursor) + (%s * %s) && %s <= %s) {%s\n' %(self.size_fieldname, self.size_fieldname, self.intsize, self.size_fieldname, self.maxsize, '/* tvb counter size guard */' if debug else ''))
else:
ret += self.indent_out('if (%s && tvb_get_letohl(ptvcursor_tvbuff(cursor), 0) + 8 >= ptvcursor_current_offset(cursor) + (%s * %s)) {%s\n' %(self.size_fieldname, self.size_fieldname, self.intsize, '/* tvb counter size guard */' if debug else ''))
self.incr_indent()
else:
ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [max:%s]");\n' %(self.name, size))
                ret += self.indent_out('for (%s = 0; %s < %s; %s++) {\n' %(variable, variable, size, variable))
if self.basemessage.dynamic == "no" and self.size_fieldname:
self.incr_indent()
ret += self.indent_out('if (%s < %s) {\n' %(variable,self.size_fieldname))
self.incr_indent()
else:
if debug:
ret += self.indent_out('{ /* start struct : %s / size: %d */\n' %(self.name, self.intsize))
else:
ret += self.indent_out('{\n')
self.incr_indent()
ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s");\n' %(self.name))
if size:
if self.size_fieldname:
ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [%%d / %%d]", %s + 1, %s);\n' %(self.name, variable, self.size_fieldname))
else:
ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s [%%d / %%d]", %s + 1, %s);\n' %(self.name, variable, size))
if (self.fields is not None and len(self.fields)):
for fields in self.fields:
ret += '%s' %fields.dissect()
if self.basemessage.dynamic == "no" and self.size_fieldname:
self.decr_indent()
ret += self.indent_out('} else {\n')
ret += self.indent_out(' ptvcursor_advance(cursor, %d);\n' %(self.intsize))
ret += self.indent_out('}\n')
if size:
ret += self.indent_out('ptvcursor_pop_subtree(cursor);\n')
self.decr_indent()
if debug:
ret += self.indent_out('} /* end for loop tree: %s */\n' %self.name)
else:
ret += self.indent_out('}\n')
if self.size_fieldname:
self.decr_indent()
ret += self.indent_out('} /* end counter tvb size guard */\n' if debug else '}\n')
ret += self.indent_out('ptvcursor_pop_subtree(cursor);\n')
if debug:
ret += self.indent_out('/* end struct: %s */\n' %self.name)
self.decr_indent()
if self.size_fieldname:
ret += self.indent_out('} else {\n')
self.incr_indent()
                ret += self.indent_out('ptvcursor_advance(cursor, (%s * %s));%s\n' %(self.size_fieldname, self.intsize, ' /* guard kicked in -> skip the rest */' if debug else ''))
self.decr_indent()
ret += self.indent_out('} /* end struct size guard */\n' if debug else '}\n')
return ret
class Union(DataNode):
def __str__(self):
return '%s:%s' %(self.__class__,self.name)
def declaration(self):
ret = ''
self.maxsize = 0
if (self.fields is not None and len(self.fields)):
if (len(self.fields) > 1):
if self.basemessage.declared is None or "hdr_version" not in self.basemessage.declared:
ret += self.indent_out("uint32_t hdr_version = tvb_get_letohl(ptvcursor_tvbuff(cursor), 4);\n")
self.basemessage.declared.append("hdr_version")
for fields in self.fields:
ret += '%s' %fields.declaration()
previous_lookup_eq = fields._children[0].lookup_eq
previous_lookup_le = fields._children[0].lookup_le
previous_lookup_ge = fields._children[0].lookup_ge
self.runningtotal = 0
for field in fields._children:
                        if previous_lookup_eq != field.lookup_eq or previous_lookup_le != field.lookup_le or previous_lookup_ge != field.lookup_ge:
previous_lookup_eq = field.lookup_eq
previous_lookup_le = field.lookup_le
previous_lookup_ge = field.lookup_ge
self.runningtotal = 0
self.runningtotal += field.intsize
if self.runningtotal > self.maxsize:
self.maxsize = self.runningtotal
self.intsize = self.maxsize
return ret
def dissect(self):
ret = ''
ifblock = self.indent_out('if')
skip = 0
#ret += self.indent_out('/* Union : %s / maxsize: %s */\n' %(self.name, self.maxsize))
if (self.fields is not None and len(self.fields)):
for fields in self.fields:
for field in fields._children:
if self.lookup_guide and (field.lookup_ge or field.lookup_le or field.lookup_eq):
lookupguide = self.lookup_guide
# start block
subtree_text = ''
if field.lookup_ge and field.lookup_le:
ret += '%s (%s >= %s && %s <= %s)' %(ifblock, lookupguide, field.lookup_ge.upper(), lookupguide, field.lookup_le.upper())
subtree_text = "%s <= %s <= %s" %(field.lookup_ge, lookupguide, field.lookup_le)
elif field.lookup_ge:
ret += '%s (%s >= %s)' %(ifblock, lookupguide, field.lookup_ge.upper())
subtree_text = "%s >= %s" %(lookupguide, field.lookup_ge)
elif field.lookup_le:
ret += '%s (%s <= %s)' %(ifblock, lookupguide, field.lookup_le.upper())
subtree_text = "%s <= %s" %(lookupguide, field.lookup_le)
elif field.lookup_eq:
if field.lookup_eq == "*":
ret += ' else'
subtree_text = "any %s" %(lookupguide)
elif field.lookup_eq == "skip":
continue
else:
ret += '%s (%s == %s)' %(ifblock, lookupguide, field.lookup_eq.upper())
subtree_text = "%s is %s" %(lookupguide, field.lookup_eq)
ret += self.indent_out(' {\n')
self.incr_indent()
if debug:
ret += self.indent_out('/* start union : %s / maxsize: %s */\n' %(self.name, self.maxsize))
currsize = 0
# dissect field
ret += self.indent_out('ptvcursor_add_text_with_subtree(cursor, SUBTREE_UNDEFINED_LENGTH, ett_skinny_tree, "%s");\n' %subtree_text)
ret += '%s' %field.dissect()
ret += self.indent_out('ptvcursor_pop_subtree(cursor);\n')
currsize += field.intsize
# compensate length
if (self.maxsize - currsize) > 0:
ret += self.indent_out('ptvcursor_advance(cursor, %d);\n' %(self.maxsize - currsize))
self.decr_indent()
# close block
ret += self.indent_out('}')
ifblock = ' else if'
else:
ret += '/* ERROR %s, missing lookup_guide */' %field.dissect()
ret += '\n'
return ret
class TreeBuilder(xml.sax.handler.ContentHandler):
def __init__(self):
self.stack = []
self.root = DataNode()
self.previous = self.root
self.current = self.root
self.basemessage = None
self.text_parts = []
def startElement(self, name, attrs):
objecttype = {"message": Message(), "fields": Fields(), "enum" : Enum(), "bitfield" : BitField(), "struct": Struct(), "union": Union(), "integer": Integer(), "string": String(), "ether": Ether(), "ip": Ip(), "ipv4or6": Ipv4or6(), "xml": XML(), "code": Code()}
self.previous = self.current
self.stack.append((self.current, self.text_parts))
if name in objecttype.keys():
self.current = objecttype[name]
else:
self.current = DataNode()
if name == "message":
self.basemessage = self.current
self.text_parts = []
#self.children = []
self.current.parent = self.previous
self.current.basemessage = self.basemessage
# xml attributes --> python attributes
for k, v in list(attrs.items()):
self.current._add_xml_attr(_name_mangle(k), v)
def endElement(self, name):
text = ''.join(self.text_parts).strip()
if text:
self.current.data = text
if self.current._attrs:
obj = self.current
else:
# a text only node is simply represented by the string
obj = text or ''
self.current, self.text_parts = self.stack.pop()
self.current._add_xml_attr(_name_mangle(name), obj)
self.current._add_child(_name_mangle(name), obj)
def characters(self, content):
self.text_parts.append(content)
builder = TreeBuilder()
xml.sax.parse(src, builder)
return list(builder.root._attrs.values())[0]
# skinny = xml2obj('SkinnyProtocolOptimized.xml')
# for message in skinny.message:
#    print('%s' %message.dissect())
#if __name__ == '__main__':
# import timeit
# print(timeit.timeit("generateMessageDissectors()", setup="from __main__ import generateMessageDissectors"))
#skinny = xml2obj('SkinnyProtocolOptimized.xml')
#for message in skinny.message:
# print(message)
# message.dissect()
#for key,value in fieldsArray.items():
# print "%s : %s" %(key,value)
#print '%r\n' %fieldsArray
#skinny = xml2obj('SkinnyProtocolOptimized.xml')
#for message in skinny.message:
# print message.declaration() |
Python | wireshark/tools/pkt-from-core.py | #!/usr/bin/env python3
"""
Retrieve a packet from a wireshark/tshark core file
and save it in a packet-capture file.
"""
# Copyright (C) 2013 by Gilbert Ramirez <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later
import getopt
import os
import re
import sys
import tempfile
exec_file = None
core_file = None
output_file = None
verbose = 0
debug = 0
class BackTrace:
re_frame = re.compile(r"^#(?P<num>\d+) ")
re_func1 = re.compile(r"^#\d+\s+(?P<func>\w+) \(")
re_func2 = re.compile(r"^#\d+\s+0x[A-Fa-f\d]+ in (?P<func>\w+) \(")
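    # gdb frame lines these are intended to match look like (illustrative):
    #   "#1  epan_dissect_run (edt=..., ...)"          -> re_func1
    #   "#2  0x0804f7a2 in main (argc=2, argv=...)"    -> re_func2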
def __init__(self, lines):
# In order; each item is the function name.
self.frames = []
found_non_bt_frame = 0
frame_will_be = 0
for line in lines:
m = self.re_frame.search(line)
if m:
# Skip the first frame that gdb shows,
# which is not part of the backtrace.
if not found_non_bt_frame:
found_non_bt_frame = 1
continue
# Get the frame number and make sure it's
# what we expect it should be.
frame_num = int(m.group("num"))
if frame_num != frame_will_be:
sys.exit("Found frame %d instead of %d" %
(frame_num, frame_will_be))
# Find the function name. XXX - need to handle '???'
n = self.re_func1.search(line)
if not n:
n = self.re_func2.search(line)
if n:
func = n.group("func")
else:
sys.exit("Function name not found in %s" % (line,))
# Save the info
self.frames.append(func)
frame_will_be += 1
def Frames(self):
return self.frames
def HasFunction(self, func):
return func in self.frames
def Frame(self, func):
return self.frames.index(func)
# Some values from wiretap; wiretap should be a shared
# library and a Python module should be created for it so
# this program could just write a libpcap file directly.
WTAP_ENCAP_NONE = -2
WTAP_ENCAP_PER_PACKET = -1
WTAP_ENCAP_UNKNOWN = 0
WTAP_ENCAP_ETHERNET = 1
WTAP_ENCAP_TOKEN_RING = 2
WTAP_ENCAP_SLIP = 3
WTAP_ENCAP_PPP = 4
WTAP_ENCAP_FDDI = 5
WTAP_ENCAP_FDDI_BITSWAPPED = 6
WTAP_ENCAP_RAW_IP = 7
WTAP_ENCAP_ARCNET = 8
WTAP_ENCAP_ATM_RFC1483 = 9
WTAP_ENCAP_LINUX_ATM_CLIP = 10
WTAP_ENCAP_LAPB = 11
WTAP_ENCAP_ATM_SNIFFER = 12
WTAP_ENCAP_NULL = 13
WTAP_ENCAP_ASCEND = 14
WTAP_ENCAP_LAPD = 15
WTAP_ENCAP_V120 = 16
WTAP_ENCAP_PPP_WITH_PHDR = 17
WTAP_ENCAP_IEEE_802_11 = 18
WTAP_ENCAP_SLL = 19
WTAP_ENCAP_FRELAY = 20
WTAP_ENCAP_CHDLC = 21
WTAP_ENCAP_CISCO_IOS = 22
WTAP_ENCAP_LOCALTALK = 23
WTAP_ENCAP_PRISM_HEADER = 24
WTAP_ENCAP_PFLOG = 25
WTAP_ENCAP_AIROPEEK = 26
WTAP_ENCAP_HHDLC = 27
# last WTAP_ENCAP_ value + 1
WTAP_NUM_ENCAP_TYPES = 28
wtap_to_pcap_map = {
WTAP_ENCAP_NULL : 0,
WTAP_ENCAP_ETHERNET : 1,
WTAP_ENCAP_TOKEN_RING : 6,
WTAP_ENCAP_ARCNET : 7,
WTAP_ENCAP_SLIP : 8,
WTAP_ENCAP_PPP : 9,
WTAP_ENCAP_FDDI_BITSWAPPED : 10,
WTAP_ENCAP_FDDI : 10,
WTAP_ENCAP_ATM_RFC1483 : 11,
WTAP_ENCAP_RAW_IP : 12,
WTAP_ENCAP_LINUX_ATM_CLIP : 16, # or 18, or 19...
WTAP_ENCAP_CHDLC : 104,
WTAP_ENCAP_IEEE_802_11 : 105,
WTAP_ENCAP_SLL : 113,
WTAP_ENCAP_LOCALTALK : 114,
WTAP_ENCAP_PFLOG : 117,
WTAP_ENCAP_CISCO_IOS : 118,
WTAP_ENCAP_PRISM_HEADER : 119,
WTAP_ENCAP_HHDLC : 121,
}
wtap_name = {
WTAP_ENCAP_NONE : "None",
WTAP_ENCAP_UNKNOWN : "Unknown",
WTAP_ENCAP_ETHERNET : "Ethernet",
WTAP_ENCAP_TOKEN_RING : "Token-Ring",
WTAP_ENCAP_SLIP : "SLIP",
WTAP_ENCAP_PPP : "PPP",
WTAP_ENCAP_FDDI : "FDDI",
WTAP_ENCAP_FDDI_BITSWAPPED : "FDDI (Bitswapped)",
WTAP_ENCAP_RAW_IP : "Raw IP",
WTAP_ENCAP_ARCNET : "ARCNET",
WTAP_ENCAP_ATM_RFC1483 : "ATM RFC1483",
WTAP_ENCAP_LINUX_ATM_CLIP : "Linux ATM CLIP",
WTAP_ENCAP_LAPB : "LAPB",
WTAP_ENCAP_ATM_SNIFFER : "ATM Sniffer",
WTAP_ENCAP_NULL : "Null",
WTAP_ENCAP_ASCEND : "Ascend",
WTAP_ENCAP_LAPD : "LAPD",
WTAP_ENCAP_V120 : "V.120",
WTAP_ENCAP_PPP_WITH_PHDR : "PPP (with PHDR)",
WTAP_ENCAP_IEEE_802_11 : "IEEE 802.11",
WTAP_ENCAP_SLL : "SLL",
WTAP_ENCAP_FRELAY : "Frame Relay",
WTAP_ENCAP_CHDLC : "Cisco HDLC",
WTAP_ENCAP_CISCO_IOS : "Cisco IOS",
WTAP_ENCAP_LOCALTALK : "LocalTalk",
WTAP_ENCAP_PRISM_HEADER : "Prism Header",
WTAP_ENCAP_PFLOG : "PFLog",
WTAP_ENCAP_AIROPEEK : "AiroPeek",
WTAP_ENCAP_HHDLC : "HHDLC",
}
def wtap_to_pcap(wtap):
    if wtap not in wtap_to_pcap_map:
sys.exit("Don't know how to convert wiretap encoding %d to libpcap." % \
(wtap))
return wtap_to_pcap_map[wtap]
def run_gdb(*commands):
if len(commands) == 0:
return []
# Create a temporary file
fname = tempfile.mktemp()
try:
fh = open(fname, "w")
    except IOError as err:
sys.exit("Cannot open %s for writing: %s" % (fname, err))
# Put the commands in it
for cmd in commands:
fh.write(cmd)
fh.write("\n")
fh.write("quit\n")
try:
fh.close()
    except IOError as err:
try:
os.unlink(fname)
except Exception:
pass
sys.exit("Cannot close %s: %s" % (fname, err))
# Run gdb
cmd = "gdb --nw --quiet --command=%s %s %s" % (fname, exec_file, core_file)
if verbose:
print "Invoking %s" % (cmd,)
try:
pipe = os.popen(cmd)
    except OSError as err:
try:
os.unlink(fname)
except Exception:
pass
sys.exit("Cannot run gdb: %s" % (err,))
# Get gdb's output
result = pipe.readlines()
error = pipe.close()
if error is not None:
try:
os.unlink(fname)
except Exception:
pass
sys.exit("gdb returned an exit value of %s" % (error,))
# Remove the temp file and return the results
try:
os.unlink(fname)
except Exception:
pass
return result
def get_value_from_frame(frame_num, variable, fmt=""):
cmds = []
if frame_num > 0:
cmds.append("up %d" % (frame_num,))
cmds.append("print %s %s" % (fmt, variable))
    lines = run_gdb(*cmds)
LOOKING_FOR_START = 0
READING_VALUE = 1
state = LOOKING_FOR_START
result = ""
for line in lines:
if line[-1] == "\n":
line = line[0:-1]
if line[-1] == "\r":
line = line[0:-1]
if state == LOOKING_FOR_START:
if len(line) < 4:
continue
else:
if line[0:4] == "$1 =":
result = line[4:]
state = READING_VALUE
elif state == READING_VALUE:
result += line
return result
def get_int_from_frame(frame_num, variable):
text = get_value_from_frame(frame_num, variable)
try:
integer = int(text)
except ValueError:
sys.exit("Could not convert '%s' to integer." % (text,))
return integer
def get_byte_array_from_frame(frame_num, variable, length):
cmds = []
if frame_num > 0:
cmds.append("up %d" % (frame_num,))
cmds.append("print %s" % (variable,))
cmds.append("x/%dxb %s" % (length, variable))
    lines = run_gdb(*cmds)
if debug:
        print(lines)
bytes = []
LOOKING_FOR_START = 0
BYTES = 1
state = LOOKING_FOR_START
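    # gdb's "x/<n>xb" output, parsed below, looks like this (illustrative):
    #   0x8049560:      0x45    0x00    0x00    0x28    0x1b    0x34
    # i.e. tab-separated fields whose first field is an address ending in ":".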
for line in lines:
if state == LOOKING_FOR_START:
if len(line) < 3:
continue
elif line[0:3] == "$1 ":
state = BYTES
elif state == BYTES:
            line = line.rstrip()
fields = line.split('\t')
if fields[0][-1] != ":":
print "Failed to parse byte array from gdb:"
print line
sys.exit(1)
for field in fields[1:]:
val = int(field, 16)
bytes.append(val)
else:
assert 0
return bytes
def make_cap_file(pkt_data, lnk_t):
pcap_lnk_t = wtap_to_pcap(lnk_t)
# Create a temporary file
fname = tempfile.mktemp()
try:
fh = open(fname, "w")
    except IOError as err:
sys.exit("Cannot open %s for writing: %s" % (fname, err))
print "Packet Data:"
# Put the hex dump in it
offset = 0
BYTES_IN_ROW = 16
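    # The dump written below is the "offset, then hex bytes" form that
    # text2pcap parses, e.g. (illustrative):
    #   00000000  45 00 00 28 1B 34 40 00 ...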
for byte in pkt_data:
if (offset % BYTES_IN_ROW) == 0:
print >> fh, "\n%08X " % (offset,),
print "\n%08X " % (offset,),
print >> fh, "%02X " % (byte,),
print "%02X " % (byte,),
offset += 1
print >> fh, "\n"
print "\n"
try:
fh.close()
    except IOError as err:
try:
os.unlink(fname)
except Exception:
pass
sys.exit("Cannot close %s: %s" % (fname, err))
# Run text2pcap
cmd = "text2pcap -q -l %s %s %s" % (pcap_lnk_t, fname, output_file)
# print "Command is %s" % (cmd,)
try:
retval = os.system(cmd)
    except OSError as err:
try:
os.unlink(fname)
except Exception:
pass
sys.exit("Cannot run text2pcap: %s" % (err,))
# Remove the temp file
try:
os.unlink(fname)
except Exception:
pass
if retval == 0:
print "%s created with %d bytes in packet, and %s encoding." % \
(output_file, len(pkt_data), wtap_name[lnk_t])
else:
sys.exit("text2pcap did not run successfully.")
def try_frame(func_text, cap_len_text, lnk_t_text, data_text):
# Get the back trace
bt_text = run_gdb("bt")
bt = BackTrace(bt_text)
if not bt.HasFunction(func_text):
print "%s() not found in backtrace." % (func_text,)
return 0
else:
print "%s() found in backtrace." % (func_text,)
# Figure out where the call to epan_dissect_run is.
frame_num = bt.Frame(func_text)
# Get the capture length
cap_len = get_int_from_frame(frame_num, cap_len_text)
# Get the encoding type
lnk_t = get_int_from_frame(frame_num, lnk_t_text)
# Get the packet data
pkt_data = get_byte_array_from_frame(frame_num, data_text, cap_len)
if verbose:
print "Length=%d" % (cap_len,)
print "Encoding=%d" % (lnk_t,)
print "Data (%d bytes) = %s" % (len(pkt_data), pkt_data)
make_cap_file(pkt_data, lnk_t)
return 1
def run():
if try_frame("epan_dissect_run",
"fd->cap_len", "fd->lnk_t", "data"):
return
elif try_frame("add_packet_to_packet_list",
"fdata->cap_len", "fdata->lnk_t", "buf"):
return
else:
sys.exit("A packet cannot be pulled from this core.")
def usage():
print "pkt-from-core.py [-v] -w capture_file executable-file (core-file or process-id)"
print ""
print "\tGiven an executable file and a core file, this tool"
print "\tuses gdb to retrieve the packet that was being dissected"
print "\tat the time wireshark/tshark stopped running. The packet"
print "\tis saved in the capture_file specified by the -w option."
print ""
print "\t-v : verbose"
sys.exit(1)
def main():
global exec_file
global core_file
global output_file
global verbose
global debug
optstring = "dvw:"
try:
opts, args = getopt.getopt(sys.argv[1:], optstring)
    except getopt.GetoptError:
usage()
for opt, arg in opts:
if opt == "-w":
output_file = arg
elif opt == "-v":
verbose = 1
elif opt == "-d":
debug = 1
else:
assert 0
if output_file is None:
usage()
if len(args) != 2:
usage()
exec_file = args[0]
core_file = args[1]
run()
if __name__ == '__main__':
main()
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
# |
wireshark/tools/pre-commit | #!/bin/sh
# Copyright 2013, Alexis La Goutte (See AUTHORS file)
#
# For git user: copy tools/pre-commit to .git/hooks/ folder and make it
# executable. To bypass it for a single commit, use the --no-verify argument.
# Using --no-verify will then fail during git review because of a missing
# ChangeID. Fix that by running git review -i. Do not use -i during normal
# operation.
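#
# For example (run from the top-level source directory):
#
#   cp tools/pre-commit .git/hooks/pre-commit
#   chmod +x .git/hooks/pre-commit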
#
# Alternatively, invoke it directly with the commit ID. Example for checking the
# last commit:
#
# tools/pre-commit HEAD~
#
# Relative paths are also supported. For instance, if you are in epan/, then you
# could invoke `../tools/pre-commit HEAD` to check for changes to staged files.
#
# From
# http://mark-story.com/posts/view/using-git-commit-hooks-to-prevent-stupid-mistakes
#
# If the commit identifier is not given, use HEAD instead.
COMMIT_ID="${1:-HEAD}"
UNAME=$( uname -a )
case "$UNAME" in
*\ Msys)
pyvar="pythonw.exe"
;;
*)
pyvar="python3"
;;
esac
PYBIN=${WS_GITHOOK_PYTHON:-$pyvar}
# Path to hook script in the .git directory
hook_script=${GIT_DIR:-.git}/hooks/pre-commit
# Always start in the root directory of the source tree, this allows for
# invocations via relative paths (such as ../tools/pre-commit):
if ! cd "$(git rev-parse --show-toplevel)" ; then
echo "Can't change to the top-level source directory."
exit 1
fi
# Check for newer (actually, different) versions of the pre-commit script
# (but only if invoked as hook, i.e. the commit ID is not given as argument).
if [ -z "$1" ] && [ -f "$hook_script" ]; then
if ! cmp -s "$hook_script" tools/pre-commit; then
echo "Pre-commit hook script is outdated, please update! (cp tools/pre-commit ${hook_script})"
fi
fi
exit_status=0
COMMIT_FILES=$( git diff-index --cached --name-status "${COMMIT_ID}" | grep -v "^D" | cut -f2 | grep "\\.[ch]$" )
DIAMETER_FILES=$( git diff-index --cached --name-status "${COMMIT_ID}" | grep -v "^D" | cut -f2 | grep diameter/ )
# Path to filter script in the tools directory
filter_script=${PWD}/tools/pre-commit-ignore.py
filter_conf=${PWD}/tools/pre-commit-ignore.conf
if [ -f "$filter_script" ] && [ -f "$filter_conf" ]; then
CHECK_FILES=$( echo "$COMMIT_FILES" | "$PYBIN" "$filter_script" "$filter_conf" ) || exit
else
CHECK_FILES="$COMMIT_FILES"
fi
bad_alloc_patterns=${PWD}/tools/detect_bad_alloc_patterns.py
echo "$COMMIT_FILES" | $PYBIN "$bad_alloc_patterns"
# On Windows, Python will output \r\n line endings - we don't want that.
#
# Do not use sed, as not all versions of sed support \r as meaning CR
# in a regexp - the only version that does so might be GNU sed; the
# GNU sed documentation says that only \n and \\ can be used in a
# portable script.
#
# The Single UNIX Specification says that tr supports \r; most if not
# all modern UN*Xes should support it.
CHECK_FILES=$( echo "$CHECK_FILES" | tr -d '\r' )
for FILE in $CHECK_FILES; do
# Skip some special cases
FILE_BASENAME="$( basename "$FILE" )"
# This should only be done on code that's part of one or more
# Wireshark programs; idl2wrs.c is a developer tool, not a
# Wireshark program, so these tests don't apply.
if test "$FILE_BASENAME" = "idl2wrs.c"
then
continue
fi
if test "$FILE_BASENAME" = "wmem_test.c"
then
continue
fi
    # Check that header field registrations are consistent
    ./tools/checkhf.pl "$FILE" || exit_status=1
    # Check for forbidden or problematic API usage
    ./tools/checkAPIs.pl -p "$FILE" || exit_status=1
    # Check the encoding arguments of proto_tree_add_item() calls
    ./tools/fix-encoding-args.pl "$FILE" || exit_status=1
    # Check that display filter names follow the conventions
    ./tools/checkfiltername.pl "$FILE" || exit_status=1
# If there are whitespace errors, print the offending file names and fail. (from git pre-commit.sample)
git diff-index --check --cached "${COMMIT_ID}" "$FILE" || exit_status=1
done
if [ "x$DIAMETER_FILES" != x ]
then
./tools/validate-diameter-xml.sh > /dev/null || exit_status=1
fi
exit $exit_status
#
# Editor modelines
#
# Local Variables:
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
#
# ex: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
# |
wireshark/tools/pre-commit-ignore.conf | # Files listed here are ignored by the git pre-commit hook for the purpose
# of checking for forbidden APIs and other dissector-specific glitches.
#
# Each line is compared against the output of 'git diff-index --name-only'.
# For example to skip checking this file add:
#
# tools/pre-commit-ignore.conf
#
# The pathname wildcards allowed are: '*', '?', character set '[abc]' or
# negated with '[!abc]'.
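#
# For example, a hypothetical entry such as:
#
# epan/dissectors/packet-[abc]*.c
#
# would skip the checks for every dissector whose file name starts
# with a, b or c.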
cli_main.c
doc/packet-PROTOABBREV.c
epan/dissectors/asn1/*/*asn
epan/dissectors/asn1/*/packet-*-template.c
epan/dissectors/packet-http.c
epan/nghttp2/*
epan/wmem/wmem_strbuf.c
epan/wmem/wmem_strutil.c
epan/wslua/init_wslua.c
extcap/*
resources/stock_icons/*
mmdbresolve.c
packaging/*
tools/lemon/*
wsutil/file_util.h
wsutil/strptime.c |
Python | wireshark/tools/pre-commit-ignore.py | #!/usr/bin/env python3
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
import sys
import os
import fnmatch
IGNORE_CONF = "pre-commit-ignore.conf"
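# The script reads one changed-file name per line on stdin and echoes back
# only those that do not match an ignore pattern. A hand-run example
# (tools/pre-commit pipes its staged file list in much the same way):
#
#   git diff-index --cached --name-only HEAD | \
#       python3 tools/pre-commit-ignore.py tools/pre-commit-ignore.conf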
if len(sys.argv) > 2:
print("Usage: {0} [path/to/ignore.conf]".format(sys.argv[0]))
sys.exit(1)
if len(sys.argv) == 2:
ignore_path = sys.argv[1]
else:
ignore_path = IGNORE_CONF
# Load the ignore patterns from 'path', skipping blank lines and
# comments, and return them as a list.
def load_checkignore(path):
try:
with open(path) as f:
patterns = f.read()
except OSError as err:
sys.exit(str(err))
ign = [l.strip() for l in patterns.splitlines()]
ign = [l for l in ign if l and not l.startswith("#")]
return ign
ignore_list = load_checkignore(ignore_path)
def ignore_match(f):
for p in ignore_list:
if fnmatch.fnmatchcase(f, p):
return True
return False
for line in sys.stdin:
line = line.strip()
if not ignore_match(line):
print(line)
#
# Editor modelines
#
# Local Variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# ex: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
# |
Perl | wireshark/tools/process-x11-fields.pl | #!/usr/bin/perl
#
# Script to convert "x11-fields" file, listing fields for
# X11 dissector, into header files declaring field-index
# values and field definitions for those fields.
#
# Instructions for using this script are in epan/dissectors/README.X11
#
# Copyright 2000, Christophe Tronche <ch.tronche[AT]computer.org>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
use File::Spec;
my $srcdir = shift;
die "'$srcdir' is not a directory" unless -d $srcdir;
open(DECL, "> $srcdir/x11-declarations.h") || die "Cannot open $srcdir/x11-declarations.h: $!";
open(REG, "> $srcdir/x11-register-info.h") || die "Cannot open $srcdir/x11-register-info.h: $!";
my $script_name = File::Spec->abs2rel ($0, $srcdir);
sub add_generated_header {
my ($out) = @_;
print $out <<eot
/* Do not modify this file. */
/* It was automatically generated by $script_name. */
eot
;
# Add license text
print $out <<eot
/*
* Copyright 2000, Christophe Tronche <ch.tronche[AT]computer.org>
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald[AT]wireshark.org>
* Copyright 1998 Gerald Combs
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
eot
;
}
add_generated_header(DECL);
add_generated_header(REG);
$prefix = '';
$subfieldStringLength = 0;
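# Each remaining input line is expected to have the form
#
#     abbrev TYPE DISPLAY [VALS | VALS(name)] [0xMASK] [blurb text...]
#
# where leading whitespace expresses nesting under the previous field and
# DISPLAY is either a base (DEC, HEX, ...) or, for boolean bitfields, the
# parent field's width in bits. As a hypothetical example, a top-level
# line "depth UINT8 DEC" would produce "static int hf_x11_depth = -1;"
# in x11-declarations.h and a matching hf_register_info entry in
# x11-register-info.h.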
while(<>) {
s/#.*$//go;
next if /^\s*$/o;
s/^(\s*)//o;
$subfield = $1;
if (length $subfield != $subfieldStringLength) {
if (!length $subfield) {
$prefix = '';
} elsif (length $subfield > $subfieldStringLength) {
$prefix .= "$lastAbbrev.";
} else {
$prefix =~ s/^(.*)\.[^\.]+\.$/$1./o;
}
$subfieldStringLength = length $subfield;
}
@fields = split /\s+/o ;
    if ($fields[0] eq '#') {
        #
        # Comments have already been stripped by the substitution
        # above, so this check is only a safeguard in case a literal
        # '#' survives as the first token.
        #
        next;
    }
$abbrev = shift @fields;
$type = shift @fields;
$lastAbbrev = $abbrev;
$field = $prefix.$abbrev;
if ($fields[0] =~ /^\d+$/o) {
#
# This is presumably a Boolean bitfield, and this is the number
# of bits in the parent field.
#
$fieldDisplay = shift @fields;
} else {
#
# The next token is the base for the field.
#
$fieldDisplay = "BASE_".shift @fields;
}
if ($fields[0] eq 'VALS') {
#
# It's an enumerated field, with the value_string table having a
# name based on the field's name.
#
shift @fields;
$fieldStrings = "VALS(${abbrev}_vals)";
$fieldStrings =~ s/-/_/go;
} elsif ($fields[0] =~ /^VALS\(/o) {
#
# It's an enumerated field, with a specified name for the
# value_string table.
#
$fieldStrings = shift @fields;
$fieldStrings =~ s/\)/_vals\)/o;
} else {
#
# It's not an enumerated field.
#
$fieldStrings = 'NULL';
}
if ($fields[0] =~ /^0x/) {
#
# The next token looks like a bitmask for a bitfield.
#
$mask = shift @fields;
} else {
$mask = 0;
}
$rest = join(' ', @fields);
    # Use any remaining text on the line as the blurb.
    $longName = $rest;
# Don't allow empty blurbs
$longName = $longName eq "" ? "NULL" : "\"$longName\"";
$variable = $field;
$variable =~ s/-/_/go;
$variable =~ s/\./_/go;
print DECL "static int hf_x11_$variable = -1;\n";
print REG <<END;
{ &hf_x11_$variable, { "$abbrev", "x11.$field", FT_$type, $fieldDisplay, $fieldStrings, $mask, $longName, HFILL }},
END
}
#
# Editor modelines
#
# Local Variables:
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
#
# ex: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
# |
Perl | wireshark/tools/process-x11-xcb.pl | #!/usr/bin/perl
#
# Script to convert xcbproto and mesa protocol files for
# X11 dissector. Creates header files containing code to
# dissect X11 extensions.
#
# Instructions for using this script are in epan/dissectors/README.X11
#
# Copyright 2008, 2009, 2013, 2014 Open Text Corporation <pharris[AT]opentext.com>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
#TODO
# - support constructs that are legal in XCB, but don't appear to be used
use 5.010;
use warnings;
use strict;
# given/when is going to be removed (and/or dramatically altered)
# in 5.20. Patches welcome.
# Patches even more welcome if they rewrite this whole thing in a
# language with a proper compatibility document, such as
# http://golang.org/doc/go1compat
no if $] >= 5.018, warnings => "experimental::smartmatch";
use IO::File;
use XML::Twig;
use File::Spec;
my $srcdir = shift;
die "'$srcdir' is not a directory" unless -d $srcdir;
my @reslist = grep {!/xproto\.xml$/} glob File::Spec->catfile($srcdir, 'xcbproto', 'src', '*.xml');
my @register;
my $script_name = File::Spec->abs2rel ($0, $srcdir);
my %basictype = (
char => { size => 1, encoding => 'ENC_ASCII|ENC_NA', type => 'FT_STRING', base => 'BASE_NONE', get => 'tvb_get_guint8', list => 'listOfByte', },
void => { size => 1, encoding => 'ENC_NA', type => 'FT_BYTES', base => 'BASE_NONE', get => 'tvb_get_guint8', list => 'listOfByte', },
BYTE => { size => 1, encoding => 'ENC_NA', type => 'FT_BYTES', base => 'BASE_NONE', get => 'tvb_get_guint8', list => 'listOfByte', },
CARD8 => { size => 1, encoding => 'byte_order', type => 'FT_UINT8', base => 'BASE_HEX_DEC', get => 'tvb_get_guint8', list => 'listOfByte', },
CARD16 => { size => 2, encoding => 'byte_order', type => 'FT_UINT16', base => 'BASE_HEX_DEC', get => 'tvb_get_guint16', list => 'listOfCard16', },
CARD32 => { size => 4, encoding => 'byte_order', type => 'FT_UINT32', base => 'BASE_HEX_DEC', get => 'tvb_get_guint32', list => 'listOfCard32', },
CARD64 => { size => 8, encoding => 'byte_order', type => 'FT_UINT64', base => 'BASE_HEX_DEC', get => 'tvb_get_guint64', list => 'listOfCard64', },
INT8 => { size => 1, encoding => 'byte_order', type => 'FT_INT8', base => 'BASE_DEC', get => 'tvb_get_guint8', list => 'listOfByte', },
INT16 => { size => 2, encoding => 'byte_order', type => 'FT_INT16', base => 'BASE_DEC', get => 'tvb_get_guint16', list => 'listOfInt16', },
INT32 => { size => 4, encoding => 'byte_order', type => 'FT_INT32', base => 'BASE_DEC', get => 'tvb_get_guint32', list => 'listOfInt32', },
INT64 => { size => 8, encoding => 'byte_order', type => 'FT_INT64', base => 'BASE_DEC', get => 'tvb_get_guint64', list => 'listOfInt64', },
float => { size => 4, encoding => 'byte_order', type => 'FT_FLOAT', base => 'BASE_NONE', get => 'tvb_get_ieee_float', list => 'listOfFloat', },
double => { size => 8, encoding => 'byte_order', type => 'FT_DOUBLE', base => 'BASE_NONE', get => 'tvb_get_ieee_double', list => 'listOfDouble', },
BOOL => { size => 1, encoding => 'byte_order', type => 'FT_BOOLEAN',base => 'BASE_NONE', get => 'tvb_get_guint8', list => 'listOfByte', },
);
my %simpletype; # Reset at the beginning of each extension
my %gltype; # No need to reset, since it's only used once
my %struct = # Not reset; contains structures already defined.
# Also contains this black-list of structures never used by any
# extension (to avoid generating useless code).
(
# structures defined by xproto, but not used by any extension
'xproto:CHAR2B' => 1,
'xproto:ARC' => 1,
'xproto:FORMAT' => 1,
'xproto:VISUALTYPE' => 1,
'xproto:DEPTH' => 1,
'xproto:SCREEN' => 1,
'xproto:SetupRequest' => 1,
'xproto:SetupFailed' => 1,
'xproto:SetupAuthenticate' => 1,
'xproto:Setup' => 1,
'xproto:TIMECOORD' => 1,
'xproto:FONTPROP' => 1,
'xproto:CHARINFO' => 1,
'xproto:SEGMENT' => 1,
'xproto:COLORITEM' => 1,
'xproto:RGB' => 1,
'xproto:HOST' => 1,
'xproto:POINT' => 1,
# structures defined by xinput, but never used (except by each other)(bug in xcb?)
'xinput:KeyInfo' => 1,
'xinput:ButtonInfo' => 1,
'xinput:ValuatorInfo' => 1,
'xinput:KbdFeedbackState' => 1,
'xinput:PtrFeedbackState' => 1,
'xinput:IntegerFeedbackState' => 1,
'xinput:StringFeedbackState' => 1,
'xinput:BellFeedbackState' => 1,
'xinput:LedFeedbackState' => 1,
'xinput:KbdFeedbackCtl' => 1,
'xinput:PtrFeedbackCtl' => 1,
'xinput:IntegerFeedbackCtl' => 1,
'xinput:StringFeedbackCtl' => 1,
'xinput:BellFeedbackCtl' => 1,
'xinput:LedFeedbackCtl' => 1,
'xinput:KeyState' => 1,
'xinput:ButtonState' => 1,
'xinput:ValuatorState' => 1,
'xinput:DeviceResolutionState' => 1,
'xinput:DeviceAbsCalibState' => 1,
'xinput:DeviceAbsAreaState' => 1,
'xinput:DeviceCoreState' => 1,
'xinput:DeviceEnableState' => 1,
'xinput:DeviceResolutionCtl' => 1,
'xinput:DeviceAbsCalibCtl' => 1,
'xinput:DeviceAbsAreaCtrl' => 1,
'xinput:DeviceCoreCtrl' => 1,
'xinput:DeviceEnableCtrl' => 1,
'xinput:DeviceName' => 1,
'xinput:AddMaster' => 1,
'xinput:RemoveMaster' => 1,
'xinput:AttachSlave' => 1,
'xinput:DetachSlave' => 1,
'xinput:ButtonClass' => 1,
'xinput:KeyClass' => 1,
'xinput:ScrollClass' => 1,
'xinput:TouchClass' => 1,
'xinput:ValuatorClass' => 1,
# structures defined by xv, but never used (bug in xcb?)
'xv:Image' => 1,
# structures defined by xkb, but never used (except by each other)(bug in xcb?)
'xkb:Key' => 1,
'xkb:Outline' => 1,
'xkb:Overlay' => 1,
'xkb:OverlayKey' => 1,
'xkb:OverlayRow' => 1,
'xkb:Row' => 1,
'xkb:Shape' => 1,
);
my %enum; # Not reset; contains enums already defined.
my %enum_name;
my %type_name;
my $header;
my $extname;
my @incname;
my %request;
my %genericevent;
my %event;
my %reply;
# Output files
my $impl;
my $reg;
my $decl;
my $error;
# glRender sub-op output files
my $enum;
# Mesa API definitions keep moving
my @mesas = ($srcdir . '/mesa/src/mapi/glapi/gen', # 2010-04-26
$srcdir . '/mesa/src/mesa/glapi/gen', # 2010-02-22
$srcdir . '/mesa/src/mesa/glapi'); # 2004-05-18
my $mesadir = (grep { -d } @mesas)[0];
sub mesa_category {
my ($t, $elt) = @_;
$t->purge;
}
#used to prevent duplication and sort enumerated values
my %mesa_enum_hash = ();
sub mesa_enum {
my ($t, $elt) = @_;
my $name = $elt->att('name');
my $value = $elt->att('value');
my $hex_value = hex($value); #convert string to hex value to catch leading zeros
#make sure value isn't already in the hash, to prevent duplication in value_string
if (!exists($mesa_enum_hash{$hex_value})) {
$mesa_enum_hash{$hex_value} = $name;
}
$t->purge;
}
sub mesa_type {
my ($t, $elt) = @_;
my $name = $elt->att('name');
my $size = $elt->att('size');
my $float = $elt->att('float');
my $unsigned = $elt->att('unsigned');
my $base;
$t->purge;
if($name eq 'enum') {
# enum does not have a direct X equivalent
$gltype{'GLenum'} = { size => 4, encoding => 'byte_order', type => 'FT_UINT32', base => 'BASE_HEX|BASE_EXT_STRING',
get => 'tvb_get_guint32', list => 'listOfCard32',
val => '&mesa_enum_ext', };
return;
}
$name = 'GL'.$name;
if (defined($float) && $float eq 'true') {
$base = 'float';
$base = 'double' if ($size == 8);
} else {
$base = 'INT';
if (defined($unsigned) && $unsigned eq 'true') {
$base = 'CARD';
}
$base .= ($size * 8);
$base = 'BOOL' if ($name eq 'bool');
$base = 'BYTE' if ($name eq 'void');
}
$gltype{$name} = $basictype{$base};
}
sub registered_name($$)
{
my $name = shift;
my $field = shift;
return "hf_x11_$header"."_$name"."_$field";
}
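# For example, when $header is "glx_render" (as set in the Mesa section
# below), registered_name("Begin", "mode") yields
# "hf_x11_glx_render_Begin_mode" (illustrative function and field names).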
sub mesa_function {
my ($t, $elt) = @_;
# rop == glRender sub-op
# sop == GLX minor opcode
my $glx = $elt->first_child('glx');
unless(defined $glx) { $t->purge; return; }
my $rop = $glx->att('rop');
unless (defined $rop) { $t->purge; return; }
# Ideally, we want the main name, not the alias name.
# Practically, we'd have to scan the file twice to find
# the functions that we want to skip.
my $alias = $elt->att('alias');
if (defined $alias) { $t->purge; return; }
my $name = $elt->att('name');
$request{$rop} = $name;
my $image;
my $length = 0;
my @elements = $elt->children('param');
# Wireshark defines _U_ to mean "Unused" (compiler specific define)
if (!@elements) {
print $impl <<eot
static void mesa_$name(tvbuff_t *tvb _U_, int *offsetp _U_, proto_tree *t _U_, guint byte_order _U_, int length _U_)
{
eot
;
} else {
print $impl <<eot
static void mesa_$name(tvbuff_t *tvb, int *offsetp, proto_tree *t, guint byte_order, int length _U_)
{
eot
;
}
my %type_param;
foreach my $e (@elements) {
# Detect count && variable_param
my $count = $e->att('count');
my $variable_param = $e->att('variable_param');
if (defined $count and defined $variable_param) {
$type_param{$variable_param} = 1;
}
}
foreach my $e (@elements) {
# Register field with wireshark
my $type = $e->att('type');
$type =~ s/^const //;
my $list;
$list = 1 if ($type =~ /\*$/);
$type =~ s/ \*$//;
my $fieldname = $e->att('name');
my $regname = registered_name($name, $fieldname);
my $info = $gltype{$type};
my $ft = $info->{'type'};
my $base = $info->{'base'};
my $val = $info->{'val'} // 'NULL';
my $count = $e->att('count');
my $variable_param = $e->att('variable_param');
if ($list and $count and $variable_param) {
print $decl "static int ${regname} = -1;\n";
print $reg "{ &$regname, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
print $decl "static int ${regname}_signed = -1;\n";
print $reg "{ &${regname}_signed, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_INT8, BASE_DEC, NULL, 0, NULL, HFILL }},\n";
print $decl "static int ${regname}_unsigned = -1;\n";
print $reg "{ &${regname}_unsigned, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_UINT8, BASE_DEC, NULL, 0, NULL, HFILL }},\n";
print $decl "static int ${regname}_item_card16 = -1;\n";
print $reg "{ &${regname}_item_card16, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_UINT16, BASE_DEC, NULL, 0, NULL, HFILL }},\n";
print $decl "static int ${regname}_item_int16 = -1;\n";
print $reg "{ &${regname}_item_int16, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_INT16, BASE_DEC, NULL, 0, NULL, HFILL }},\n";
print $decl "static int ${regname}_item_card32 = -1;\n";
print $reg "{ &${regname}_item_card32, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_UINT32, BASE_DEC, NULL, 0, NULL, HFILL }},\n";
print $decl "static int ${regname}_item_int32 = -1;\n";
print $reg "{ &${regname}_item_int32, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_INT32, BASE_DEC, NULL, 0, NULL, HFILL }},\n";
print $decl "static int ${regname}_item_float = -1;\n";
print $reg "{ &${regname}_item_float, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", FT_FLOAT, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
} else {
print $decl "static int $regname = -1;\n";
if ($list and $info->{'size'} > 1) {
print $reg "{ &$regname, { \"$fieldname\", \"x11.glx.render.$name.$fieldname.list\", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
$regname .= '_item';
print $decl "static int $regname = -1;\n";
}
print $reg "{ &$regname, { \"$fieldname\", \"x11.glx.render.$name.$fieldname\", $ft, $base, $val, 0, NULL, HFILL }},\n";
if ($e->att('counter') or $type_param{$fieldname}) {
print $impl " int $fieldname;\n";
}
}
if ($list) {
if ($e->att('img_format')) {
$image = 1;
foreach my $wholename (('swap bytes', 'lsb first')) {
# Boolean values
my $varname = $wholename;
$varname =~ s/\s//g;
my $regname = registered_name($name, $varname);
print $decl "static int $regname = -1;\n";
print $reg "{ &$regname, { \"$wholename\", \"x11.glx.render.$name.$varname\", FT_BOOLEAN, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
}
foreach my $wholename (('row length', 'skip rows', 'skip pixels', 'alignment')) {
# Integer values
my $varname = $wholename;
$varname =~ s/\s//g;
my $regname = registered_name($name, $varname);
print $decl "static int $regname = -1;\n";
print $reg "{ &$regname, { \"$wholename\", \"x11.glx.render.$name.$varname\", FT_UINT32, BASE_HEX_DEC, NULL, 0, NULL, HFILL }},\n";
}
}
}
}
# The image requests have a few implicit elements first:
if ($image) {
foreach my $wholename (('swap bytes', 'lsb first')) {
# Boolean values
my $varname = $wholename;
$varname =~ s/\s//g;
my $regname = registered_name($name, $varname);
print $impl " proto_tree_add_item(t, $regname, tvb, *offsetp, 1, byte_order);\n";
print $impl " *offsetp += 1;\n";
$length += 1;
}
print $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, 2, ENC_NA);\n";
print $impl " *offsetp += 2;\n";
$length += 2;
foreach my $wholename (('row length', 'skip rows', 'skip pixels', 'alignment')) {
# Integer values
my $varname = $wholename;
$varname =~ s/\s//g;
my $regname = registered_name($name, $varname);
print $impl " proto_tree_add_item(t, $regname, tvb, *offsetp, 4, byte_order);\n";
print $impl " *offsetp += 4;\n";
$length += 4;
}
}
foreach my $e (@elements) {
my $type = $e->att('type');
$type =~ s/^const //;
my $list;
$list = 1 if ($type =~ /\*$/);
$type =~ s/ \*$//;
my $fieldname = $e->att('name');
my $regname = registered_name($name, $fieldname);
my $info = $gltype{$type};
my $ft = $info->{'type'};
my $base = $info->{'base'};
if (!$list) {
my $size = $info->{'size'};
my $encoding = $info->{'encoding'};
my $get = $info->{'get'};
if ($e->att('counter') or $type_param{$fieldname}) {
if ($get ne "tvb_get_guint8") {
print $impl " $fieldname = $get(tvb, *offsetp, $encoding);\n";
} else {
print $impl " $fieldname = $get(tvb, *offsetp);\n";
}
}
print $impl " proto_tree_add_item(t, $regname, tvb, *offsetp, $size, $encoding);\n";
print $impl " *offsetp += $size;\n";
$length += $size;
} else { # list
my $list = $info->{'list'};
my $count = $e->att('count');
my $variable_param = $e->att('variable_param');
if (defined($count) && !defined($variable_param)) {
$regname .= ", $regname".'_item' if ($info->{'size'} > 1);
print $impl " $list(tvb, offsetp, t, $regname, $count, byte_order);\n";
} else {
if (defined($count)) {
# Currently, only CallLists has both a count and a variable_param
# The XML contains a size description of all the possibilities
# for CallLists, but not a type description. Implement by hand,
# with the caveat that more types may need to be added in the
# future.
say $impl " switch($variable_param) {";
say $impl " case 0x1400: /* BYTE */";
say $impl " listOfByte(tvb, offsetp, t, ${regname}_signed, $count, byte_order);";
say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - $count), ENC_NA);";
say $impl " *offsetp += (length - $length - $count);";
say $impl " break;";
say $impl " case 0x1401: /* UNSIGNED_BYTE */";
say $impl " listOfByte(tvb, offsetp, t, ${regname}_unsigned, $count, byte_order);";
say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - $count), ENC_NA);";
say $impl " *offsetp += (length - $length - $count);";
say $impl " break;";
say $impl " case 0x1402: /* SHORT */";
say $impl " listOfInt16(tvb, offsetp, t, $regname, ${regname}_item_int16, $count, byte_order);";
say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - 2 * $count), ENC_NA);";
say $impl " *offsetp += (length - $length - 2 * $count);";
say $impl " break;";
say $impl " case 0x1403: /* UNSIGNED_SHORT */";
say $impl " listOfCard16(tvb, offsetp, t, $regname, ${regname}_item_card16, $count, byte_order);";
say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - 2 * $count), ENC_NA);";
say $impl " *offsetp += (length - $length - 2 * $count);";
say $impl " break;";
say $impl " case 0x1404: /* INT */";
say $impl " listOfInt32(tvb, offsetp, t, $regname, ${regname}_item_int32, $count, byte_order);";
say $impl " break;";
say $impl " case 0x1405: /* UNSIGNED_INT */";
say $impl " listOfCard32(tvb, offsetp, t, $regname, ${regname}_item_card32, $count, byte_order);";
say $impl " break;";
say $impl " case 0x1406: /* FLOAT */";
say $impl " listOfFloat(tvb, offsetp, t, $regname, ${regname}_item_float, $count, byte_order);";
say $impl " break;";
say $impl " case 0x1407: /* 2_BYTES */";
say $impl " listOfCard16(tvb, offsetp, t, $regname, ${regname}_item_card16, $count, ENC_BIG_ENDIAN);";
say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - 2 * $count), ENC_NA);";
say $impl " *offsetp += (length - $length - 2 * $count);";
say $impl " break;";
say $impl " case 0x1408: /* 3_BYTES */";
say $impl " UNDECODED(3 * $count);";
say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - 3 * $count), ENC_NA);";
say $impl " *offsetp += (length - $length - 3 * $count);";
say $impl " break;";
say $impl " case 0x1409: /* 4_BYTES */";
say $impl " listOfCard32(tvb, offsetp, t, $regname, ${regname}_item_card32, $count, ENC_BIG_ENDIAN);";
say $impl " break;";
say $impl " case 0x140B: /* HALF_FLOAT */";
say $impl " UNDECODED(2 * $count);";
say $impl " proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, (length - $length - 2 * $count), ENC_NA);";
say $impl " *offsetp += (length - $length - 2 * $count);";
say $impl " break;";
say $impl " default: /* Unknown */";
say $impl " UNDECODED(length - $length);";
say $impl " break;";
say $impl " }";
} else {
$regname .= ", $regname".'_item' if ($info->{'size'} > 1);
print $impl " $list(tvb, offsetp, t, $regname, (length - $length) / $gltype{$type}{'size'}, byte_order);\n";
}
}
}
}
print $impl "}\n\n";
$t->purge;
}
sub get_op($;$);
sub get_unop($;$);
sub get_ref($$)
{
my $elt = shift;
my $refref = shift;
my $rv;
given($elt->name()) {
when ('fieldref') {
$rv = $elt->text();
$refref->{$rv} = 1;
$rv = 'f_'.$rv;
}
when ('value') { $rv = $elt->text(); }
when ('op') { $rv = get_op($elt, $refref); }
when (['unop','popcount']) { $rv = get_unop($elt, $refref); }
default { die "Invalid op fragment: $_" }
}
return $rv;
}
sub get_op($;$) {
my $op = shift;
my $refref = shift // {};
my @elements = $op->children(qr/fieldref|value|op|unop|popcount/);
(@elements == 2) or die ("Wrong number of children for 'op'\n");
my $left;
my $right;
$left = get_ref($elements[0], $refref);
$right = get_ref($elements[1], $refref);
return "($left " . $op->att('op') . " $right)";
}
sub get_unop($;$) {
my $op = shift;
my $refref = shift // {};
my @elements = $op->children(qr/fieldref|value|op|unop|popcount/);
(@elements == 1) or die ("Wrong number of children for 'unop'\n");
my $left;
$left = get_ref($elements[0], $refref);
given ($op->name()) {
when ('unop') {
return '(' . $op->att('op') . "$left)";
}
when ('popcount') {
return "ws_count_ones($left)";
}
default { die "Invalid unop element $op->name()\n"; }
}
}
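# As an illustration, an expression such as
#   <op op="*"><fieldref>width</fieldref><fieldref>height</fieldref></op>
# is rendered by get_op() as the C expression "(f_width * f_height)",
# and <popcount><fieldref>mask</fieldref></popcount> becomes
# "ws_count_ones(f_mask)".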
sub qualname {
my $name = shift;
$name = $incname[0].':'.$name unless $name =~ /:/;
return $name
}
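# e.g. within the render extension, qualname('PICTURE') returns
# "render:PICTURE", while an already-qualified name is passed through.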
sub get_simple_info {
my $name = shift;
my $info = $basictype{$name};
return $info if (defined $info);
$info = $simpletype{$name};
return $info if (defined $info);
if (defined($type_name{$name})) {
return $simpletype{$type_name{$name}};
}
return undef
}
sub get_struct_info {
my $name = shift;
my $info = $struct{$name};
return $info if (defined $info);
if (defined($type_name{$name})) {
return $struct{$type_name{$name}};
}
return undef
}
sub getinfo {
my $name = shift;
my $info = get_simple_info($name) // get_struct_info($name);
# If the script fails here search for $name in this script and remove it from the black list
die "$name is defined to be unused in process-x11-xcb.pl but is actually used!" if (defined($info) && $info == "1");
return $info;
}
sub dump_enum_values($)
{
my $e = shift;
defined($enum{$e}) or die("Enum $e not found");
my $enumname = "x11_enum_$e";
return $enumname if (defined $enum{$e}{done});
say $enum 'static const value_string '.$enumname.'[] = {';
my $value = $enum{$e}{value};
for my $val (sort { $a <=> $b } keys %$value) {
say $enum sprintf(" { %3d, \"%s\" },", $val, $$value{$val});
}
say $enum sprintf(" { %3d, NULL },", 0);
say $enum '};';
say $enum '';
$enum{$e}{done} = 1;
return $enumname;
}
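# A hypothetical enum arriving here as "foo", with items A=0 and B=1,
# would be emitted as:
#   static const value_string x11_enum_foo[] = {
#       {   0, "A" },
#       {   1, "B" },
#       {   0, NULL },
#   };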
# Find all references, so we can declare only the minimum necessary
sub reference_elements($$);
sub reference_elements($$)
{
my $e = shift;
my $refref = shift;
given ($e->name()) {
when ('switch') {
my $lentype = $e->first_child();
if (defined $lentype) {
given ($lentype->name()) {
when ('fieldref') { $refref->{field}{$lentype->text()} = 1; }
when ('op') { get_op($lentype, $refref->{field}); }
}
}
my @elements = $e->children(qr/(bit)?case/);
for my $case (@elements) {
my @sub_elements = $case->children(qr/list|switch/);
foreach my $sub_e (@sub_elements) {
reference_elements($sub_e, $refref);
}
}
}
when ('list') {
my $type = $e->att('type');
my $info = getinfo($type);
if (defined $info->{paramref}) {
for my $pref (keys %{$info->{paramref}}) {
$refref->{field}{$pref} = 1;
}
}
my $lentype = $e->first_child();
if (defined $lentype) {
given ($lentype->name()) {
when ('fieldref') { $refref->{field}{$lentype->text()} = 1; }
when ('op') { get_op($lentype, $refref->{field}); }
when (['unop','popcount']) { get_unop($lentype, $refref->{field}); }
when ('sumof') { $refref->{sumof}{$lentype->att('ref')} = 1; }
}
} else {
$refref->{field}{'length'} = 1;
$refref->{'length'} = 1;
}
}
}
}
sub register_element($$$$;$)
{
my $e = shift;
my $varpat = shift;
my $humanpat = shift;
my $refref = shift;
my $indent = shift // ' ' x 4;
given ($e->name()) {
when ('pad') { return; } # Pad has no variables
        when ('switch') { return; } # Switch defines variables in a tighter scope to avoid collisions
}
# Register field with wireshark
my $fieldname = $e->att('name');
my $type = $e->att('type') or die ("Field $fieldname does not have a valid type\n");
my $regname = 'hf_x11_'.sprintf ($varpat, $fieldname);
my $humanname = 'x11.'.sprintf ($humanpat, $fieldname);
my $info = getinfo($type);
my $ft = $info->{'type'} // 'FT_NONE';
my $base = $info->{'base'} // 'BASE_NONE';
my $vals = 'NULL';
my $enum = $e->att('enum') // $e->att('altenum');
if (defined $enum) {
my $enumname = dump_enum_values($enum_name{$enum});
$vals = "VALS($enumname)";
# Wireshark does not allow FT_BYTES, FT_BOOLEAN, or BASE_NONE to have an enum
$ft =~ s/FT_BYTES/FT_UINT8/;
$ft =~ s/FT_BOOLEAN/FT_UINT8/;
$base =~ s/BASE_NONE/BASE_DEC/;
}
$enum = $e->att('mask');
if (defined $enum) {
# Create subtree items:
defined($enum{$enum_name{$enum}}) or die("Enum $enum not found");
# Wireshark does not allow FT_BYTES or BASE_NONE to have an enum
$ft =~ s/FT_BYTES/FT_UINT8/;
$base =~ s/BASE_NONE/BASE_DEC/;
my $bitsize = $info->{'size'} * 8;
my $bit = $enum{$enum_name{$enum}}{bit};
for my $val (sort { $a <=> $b } keys %$bit) {
my $itemname = $$bit{$val};
my $item = $regname . '_mask_' . $itemname;
my $itemhuman = $humanname . '.' . $itemname;
my $bitshift = "1U << $val";
say $decl "static int $item = -1;";
say $reg "{ &$item, { \"$itemname\", \"$itemhuman\", FT_BOOLEAN, $bitsize, NULL, $bitshift, NULL, HFILL }},";
}
}
print $decl "static int $regname = -1;\n";
if ($e->name() eq 'list' and defined $info->{'size'} and $info->{'size'} > 1) {
print $reg "{ &$regname, { \"$fieldname\", \"$humanname.list\", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
$regname .= '_item';
print $decl "static int $regname = -1;\n";
}
print $reg "{ &$regname, { \"$fieldname\", \"$humanname\", $ft, $base, $vals, 0, NULL, HFILL }},\n";
if ($refref->{sumof}{$fieldname}) {
print $impl $indent."int sumof_$fieldname = 0;\n";
}
if ($e->name() eq 'field') {
if ($refref->{field}{$fieldname} and get_simple_info($type)) {
# Pre-declare variable
if ($ft eq 'FT_FLOAT') {
print $impl $indent."gfloat f_$fieldname;\n";
} elsif ($ft eq 'FT_DOUBLE') {
print $impl $indent."gdouble f_$fieldname;\n";
} elsif ($ft eq 'FT_INT64' or $ft eq 'FT_UINT64') {
print $impl $indent."gint64 f_$fieldname;\n";
} else {
print $impl $indent."int f_$fieldname;\n";
}
}
}
}
sub dissect_element($$$$$;$$);
sub dissect_element($$$$$;$$)
{
my $e = shift;
my $varpat = shift;
my $humanpat = shift;
my $length = shift;
my $refref = shift;
my $adjustlength = shift;
my $indent = shift // ' ' x 4;
given ($e->name()) {
when ('pad') {
my $bytes = $e->att('bytes');
my $align = $e->att('align');
if (defined $bytes) {
print $impl $indent."proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, $bytes, ENC_NA);\n";
print $impl $indent."*offsetp += $bytes;\n";
$length += $bytes;
} else {
say $impl $indent.'if (*offsetp % '.$align.') {';
say $impl $indent." proto_tree_add_item(t, hf_x11_unused, tvb, *offsetp, ($align - *offsetp % $align), ENC_NA);";
say $impl $indent." *offsetp += ($align - *offsetp % $align);";
say $impl $indent."}";
if ($length % $align != 0) {
$length += $align - $length % $align;
}
if ($adjustlength) {
say $impl $indent.'length = ((length + '.($align-1).') & ~'.($align-1).');';
}
}
}
when ('field') {
my $fieldname = $e->att('name');
my $regname = 'hf_x11_'.sprintf ($varpat, $fieldname);
my $type = $e->att('type');
if (get_simple_info($type)) {
my $info = get_simple_info($type);
my $size = $info->{'size'};
my $encoding = $info->{'encoding'};
my $get = $info->{'get'};
if ($e->att('enum') // $e->att('altenum')) {
my $fieldsize = $size * 8;
print $impl $indent;
if ($refref->{field}{$fieldname}) {
print $impl "f_$fieldname = ";
}
say $impl "field$fieldsize(tvb, offsetp, t, $regname, byte_order);";
} elsif ($e->att('mask')) {
if ($refref->{field}{$fieldname}) {
if ($get ne "tvb_get_guint8") {
say $impl $indent."f_$fieldname = $get(tvb, *offsetp, byte_order);";
} else {
say $impl $indent."f_$fieldname = $get(tvb, *offsetp);";
}
}
my $bitmask_field = $fieldname . "_bits";
say $impl $indent."{";
say $impl $indent." int* const $bitmask_field [] = {";
my $bit = $enum{$enum_name{$e->att('mask')}}{bit};
for my $val (sort { $a <=> $b } keys %$bit) {
my $item = $regname . '_mask_' . $$bit{$val};
say $impl "$indent$indent&$item,";
}
say $impl "$indent$indent" . "NULL";
say $impl $indent." };";
say $impl $indent." proto_tree_add_bitmask(t, tvb, *offsetp, $regname, ett_x11_rectangle, $bitmask_field, $encoding);";
say $impl $indent."}";
say $impl $indent."*offsetp += $size;";
} else {
if ($refref->{field}{$fieldname}) {
if ($get ne "tvb_get_guint8") {
say $impl $indent."f_$fieldname = $get(tvb, *offsetp, byte_order);";
} else {
say $impl $indent."f_$fieldname = $get(tvb, *offsetp);";
}
}
print $impl $indent."proto_tree_add_item(t, $regname, tvb, *offsetp, $size, $encoding);\n";
print $impl $indent."*offsetp += $size;\n";
}
$length += $size;
} elsif (get_struct_info($type)) {
# TODO: variable-lengths (when $info->{'size'} == 0 )
my $info = get_struct_info($type);
$length += $info->{'size'};
print $impl $indent."struct_$info->{'name'}(tvb, offsetp, t, byte_order, 1);\n";
} else {
die ("Unrecognized type: $type\n");
}
}
when ('list') {
my $fieldname = $e->att('name');
my $regname = 'hf_x11_'.sprintf ($varpat, $fieldname);
my $type = $e->att('type');
my $info = getinfo($type);
my $lencalc;
my $lentype = $e->first_child();
if (defined $info->{'size'}) {
$lencalc = "(length - $length) / $info->{'size'}";
} else {
$lencalc = "(length - $length)";
}
if (defined $lentype) {
given ($lentype->name()) {
when ('value') { $lencalc = $lentype->text(); }
when ('fieldref') { $lencalc = 'f_'.$lentype->text(); }
when ('paramref') { $lencalc = 'p_'.$lentype->text(); }
when ('op') { $lencalc = get_op($lentype); }
when (['unop','popcount']) { $lencalc = get_unop($lentype); }
when ('sumof') { $lencalc = 'sumof_'.$lentype->att('ref'); }
}
}
if (get_simple_info($type)) {
my $list = $info->{'list'};
my $size = $info->{'size'};
$regname .= ", $regname".'_item' if ($size > 1);
if ($refref->{sumof}{$fieldname}) {
my $get = $info->{'get'};
say $impl $indent."{";
say $impl $indent." int i;";
say $impl $indent." for (i = 0; i < $lencalc; i++) {";
if ($get ne "tvb_get_guint8") {
say $impl $indent." sumof_$fieldname += $get(tvb, *offsetp + i * $size, byte_order);";
} else {
say $impl $indent." sumof_$fieldname += $get(tvb, *offsetp + i * $size);";
}
say $impl $indent." }";
say $impl $indent."}";
}
print $impl $indent."$list(tvb, offsetp, t, $regname, $lencalc, byte_order);\n";
} elsif (get_struct_info($type)) {
my $si = get_struct_info($type);
my $prefs = "";
foreach my $pref (sort keys %{$si->{paramref}}) {
$prefs .= ", f_$pref";
}
print $impl $indent."struct_$info->{'name'}(tvb, offsetp, t, byte_order, $lencalc$prefs);\n";
} else {
# TODO: Fix unrecognized type. Comment out for now to generate dissector
# die ("Unrecognized type: $type\n");
}
if ($adjustlength && defined($lentype)) {
# Some requests end with a list of unspecified length
# Adjust the length field here so that the next $lencalc will be accurate
if (defined $info->{'size'}) {
say $impl $indent."length -= $lencalc * $info->{'size'};";
} else {
say $impl $indent."length -= $lencalc * 1;";
}
}
}
when ('switch') {
my $switchtype = $e->first_child() or die("Switch element not defined");
my $switchon = get_ref($switchtype, {});
my @elements = $e->children(qr/(bit)?case/);
for my $case (@elements) {
my @refs = $case->children('enumref');
my @test;
my $fieldname;
foreach my $ref (@refs) {
my $enum_ref = $ref->att('ref');
my $field = $ref->text();
$fieldname //= $field; # Use first named field
if ($case->name() eq 'bitcase') {
my $bit = $enum{$enum_name{$enum_ref}}{rbit}{$field};
if (! defined($bit)) {
for my $foo (keys %{$enum{$enum_name{$enum_ref}}{rbit}}) { say "'$foo'"; }
die ("Field '$field' not found in '$enum_ref'");
}
push @test , "$switchon & (1U << $bit)";
} else {
my $val = $enum{$enum_name{$enum_ref}}{rvalue}{$field};
if (! defined($val)) {
for my $foo (keys %{$enum{$enum_name{$enum_ref}}{rvalue}}) { say "'$foo'"; }
die ("Field '$field' not found in '$enum_ref'");
}
push @test , "$switchon == $val";
}
}
if (@test > 1) {
# We have more than one conditional, add parentheses to them.
# We don't add parentheses to all the conditionals because
# clang complains about the extra parens if you do "if ((x == y))".
my @tests_with_parens;
foreach my $conditional (@test) {
push @tests_with_parens, "($conditional)";
}
@test = @tests_with_parens;
}
my $list = join ' || ', @test;
say $impl $indent."if ($list) {";
my $vp = $varpat;
my $hp = $humanpat;
$vp =~ s/%s/${fieldname}_%s/;
$hp =~ s/%s/${fieldname}.%s/;
my @sub_elements = $case->children(qr/pad|field|list|switch/);
my $subref = { field => {}, sumof => {} };
foreach my $sub_e (@sub_elements) {
reference_elements($sub_e, $subref);
}
foreach my $sub_e (@sub_elements) {
register_element($sub_e, $vp, $hp, $subref, $indent . ' ');
}
foreach my $sub_e (@sub_elements) {
$length = dissect_element($sub_e, $vp, $hp, $length, $subref, $adjustlength, $indent . ' ');
}
say $impl $indent."}";
}
}
default { die "Unknown field type: $_\n"; }
}
return $length;
}
sub struct {
my ($t, $elt) = @_;
my $name = $elt->att('name');
my $qualname = qualname($name);
$type_name{$name} = $qualname;
if (defined $struct{$qualname}) {
$t->purge;
return;
}
my @elements = $elt->children(qr/pad|field|list|switch/);
print(" - Struct $name\n");
$name = $qualname;
$name =~ s/:/_/;
my %refs;
my %paramrefs;
my $size = 0;
my $dynamic = 0;
my $needi = 0;
# Find struct size
foreach my $e (@elements) {
my $count;
$count = 1;
given ($e->name()) {
when ('pad') {
my $bytes = $e->att('bytes');
my $align = $e->att('align');
if (defined $bytes) {
$size += $bytes;
next;
}
if (!$dynamic) {
if ($size % $align) {
$size += $align - $size % $align;
}
}
next;
}
when ('list') {
my $type = $e->att('type');
my $info = getinfo($type);
$needi = 1 if ($info->{'size'} == 0);
my $value = $e->first_child();
given($value->name()) {
when ('fieldref') {
$refs{$value->text()} = 1;
$count = 0;
$dynamic = 1;
}
when ('paramref') {
$paramrefs{$value->text()} = $value->att('type');
$count = 0;
$dynamic = 1;
}
when ('op') {
get_op($value, \%refs);
$count = 0;
$dynamic = 1;
}
when (['unop','popcount']) {
get_unop($value, \%refs);
$count = 0;
$dynamic = 1;
}
when ('value') {
$count = $value->text();
}
default { die("Invalid list size $_\n"); }
}
}
when ('field') { }
when ('switch') {
$dynamic = 1;
next;
}
default { die("unrecognized field: $_\n"); }
}
my $type = $e->att('type');
my $info = getinfo($type);
$size += $info->{'size'} * $count;
}
my $prefs = "";
if ($dynamic) {
$size = 0;
foreach my $pref (sort keys %paramrefs) {
$prefs .= ", int p_$pref";
}
print $impl <<eot
static int struct_size_$name(tvbuff_t *tvb _U_, int *offsetp _U_, guint byte_order _U_$prefs)
{
int size = 0;
eot
;
say $impl ' int i, off;' if ($needi);
foreach my $ref (sort keys %refs) {
say $impl " int f_$ref;";
}
foreach my $e (@elements) {
my $count;
$count = 1;
my $type = $e->att('type') // '';
my $info = getinfo($type);
given ($e->name()) {
when ('pad') {
my $bytes = $e->att('bytes');
my $align = $e->att('align');
if (defined $bytes) {
$size += $bytes;
} else {
say $impl ' size = (size + '.($align-1).') & ~'.($align-1).';';
}
}
when ('list') {
my $len = $e->first_child();
my $infosize = $info->{'size'};
my $sizemul;
given ($len->name()) {
when ('op') { $sizemul = get_op($len, \%refs); }
when (['unop','popcount']) { $sizemul = get_unop($len, \%refs); }
when ('fieldref') { $sizemul = 'f_'.$len->text(); }
when ('paramref') { $sizemul = 'p_'.$len->text(); }
when ('value') {
if ($infosize) {
$size += $infosize * $len->text();
} else {
$sizemul = $len->text();
}
}
default { die "Invalid list size: $_\n"; }
}
if (defined $sizemul) {
if ($infosize) {
say $impl " size += $sizemul * $infosize;";
} else {
say $impl " for (i = 0; i < $sizemul; i++) {";
say $impl " off = (*offsetp) + size + $size;";
say $impl " size += struct_size_$info->{name}(tvb, &off, byte_order);";
say $impl ' }';
}
}
}
when ('field') {
my $fname = $e->att('name');
if (defined($refs{$fname})) {
my $get = $info->{'get'};
if ($get ne "tvb_get_guint8") {
say $impl " f_$fname = $info->{'get'}(tvb, *offsetp + size + $size, byte_order);";
} else {
say $impl " f_$fname = $info->{'get'}(tvb, *offsetp + size + $size);";
}
}
$size += $info->{'size'};
}
}
}
say $impl " return size + $size;";
say $impl '}';
        $size = 0; # 0 means "dynamic calculation required"
}
print $decl "static int hf_x11_struct_$name = -1;\n";
print $reg "{ &hf_x11_struct_$name, { \"$name\", \"x11.struct.$name\", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
print $impl <<eot
static void struct_$name(tvbuff_t *tvb, int *offsetp, proto_tree *root, guint byte_order _U_, int count$prefs)
{
int i;
for (i = 0; i < count; i++) {
proto_item *item;
proto_tree *t;
eot
;
my $varpat = 'struct_'.$name.'_%s';
my $humanpat = "struct.$name.%s";
my $refs = { field => {}, sumof => {} };
foreach my $e (@elements) {
reference_elements($e, $refs);
}
foreach my $e (@elements) {
register_element($e, $varpat, $humanpat, $refs, " ");
}
$prefs = "";
foreach my $pref (sort keys %paramrefs) {
$prefs .= ", p_$pref";
}
my $sizecalc = $size;
$size or $sizecalc = "struct_size_$name(tvb, offsetp, byte_order$prefs)";
print $impl <<eot
item = proto_tree_add_item(root, hf_x11_struct_$name, tvb, *offsetp, $sizecalc, ENC_NA);
t = proto_item_add_subtree(item, ett_x11_rectangle);
eot
;
my $length = 0;
foreach my $e (@elements) {
$length = dissect_element($e, $varpat, $humanpat, $length, $refs, 0, " ");
}
print $impl " }\n}\n";
$struct{$qualname} = { size => $size, name => $name, paramref => \%paramrefs };
$t->purge;
}
sub union {
# TODO proper dissection
#
    # Right now, the only extension to use a union is randr;
    # for now, punt.
my ($t, $elt) = @_;
my $name = $elt->att('name');
my $qualname = qualname($name);
$type_name{$name} = $qualname;
if (defined $struct{$qualname}) {
$t->purge;
return;
}
my @elements = $elt->children(qr/field/);
my @sizes;
print(" - Union $name\n");
$name = $qualname;
$name =~ s/:/_/;
# Find union size
foreach my $e (@elements) {
my $type = $e->att('type');
my $info = getinfo($type);
$info->{'size'} > 0 or die ("Error: Union containing variable sized struct $type\n");
push @sizes, $info->{'size'};
}
@sizes = sort {$b <=> $a} @sizes;
my $size = $sizes[0];
print $decl "static int hf_x11_union_$name = -1;\n";
print $reg "{ &hf_x11_union_$name, { \"$name\", \"x11.union.$name\", FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL }},\n";
print $impl <<eot
static void struct_$name(tvbuff_t *tvb, int *offsetp, proto_tree *root, guint byte_order, int count)
{
int i;
int base = *offsetp;
for (i = 0; i < count; i++) {
proto_item *item;
proto_tree *t;
eot
;
my $varpat = 'union_'.$name.'_%s';
my $humanpat = "union.$name.%s";
my $refs = { field => {}, sumof => {} };
foreach my $e (@elements) {
reference_elements($e, $refs);
}
foreach my $e (@elements) {
register_element($e, $varpat, $humanpat, $refs, " ");
}
print $impl <<eot
item = proto_tree_add_item(root, hf_x11_union_$name, tvb, base, $size, ENC_NA);
t = proto_item_add_subtree(item, ett_x11_rectangle);
eot
;
foreach my $e (@elements) {
say $impl ' *offsetp = base;';
dissect_element($e, $varpat, $humanpat, 0, $refs, 0, " ");
}
say $impl " base += $size;";
say $impl ' }';
say $impl ' *offsetp = base;';
say $impl '}';
$struct{$qualname} = { size => $size, name => $name };
$t->purge;
}
sub enum {
my ($t, $elt) = @_;
my $name = $elt->att('name');
my $fullname = $incname[0].'_'.$name;
$enum_name{$name} = $fullname;
$enum_name{$incname[0].':'.$name} = $fullname;
if (defined $enum{$fullname}) {
$t->purge;
return;
}
my @elements = $elt->children('item');
print(" - Enum $name\n");
my $value = {};
my $bit = {};
my $rvalue = {};
my $rbit = {};
$enum{$fullname} = { value => $value, bit => $bit, rbit => $rbit, rvalue => $rvalue };
my $nextvalue = 0;
foreach my $e (@elements) {
my $n = $e->att('name');
my $valtype = $e->first_child(qr/value|bit/);
if (defined $valtype) {
my $val = int($valtype->text());
given ($valtype->name()) {
when ('value') {
$$value{$val} = $n;
$$rvalue{$n} = $val;
$nextvalue = $val + 1;
# Ugly hack to support (temporary, hopefully) ugly
# hack in xinput:ChangeDeviceProperty
# Register certain values as bits also
given ($val) {
when (8) {
$$bit{'3'} = $n;
$$rbit{$n} = 3;
}
when (16) {
$$bit{'4'} = $n;
$$rbit{$n} = 4;
}
when (32) {
$$bit{'5'} = $n;
$$rbit{$n} = 5;
}
}
}
when ('bit') {
$$bit{$val} = $n;
$$rbit{$n} = $val;
}
}
} else {
$$value{$nextvalue} = $n;
$nextvalue++;
}
}
$t->purge;
}
sub request {
my ($t, $elt) = @_;
my $name = $elt->att('name');
print(" - Request $name\n");
$request{$elt->att('opcode')} = $name;
my $length = 4;
my @elements = $elt->children(qr/pad|field|list|switch/);
# Wireshark defines _U_ to mean "Unused" (compiler specific define)
if (!@elements) {
print $impl <<eot
static void $header$name(tvbuff_t *tvb _U_, packet_info *pinfo _U_, int *offsetp _U_, proto_tree *t _U_, guint byte_order _U_, int length _U_)
{
eot
;
} else {
print $impl <<eot
static void $header$name(tvbuff_t *tvb, packet_info *pinfo _U_, int *offsetp, proto_tree *t, guint byte_order, int length _U_)
{
eot
;
}
my $varpat = $header.'_'.$name.'_%s';
my $humanpat = "$header.$name.%s";
my $refs = { field => {}, sumof => {} };
foreach my $e (@elements) {
reference_elements($e, $refs);
}
foreach my $e (@elements) {
register_element($e, $varpat, $humanpat, $refs);
}
foreach my $e (@elements) {
if ($e->name() eq 'list' && $name eq 'Render' && $e->att('name') eq 'data' && -e "$mesadir/gl_API.xml") {
# Special case: Use mesa-generated dissector for 'data'
print $impl " dispatch_glx_render(tvb, pinfo, offsetp, t, byte_order, (length - $length));\n";
} else {
$length = dissect_element($e, $varpat, $humanpat, $length, $refs, 1);
}
}
say $impl '}';
my $reply = $elt->first_child('reply');
if ($reply) {
$reply{$elt->att('opcode')} = $name;
$varpat = $header.'_'.$name.'_reply_%s';
$humanpat = "$header.$name.reply.%s";
@elements = $reply->children(qr/pad|field|list|switch/);
# Wireshark defines _U_ to mean "Unused" (compiler specific define)
if (!@elements) {
say $impl "static void $header$name"."_Reply(tvbuff_t *tvb _U_, packet_info *pinfo, int *offsetp _U_, proto_tree *t _U_, guint byte_order _U_)\n{";
} else {
say $impl "static void $header$name"."_Reply(tvbuff_t *tvb, packet_info *pinfo, int *offsetp, proto_tree *t, guint byte_order)\n{";
}
say $impl ' int sequence_number;' if (@elements);
my $refs = { field => {}, sumof => {} };
foreach my $e (@elements) {
reference_elements($e, $refs);
}
say $impl ' int f_length;' if ($refs->{field}{'length'});
say $impl ' int length;' if ($refs->{length});
foreach my $e (@elements) {
register_element($e, $varpat, $humanpat, $refs);
}
say $impl '';
say $impl ' col_append_fstr(pinfo->cinfo, COL_INFO, "-'.$name.'");';
say $impl '';
say $impl ' REPLY(reply);';
my $first = 1;
my $length = 1;
foreach my $e (@elements) {
$length = dissect_element($e, $varpat, $humanpat, $length, $refs);
if ($first) {
$first = 0;
say $impl ' sequence_number = tvb_get_guint16(tvb, *offsetp, byte_order);';
say $impl ' proto_tree_add_uint_format_value(t, hf_x11_reply_sequencenumber, tvb, *offsetp, 2, sequence_number,';
say $impl ' "%d ('.$header.'-'.$name.')", sequence_number);';
say $impl ' *offsetp += 2;';
if ($refs->{field}{length}) {
say $impl ' f_length = tvb_get_guint32(tvb, *offsetp, byte_order);';
}
if ($refs->{length}) {
say $impl ' length = f_length * 4 + 32;';
}
say $impl ' proto_tree_add_item(t, hf_x11_replylength, tvb, *offsetp, 4, byte_order);';
say $impl ' *offsetp += 4;';
$length += 6;
}
}
say $impl '}';
}
$t->purge;
}
sub defxid(@) {
my $name;
while ($name = shift) {
my $qualname = qualname($name);
$simpletype{$qualname} = { size => 4, encoding => 'byte_order', type => 'FT_UINT32', base => 'BASE_HEX', get => 'tvb_get_guint32', list => 'listOfCard32', };
$type_name{$name} = $qualname;
}
}
sub xidtype {
my ($t, $elt) = @_;
my $name = $elt->att('name');
defxid($name);
$t->purge;
}
sub typedef {
my ($t, $elt) = @_;
my $oldname = $elt->att('oldname');
my $newname = $elt->att('newname');
my $qualname = qualname($newname);
# Duplicate the type
my $info = get_simple_info($oldname);
if ($info) {
$simpletype{$qualname} = $info;
} elsif ($info = get_struct_info($oldname)) {
$struct{$qualname} = $info;
} else {
die ("$oldname not found while attempting to typedef $newname\n");
}
$type_name{$newname} = $qualname;
$t->purge;
}
sub error {
my ($t, $elt) = @_;
my $number = $elt->att('number');
if ($number >= 0) {
my $name = $elt->att('name');
print $error " \"$header-$name\",\n";
}
$t->purge;
}
sub event {
my ($t, $elt) = @_;
my $number = $elt->att('number');
$number or return;
my $name = $elt->att('name');
my $xge = $elt->att('xge');
if ($xge) {
$genericevent{$number} = $name;
} else {
$event{$number} = $name;
}
my $length = 1;
my @elements = $elt->children(qr/pad|field|list|switch/);
# Wireshark defines _U_ to mean "Unused" (compiler specific define)
if (!@elements) {
if ($xge) {
print $impl <<eot
static void $header$name(tvbuff_t *tvb _U_, int length _U_, int *offsetp _U_, proto_tree *t _U_, guint byte_order _U_)
{
eot
;
    } else {
print $impl <<eot
static void $header$name(tvbuff_t *tvb _U_, int *offsetp _U_, proto_tree *t _U_, guint byte_order _U_)
{
eot
;
}
} else {
if ($xge) {
$length = 10;
print $impl <<eot
static void $header$name(tvbuff_t *tvb, int length _U_, int *offsetp, proto_tree *t, guint byte_order)
{
eot
;
} else {
print $impl <<eot
static void $header$name(tvbuff_t *tvb, int *offsetp, proto_tree *t, guint byte_order)
{
eot
;
}
}
my $varpat = $header.'_'.$name.'_%s';
my $humanpat = "$header.$name.%s";
my $refs = { field => {}, sumof => {} };
foreach my $e (@elements) {
reference_elements($e, $refs);
}
foreach my $e (@elements) {
register_element($e, $varpat, $humanpat, $refs);
}
if ($xge) {
say $impl " proto_tree_add_uint_format_value(t, hf_x11_minor_opcode, tvb, *offsetp, 2, $number,";
say $impl " \"$name ($number)\");";
foreach my $e (@elements) {
$length = dissect_element($e, $varpat, $humanpat, $length, $refs);
}
} else {
my $first = 1;
foreach my $e (@elements) {
$length = dissect_element($e, $varpat, $humanpat, $length, $refs);
if ($first) {
$first = 0;
say $impl " CARD16(event_sequencenumber);";
}
}
}
say $impl "}\n";
$t->purge;
}
sub include_start {
my ($t, $elt) = @_;
my $header = $elt->att('header');
unshift @incname, $header;
}
sub include_end {
shift @incname;
}
sub include
{
my ($t, $elt) = @_;
my $include = $elt->text();
print " - Import $include\n";
my $xml = XML::Twig->new(
start_tag_handlers => {
'xcb' => \&include_start,
},
twig_roots => {
'import' => \&include,
'struct' => \&struct,
'xidtype' => \&xidtype,
'xidunion' => \&xidtype,
'typedef' => \&typedef,
'enum' => \&enum,
},
end_tag_handlers => {
'xcb' => \&include_end,
});
$xml->parsefile("$srcdir/xcbproto/src/$include.xml") or die ("Cannot open $include.xml\n");
$t->purge;
}
sub xcb_start {
my ($t, $elt) = @_;
$header = $elt->att('header');
$extname = ($elt->att('extension-name') or $header);
unshift @incname, $header;
print("Extension $extname\n");
undef %request;
undef %genericevent;
undef %event;
undef %reply;
%simpletype = ();
%enum_name = ();
%type_name = ();
print $error "const char *$header"."_errors[] = {\n";
}
sub xcb {
my ($t, $elt) = @_;
my $xextname = $elt->att('extension-xname');
my $lookup_name = $header . "_extension_minor";
my $error_name = $header . "_errors";
my $event_name = $header . "_events";
my $genevent_name = 'NULL';
my $reply_name = $header . "_replies";
print $decl "static int hf_x11_$lookup_name = -1;\n\n";
print $impl "static const value_string $lookup_name"."[] = {\n";
foreach my $req (sort {$a <=> $b} keys %request) {
print $impl " { $req, \"$request{$req}\" },\n";
}
print $impl " { 0, NULL }\n";
print $impl "};\n";
say $impl "const x11_event_info $event_name".'[] = {';
foreach my $e (sort {$a <=> $b} keys %event) {
say $impl " { \"$header-$event{$e}\", $header$event{$e} },";
}
say $impl ' { NULL, NULL }';
say $impl '};';
if (%genericevent) {
$genevent_name = $header.'_generic_events';
say $impl 'static const x11_generic_event_info '.$genevent_name.'[] = {';
for my $val (sort { $a <=> $b } keys %genericevent) {
say $impl sprintf(" { %3d, %s },", $val, $header.$genericevent{$val});
}
say $impl sprintf(" { %3d, NULL },", 0);
say $impl '};';
say $impl '';
}
print $impl "static x11_reply_info $reply_name"."[] = {\n";
foreach my $e (sort {$a <=> $b} keys %reply) {
print $impl " { $e, $header$reply{$e}_Reply },\n";
}
print $impl " { 0, NULL }\n";
print $impl "};\n";
print $reg "{ &hf_x11_$lookup_name, { \"extension-minor\", \"x11.extension-minor\", FT_UINT8, BASE_DEC, VALS($lookup_name), 0, \"minor opcode\", HFILL }},\n\n";
print $impl <<eot
static void dispatch_$header(tvbuff_t *tvb, packet_info *pinfo, int *offsetp, proto_tree *t, guint byte_order)
{
int minor, length;
minor = CARD8($lookup_name);
length = REQUEST_LENGTH();
col_append_fstr(pinfo->cinfo, COL_INFO, "-%s",
val_to_str(minor, $lookup_name,
"<Unknown opcode %d>"));
switch (minor) {
eot
;
foreach my $req (sort {$a <=> $b} keys %request) {
print $impl " case $req:\n";
print $impl " $header$request{$req}(tvb, pinfo, offsetp, t, byte_order, length);\n";
print $impl " break;\n";
}
say $impl " /* No need for a default case here, since Unknown is printed above,";
say $impl " and UNDECODED() is taken care of by dissect_x11_request */";
print $impl " }\n}\n";
print $impl <<eot
static void register_$header(void)
{
set_handler("$xextname", dispatch_$header, $error_name, $event_name, $genevent_name, $reply_name);
}
eot
;
print $error " NULL\n};\n\n";
push @register, $header;
}
sub find_version {
#my $git = `which git`;
#chomp($git);
#-x $git or return 'unknown';
my $lib = shift;
# this will generate an error on stderr if git isn't in our $PATH
# but that's OK. The version is still set to 'unknown' in that case
# and at least the operator could see it.
my $ver = `git --git-dir=$lib/.git describe --tags`;
    # Backticks return an empty (but defined) string when git fails, so
    # check for that as well as for undef.
    $ver = 'unknown' if !defined($ver) || $ver eq '';
chomp $ver;
return $ver;
}
sub add_generated_header {
my ($out, $using) = @_;
my $ver = find_version($using);
$using = File::Spec->abs2rel ($using, $srcdir);
print $out <<eot
/* Do not modify this file. */
/* It was automatically generated by $script_name
using $using version $ver */
eot
;
# Add license text
print $out <<eot
/*
* Copyright 2008, 2009, 2013, 2014 Open Text Corporation <pharris[AT]opentext.com>
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald[AT]wireshark.org>
* Copyright 1998 Gerald Combs
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
eot
;
}
# initialize core X11 protocol
# Do this in the Makefile now
#system('./process-x11-fields.pl < x11-fields');
# Extension implementation
$impl = new IO::File "> $srcdir/x11-extension-implementation.h"
or die ("Cannot open $srcdir/x11-extension-implementation.h for writing\n");
$error = new IO::File "> $srcdir/x11-extension-errors.h"
or die ("Cannot open $srcdir/x11-extension-errors.h for writing\n");
add_generated_header($impl, $srcdir . '/xcbproto');
add_generated_header($error, $srcdir . '/xcbproto');
# Open the files generated by process-x11-fields.pl for appending
$reg = new IO::File ">> $srcdir/x11-register-info.h"
or die ("Cannot open $srcdir/x11-register-info.h for appending\n");
$decl = new IO::File ">> $srcdir/x11-declarations.h"
or die ("Cannot open $srcdir/x11-declarations.h for appending\n");
print $reg "\n/* Generated by $script_name below this line */\n";
print $decl "\n/* Generated by $script_name below this line */\n";
# Mesa for glRender
if (-e "$mesadir/gl_API.xml") {
$enum = new IO::File "> $srcdir/x11-glx-render-enum.h"
or die ("Cannot open $srcdir/x11-glx-render-enum.h for writing\n");
add_generated_header($enum, $srcdir . '/mesa');
print $enum "static const value_string mesa_enum[] = {\n";
print $impl '#include "x11-glx-render-enum.h"'."\n\n";
print("Mesa glRender:\n");
$header = "glx_render";
my $xml = XML::Twig->new(
start_tag_handlers => {
},
twig_roots => {
'category' => \&mesa_category,
'enum' => \&mesa_enum,
'type' => \&mesa_type,
'function' => \&mesa_function,
});
$xml->parsefile("$mesadir/gl_API.xml") or die ("Cannot open gl_API\n");
for my $enum_key ( sort {$a<=>$b} keys %mesa_enum_hash) {
say $enum sprintf(" { 0x%04x, \"%s\" },", $enum_key, $mesa_enum_hash{$enum_key});
}
print $enum " { 0, NULL }\n";
print $enum "};\n";
$enum->close();
print $decl "static int hf_x11_glx_render_op_name = -1;\n\n";
print $impl "static const value_string glx_render_op_name"."[] = {\n";
foreach my $req (sort {$a <=> $b} keys %request) {
print $impl " { $req, \"gl$request{$req}\" },\n";
}
print $impl " { 0, NULL }\n";
print $impl "};\n";
print $impl "static value_string_ext mesa_enum_ext = VALUE_STRING_EXT_INIT(mesa_enum);\n";
print $reg "{ &hf_x11_glx_render_op_name, { \"render op\", \"x11.glx.render.op\", FT_UINT16, BASE_DEC, VALS(glx_render_op_name), 0, NULL, HFILL }},\n\n";
# Uses ett_x11_list_of_rectangle, since I am unable to see how the subtree type matters.
print $impl <<eot
static void dispatch_glx_render(tvbuff_t *tvb, packet_info *pinfo, int *offsetp, proto_tree *t, guint byte_order, int length)
{
while (length >= 4) {
guint32 op, len;
int next;
proto_item *ti;
proto_tree *tt;
len = tvb_get_guint16(tvb, *offsetp, byte_order);
op = tvb_get_guint16(tvb, *offsetp + 2, byte_order);
ti = proto_tree_add_uint(t, hf_x11_glx_render_op_name, tvb, *offsetp, len, op);
tt = proto_item_add_subtree(ti, ett_x11_list_of_rectangle);
ti = proto_tree_add_item(tt, hf_x11_request_length, tvb, *offsetp, 2, byte_order);
*offsetp += 2;
proto_tree_add_item(tt, hf_x11_glx_render_op_name, tvb, *offsetp, 2, byte_order);
*offsetp += 2;
if (len < 4) {
expert_add_info(pinfo, ti, &ei_x11_request_length);
/* Eat the rest of the packet, mark it undecoded */
len = length;
op = -1;
}
len -= 4;
next = *offsetp + len;
switch (op) {
eot
;
foreach my $req (sort {$a <=> $b} keys %request) {
print $impl " case $req:\n";
print $impl " mesa_$request{$req}(tvb, offsetp, tt, byte_order, len);\n";
print $impl " break;\n";
}
print $impl " default:\n";
print $impl " proto_tree_add_item(tt, hf_x11_undecoded, tvb, *offsetp, len, ENC_NA);\n";
print $impl " *offsetp += len;\n";
print $impl " }\n";
print $impl " if (*offsetp < next) {\n";
print $impl " proto_tree_add_item(tt, hf_x11_unused, tvb, *offsetp, next - *offsetp, ENC_NA);\n";
print $impl " *offsetp = next;\n";
print $impl " }\n";
print $impl " length -= (len + 4);\n";
print $impl " }\n}\n";
}
$enum = new IO::File "> $srcdir/x11-enum.h"
or die ("Cannot open $srcdir/x11-enum.h for writing\n");
add_generated_header($enum, $srcdir . '/xcbproto');
print $impl '#include "x11-enum.h"'."\n\n";
# XCB
foreach my $ext (@reslist) {
my $xml = XML::Twig->new(
start_tag_handlers => {
'xcb' => \&xcb_start,
},
twig_roots => {
'xcb' => \&xcb,
'import' => \&include,
'request' => \&request,
'struct' => \&struct,
'union' => \&union,
'xidtype' => \&xidtype,
'xidunion' => \&xidtype,
'typedef' => \&typedef,
'error' => \&error,
'errorcopy' => \&error,
'event' => \&event,
'enum' => \&enum,
});
$xml->parsefile($ext) or die ("Cannot open $ext\n");
}
print $impl "static void register_x11_extensions(void)\n{\n";
foreach my $reg (@register) {
print $impl " register_$reg();\n";
}
print $impl "}\n";
#
# Editor modelines
#
# Local Variables:
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
#
# ex: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
# |
Shell Script | wireshark/tools/randpkt-test.sh | #!/bin/bash
# Randpkt testing script for TShark
#
# This script uses Randpkt to generate capture files with randomized
# content. It runs TShark on each generated file and checks for errors.
# The files are processed repeatedly until an error is found.
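# Example invocation (binary dir, pass count and packet type are illustrative):
#   tools/randpkt-test.sh -b build/run -p 10 -t dns
# runs 10 passes of randomized "dns" packets using the binaries in build/run.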
TEST_TYPE="randpkt"
# shellcheck source=tools/test-common.sh
. "$( dirname "$0" )"/test-common.sh || exit 1
# Run under valgrind ?
VALGRIND=0
# Run under AddressSanitizer ?
ASAN=$CONFIGURED_WITH_ASAN
# Trigger an abort if a dissector finds a bug.
# Uncomment to disable
export WIRESHARK_ABORT_ON_DISSECTOR_BUG="True"
# The maximum permitted amount of memory leaked. Eventually this should be
# worked down to zero, but right now that would fail on every single capture.
# Only has effect when running under valgrind.
MAX_LEAK=$(( 1024 * 100 ))
# To do: add options for file names and limits
while getopts "ab:d:gp:t:" OPTCHAR ; do
case $OPTCHAR in
a) ASAN=1 ;;
b) WIRESHARK_BIN_DIR=$OPTARG ;;
d) TMP_DIR=$OPTARG ;;
g) VALGRIND=1 ;;
p) MAX_PASSES=$OPTARG ;;
t) PKT_TYPES=$OPTARG ;;
*) printf "Unknown option: %s\\n" "$OPTARG"
esac
done
shift $(( OPTIND - 1 ))
### usually you won't have to change anything below this line ###
ws_bind_exec_paths
ws_check_exec "$TSHARK" "$RANDPKT" "$DATE" "$TMP_DIR"
[[ -z "$PKT_TYPES" ]] && PKT_TYPES=$($RANDPKT -h | awk '/^\t/ {print $1}')
if [ $VALGRIND -eq 1 ]; then
RUNNER="$( dirname "$0" )/valgrind-wireshark.sh"
COMMON_ARGS="-b $WIRESHARK_BIN_DIR $COMMON_ARGS"
declare -a RUNNER_ARGS=("" "-T")
# Valgrind requires more resources, so permit 1.5x memory and 3x time
# (1.5x time is too small for a few large captures in the menagerie)
MAX_CPU_TIME=$(( 3 * "$MAX_CPU_TIME" ))
MAX_VMEM=$(( 3 * "$MAX_VMEM" / 2 ))
else
# Not using valgrind, use regular tshark.
# TShark arguments (you won't have to change these)
# n Disable network object name resolution
# V Print a view of the details of the packet rather than a one-line summary of the packet
# x Cause TShark to print a hex and ASCII dump of the packet data after printing the summary or details
# r Read packet data from the following infile
RUNNER="$TSHARK"
declare -a RUNNER_ARGS=("-nVxr" "-nr")
fi
RANDPKT_ARGS="-b 2000 -c 5000"
if [ $ASAN -ne 0 ]; then
echo -n "ASan enabled. Virtual memory limit is "
ulimit -v
else
echo "ASan disabled. Virtual memory limit is $MAX_VMEM"
fi
HOWMANY="forever"
if [ "$MAX_PASSES" -gt 0 ]; then
HOWMANY="$MAX_PASSES passes"
fi
echo -n "Running $RUNNER with args: "
printf "\"%s\" " "${RUNNER_ARGS[@]}"
echo "($HOWMANY)"
echo "Running $RANDPKT with args: $RANDPKT_ARGS"
echo ""
# Clean up on <ctrl>C, etc
trap_all() {
printf '\n\nCaught signal. Exiting.\n'
rm -f "$TMP_DIR/$TMP_FILE" "$TMP_DIR/$ERR_FILE"
exit 0
}
trap trap_all HUP INT TERM ABRT
# Iterate over our capture files.
PASS=0
while [ $PASS -lt "$MAX_PASSES" ] || [ "$MAX_PASSES" -lt 1 ] ; do
PASS=$(( PASS + 1 ))
echo "Pass $PASS:"
for PKT_TYPE in $PKT_TYPES ; do
if [ $PASS -gt "$MAX_PASSES" ] && [ "$MAX_PASSES" -ge 1 ] ; then
break # We caught a signal
fi
echo -n " $PKT_TYPE: "
DISSECTOR_BUG=0
VG_ERR_CNT=0
# shellcheck disable=SC2086
"$RANDPKT" $RANDPKT_ARGS -t "$PKT_TYPE" "$TMP_DIR/$TMP_FILE" \
> /dev/null 2>&1
for ARGS in "${RUNNER_ARGS[@]}" ; do
echo -n "($ARGS) "
echo -e "Command and args: $RUNNER $ARGS\\n" > "$TMP_DIR/$ERR_FILE"
# Run in a child process with limits.
(
# Set some limits to the child processes, e.g. stop it if
# it's running longer than MAX_CPU_TIME seconds. (ulimit
# is not supported well on cygwin - it shows some warnings -
# and the features we use may not all be supported on some
# UN*X platforms.)
ulimit -S -t $MAX_CPU_TIME -s $MAX_STACK
# Allow core files to be generated
ulimit -c unlimited
# Don't enable ulimit -v when using ASAN. See
# https://github.com/google/sanitizers/wiki/AddressSanitizer#ulimit--v
if [ $ASAN -eq 0 ]; then
ulimit -S -v $MAX_VMEM
fi
# shellcheck disable=SC2086
"$RUNNER" $ARGS "$TMP_DIR/$TMP_FILE" \
> /dev/null 2>> "$TMP_DIR/$ERR_FILE"
)
RETVAL=$?
if [ $VALGRIND -eq 1 ]; then
VG_ERR_CNT=$( grep "ERROR SUMMARY:" "$TMP_DIR/$ERR_FILE" | cut -f4 -d' ' )
VG_DEF_LEAKED=$( grep "definitely lost:" "$TMP_DIR/$ERR_FILE" | cut -f7 -d' ' | tr -d , )
VG_IND_LEAKED=$( grep "indirectly lost:" "$TMP_DIR/$ERR_FILE" | cut -f7 -d' ' | tr -d , )
VG_TOTAL_LEAKED=$(( "$VG_DEF_LEAKED" + "$VG_IND_LEAKED" ))
if [ $RETVAL -ne 0 ] ; then
echo "General Valgrind failure."
VG_ERR_CNT=1
elif [ "$VG_TOTAL_LEAKED" -gt "$MAX_LEAK" ] ; then
echo "Definitely + indirectly ($VG_DEF_LEAKED + $VG_IND_LEAKED) exceeds max ($MAX_LEAK)."
echo "Definitely + indirectly ($VG_DEF_LEAKED + $VG_IND_LEAKED) exceeds max ($MAX_LEAK)." >> "$TMP_DIR/$ERR_FILE"
VG_ERR_CNT=1
fi
if grep -q "Valgrind cannot continue" "$TMP_DIR/$ERR_FILE" ; then
echo "Valgrind unable to continue."
VG_ERR_CNT=-1
fi
fi
if [ $RETVAL -ne 0 ] ; then break ; fi
done
grep -i "dissector bug" "$TMP_DIR/$ERR_FILE" \
> /dev/null 2>&1 && DISSECTOR_BUG=1
if [ $RETVAL -ne 0 ] || [ $DISSECTOR_BUG -ne 0 ] || [ $VG_ERR_CNT -ne 0 ] ; then
ws_exit_error
fi
echo " OK"
rm -f "$TMP_DIR/$TMP_FILE" "$TMP_DIR/$ERR_FILE"
done
done |
Python | wireshark/tools/rdps.py | #!/usr/bin/env python3
#
# rdps.py
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
'''\
takes the file listed as the first argument and creates the file listed
as the second argument. It takes a PostScript file and creates a C source
with 2 functions:
print_ps_preamble()
print_ps_finale()
Ported to Python from rdps.c.
'''
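# Example (file names are illustrative):
#   ./rdps.py print.ps ps.c
# reads print.ps and writes ps.c, which defines print_ps_preamble() and
# print_ps_finale().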
import sys
import os.path
def ps_clean_string(raw_str):
ps_str = ''
for c in raw_str:
if c == '\\':
ps_str += '\\\\'
elif c == '\n':
ps_str += '\\n'
else:
ps_str += c
return ps_str
def start_code(fd, name):
fd.write("static const char ps_%s[] =\n" % name)
def write_code(fd, raw_str):
ps_str = ps_clean_string(raw_str)
fd.write("\t\"%s\"\n" % ps_str)
def end_code(fd, name):
fd.write(";\n")
fd.write("\n")
fd.write("void print_ps_%s(FILE *fd) {\n" % name)
fd.write("\tfwrite(ps_%s, sizeof ps_%s - 1, 1, fd);\n" % ( name, name ) )
fd.write("}\n\n\n")
def exit_err(msg=None, *param):
if msg is not None:
sys.stderr.write(msg % param)
sys.exit(1)
# Globals
STATE_NULL = 'null'
STATE_PREAMBLE = 'preamble'
STATE_FINALE = 'finale'
def main():
state = STATE_NULL
if len(sys.argv) != 3:
exit_err("%s: input_file output_file\n", __file__)
input = open(sys.argv[1], 'r')
output = open(sys.argv[2], 'w')
script_name = os.path.split(__file__)[-1]
output.write('''\
/* DO NOT EDIT
*
* Created by %s.
*
* ps.c
* Definitions for generating PostScript(R) packet output.
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <[email protected]>
* Copyright 1998 Gerald Combs
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#include <stdio.h>
#include "ps.h"
''' % script_name)
for line in input:
#line = line.rstrip()
if state == STATE_NULL:
if line.startswith("% ---- wireshark preamble start ---- %"):
state = STATE_PREAMBLE
start_code(output, "preamble")
continue
elif line.startswith("% ---- wireshark finale start ---- %"):
state = STATE_FINALE
start_code(output, "finale")
continue
elif state == STATE_PREAMBLE:
if line.startswith("% ---- wireshark preamble end ---- %"):
state = STATE_NULL
end_code(output, "preamble")
continue
else:
write_code(output, line)
elif state == STATE_FINALE:
if line.startswith("% ---- wireshark finale end ---- %"):
state = STATE_NULL
end_code(output, "finale")
continue
else:
write_code(output, line)
else:
exit_err("NO MATCH:%s", line)
sys.exit(0)
if __name__ == "__main__":
main()
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
# |
Shell Script | wireshark/tools/release-update-debian-soversions.sh | #!/bin/sh
#
# Update Debian packaging for the new major release's shared library SOVERSIONs
#
# Copyright 2017 Balint Reczey <[email protected]>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
# Set shared library package names and library versions in Debian packaging
# matching the new major release's so versions
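# For example, if libwiretap's CMakeLists.txt sets SOVERSION 13, this renames
# packaging/debian/libwiretap0.* to libwiretap13.* and rewrites references to
# libwiretap0 and libwiretap.so.0 to match. (Version number is illustrative.)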
set -e
for i in codecs wireshark wiretap wsutil; do
NEW_VERSION=$(grep SOVERSION "$(grep -l lib${i} ./*/CMakeLists.txt)" | sed 's/.*SOVERSION \([0-9]*\).*/\1/')
rename "s/0\\./${NEW_VERSION}./" packaging/debian/lib${i}0.*
grep -l -R "lib${i}0" packaging/debian/ | xargs sed -i "s/lib${i}0/lib${i}${NEW_VERSION}/"
grep -l -R "lib${i}\\.so\\.0" packaging/debian/ | xargs sed -i "s/lib${i}\\.so\\.0/lib${i}.so.${NEW_VERSION}/"
done |
Shell Script | wireshark/tools/rpm-setup.sh | #!/bin/bash
# Setup development environment for RPM based systems such as Red Hat, Centos, Fedora, openSUSE
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
# We drag in tools that might not be needed by all users; it's easier
# that way.
#
set -e -u -o pipefail
function print_usage() {
printf "\nUtility to setup a rpm-based system for Wireshark Development.\n"
printf "The basic usage installs the needed software\n\n"
printf "Usage: $0 [--install-optional] [...other options...]\n"
printf "\t--install-optional: install optional software as well\n"
printf "\t--install-rpm-deps: install packages required to build the .rpm file\n"
printf "\\t--install-qt5-deps: force installation of packages required to use Qt5\\n"
printf "\\t--install-qt6-deps: force installation of packages required to use Qt6\\n"
printf "\\t--install-all: install everything\\n"
printf "\t[other]: other options are passed as-is to the package manager\n"
}
ADDITIONAL=0
RPMDEPS=0
ADD_QT5=0
ADD_QT6=0
HAVE_ADD_QT=0
OPTIONS=
for arg; do
case $arg in
--help|-h)
print_usage
exit 0
;;
--install-optional)
ADDITIONAL=1
;;
--install-rpm-deps)
RPMDEPS=1
;;
--install-qt5-deps)
ADD_QT5=1
HAVE_ADD_QT=1
;;
--install-qt6-deps)
ADD_QT6=1
HAVE_ADD_QT=1
;;
--install-all)
ADDITIONAL=1
RPMDEPS=1
ADD_QT5=1
ADD_QT6=1
HAVE_ADD_QT=1
;;
*)
OPTIONS="$OPTIONS $arg"
;;
esac
done
# Check if the user is root
if [ $(id -u) -ne 0 ]
then
echo "You must be root."
exit 1
fi
BASIC_LIST="cmake \
gcc \
gcc-c++ \
flex \
python3 \
desktop-file-utils \
git \
glib2-devel \
libpcap-devel \
pcre2-devel \
zlib-devel \
libgcrypt-devel"
ADDITIONAL_LIST="libcap-devel \
libssh-devel \
krb5-devel \
perl-Parse-Yapp \
snappy-devel \
minizip-devel \
lz4 \
libxml2-devel \
perl \
spandsp-devel \
systemd-devel \
python3-pytest \
python3-pytest-xdist"
# Uncomment to add PNG compression utilities used by compress-pngs:
# ADDITIONAL_LIST="$ADDITIONAL_LIST \
# advancecomp \
# optipng \
# oxipng \
# pngcrush"
# XXX
RPMDEPS_LIST="rpm-build"
# Guess which package manager we will use
for PM in zypper dnf yum ''; do
if type "$PM" >/dev/null 2>&1; then
break
fi
done
if [ -z "$PM" ]
then
echo "No package managers found, exiting"
exit 1
fi
PM_OPT=
case $PM in
zypper)
PM_OPT="--non-interactive"
PM_SEARCH="search -x --provides"
;;
dnf)
PM_SEARCH="info"
;;
yum)
PM_SEARCH="info"
;;
esac
echo "Using $PM ($PM_SEARCH)"
# Adds package $2 to list variable $1 if the package is found
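# e.g. "add_package BASIC_LIST cmake" appends "cmake" to $BASIC_LIST when the
# package manager knows about that package.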
add_package() {
local list="$1" pkgname="$2"
# fail if the package is not known
$PM $PM_SEARCH "$pkgname" &> /dev/null || return 1
# package is found, append it to list
eval "${list}=\"\${${list}} \${pkgname}\""
}
# Adds packages $2-$n to list variable $1 if all the packages are found
add_packages() {
local list="$1" pkgnames="${@:2}"
# fail if any package is not known
for pkgname in $pkgnames; do
$PM $PM_SEARCH "$pkgname" &> /dev/null || return 1
done
# all packages are found, append it to list
eval "${list}=\"\${${list}} \${pkgnames}\""
}
add_package BASIC_LIST glib2 || add_package BASIC_LIST libglib-2_0-0 ||
echo "Required package glib2|libglib-2_0-0 is unavailable" >&2
# lua51, lua51-devel: OpenSUSE Leap 42.3 (lua would be fine too, as it installs lua52), OpenSUSE Leap 15.0 (lua installs lua53, so it wouldn't work)
# compat-lua, compat-lua-devel: Fedora 28, Fedora 29, CentOS 8
# lua, lua-devel: CentOS 7
add_package BASIC_LIST lua51-devel || add_package BASIC_LIST compat-lua-devel || add_package BASIC_LIST lua-devel ||
echo "Required package lua51-devel|compat-lua-devel|lua-devel is unavailable" >&2
add_package BASIC_LIST lua51 || add_package BASIC_LIST compat-lua || add_package BASIC_LIST lua ||
echo "Required package lua51|compat-lua|lua is unavailable" >&2
add_package BASIC_LIST libpcap || add_package BASIC_LIST libpcap1 ||
echo "Required package libpcap|libpcap1 is unavailable" >&2
add_package BASIC_LIST zlib || add_package BASIC_LIST libz1 ||
echo "Required package zlib|libz1 is unavailable" >&2
add_package BASIC_LIST c-ares-devel || add_package BASIC_LIST libcares-devel ||
echo "Required package c-ares-devel|libcares-devel is unavailable" >&2
add_package BASIC_LIST speexdsp-devel || add_package BASIC_LIST speex-devel ||
echo "Required package speexdsp-devel|speex-devel is unavailable" >&2
if [ $HAVE_ADD_QT -eq 0 ]
then
# Try to select Qt version from distro
test -e /etc/os-release && os_release='/etc/os-release' || os_release='/usr/lib/os-release'
# shellcheck disable=SC1090
. "${os_release}"
# Fedora 35 or later
if [ "${ID:-linux}" = "fedora" ] && [ "${VERSION_ID:-0}" -ge "35" ]; then
echo "Installing Qt6."
ADD_QT6=1
else
echo "Installing Qt5."
ADD_QT5=1
fi
fi
if [ $ADD_QT5 -ne 0 ]
then
# qt5-linguist: CentOS, Fedora
# libqt5-linguist-devel: OpenSUSE
add_package BASIC_LIST qt5-linguist ||
add_package BASIC_LIST libqt5-linguist-devel ||
echo "Required package qt5-linguist|libqt5-linguist-devel is unavailable" >&2
# qt5-qtmultimedia: CentOS, Fedora, pulls in qt5-qtbase-devel (big dependency list!)
# libqt5-qtmultimedia-devel: OpenSUSE, pulls in Core, Gui, Multimedia, Network, Widgets
# OpenSUSE additionally has a separate Qt5PrintSupport package.
add_package BASIC_LIST qt5-qtmultimedia-devel ||
add_packages BASIC_LIST libqt5-qtmultimedia-devel libQt5PrintSupport-devel ||
echo "Required Qt5 Mutlimedia and/or Qt5 Print Support is unavailable" >&2
    # This is only required on OpenSUSE
add_package BASIC_LIST libqt5-qtsvg-devel ||
echo "Required OpenSUSE package libqt5-qtsvg-devel is unavailable. Not required for other distributions." >&2
    # This is only required on OpenSUSE
add_package BASIC_LIST libQt5Concurrent-devel ||
echo "Required OpenSUSE package libQt5Concurrent-devel is unavailable. Not required for other distributions." >&2
add_package ADDITIONAL_LIST qt5-qtimageformats ||
add_package ADDITIONAL_LIST libqt5-qtimageformats ||
echo "Optional Qt5 Image Formats is unavailable" >&2
fi
if [ $ADD_QT6 -ne 0 ]
then
# Fedora Qt6 packages required from a minimal installation
QT6_LIST=(qt6-qtbase-devel
qt6-qttools-devel
qt6-qt5compat-devel
qt6-qtmultimedia-devel
libxkbcommon-devel)
    for pkg in "${QT6_LIST[@]}"
do
add_package BASIC_LIST "$pkg" ||
echo "Qt6 dependency $pkg is unavailable" >&2
done
add_package ADDITIONAL_LIST qt6-qtimageformats ||
echo "Optional Qt6 Image Formats is unavailable" >&2
fi
# This is only required on OpenSUSE
add_packages BASIC_LIST hicolor-icon-theme xdg-utils ||
echo "Required OpenSUSE packages hicolor-icon-theme and xdg-utils are unavailable. Not required for other distirbutions." >&2
# This is only required (and available) on OpenSUSE
add_package BASIC_LIST update-desktop-files ||
echo "Required OpenSUSE package update-desktop-files is unavailable. Not required for other distributions." >&2
# rubygem-asciidoctor.noarch: Centos, Fedora
# (Added to RHEL/Centos 8: https://bugzilla.redhat.com/show_bug.cgi?id=1820896 )
# ruby2.5-rubygem-asciidoctor: openSUSE 15.2
add_package RPMDEPS_LIST rubygem-asciidoctor.noarch || add_package RPMDEPS_LIST ruby2.5-rubygem-asciidoctor ||
echo "RPM dependency asciidoctor is unavailable" >&2
# libcap: CentOS 7, Fedora 28, Fedora 29
# libcap2: OpenSUSE Leap 42.3, OpenSUSE Leap 15.0
add_package ADDITIONAL_LIST libcap || add_package ADDITIONAL_LIST libcap2 ||
echo "Optional package libcap|libcap2 is unavailable" >&2
add_package ADDITIONAL_LIST nghttp2-devel || add_package ADDITIONAL_LIST libnghttp2-devel ||
echo "Optional package nghttp2-devel|libnghttp2-devel is unavailable" >&2
add_package ADDITIONAL_LIST snappy || add_package ADDITIONAL_LIST libsnappy1 ||
echo "Optional package snappy|libsnappy1 is unavailable" >&2
add_package ADDITIONAL_LIST libzstd-devel || echo "Optional package libzstd-devel is unavailable" >&2
add_package ADDITIONAL_LIST lz4-devel || add_package ADDITIONAL_LIST liblz4-devel ||
echo "Optional package lz4-devel|liblz4-devel is unavailable" >&2
add_package ADDITIONAL_LIST libcap-progs || echo "Optional package libcap-progs is unavailable" >&2
add_package ADDITIONAL_LIST libmaxminddb-devel ||
echo "Optional package libmaxminddb-devel is unavailable" >&2
add_package ADDITIONAL_LIST gnutls-devel || add_package ADDITIONAL_LIST libgnutls-devel ||
echo "Optional package gnutls-devel|libgnutls-devel is unavailable" >&2
add_package ADDITIONAL_LIST gettext-devel || add_package ADDITIONAL_LIST gettext-tools ||
echo "Optional package gettext-devel|gettext-tools is unavailable" >&2
add_package ADDITIONAL_LIST ninja || add_package ADDITIONAL_LIST ninja-build ||
echo "Optional package ninja|ninja-build is unavailable" >&2
add_package ADDITIONAL_LIST libxslt || add_package ADDITIONAL_LIST libxslt1 ||
echo "Optional package libxslt|libxslt1 is unavailable" >&2
add_package ADDITIONAL_LIST docbook-style-xsl || add_package ADDITIONAL_LIST docbook-xsl-stylesheets ||
echo "Optional package docbook-style-xsl|docbook-xsl-stylesheets is unavailable" >&2
add_package ADDITIONAL_LIST brotli-devel || add_packages ADDITIONAL_LIST libbrotli-devel libbrotlidec1 ||
echo "Optional packages brotli-devel|libbrotli-devel is unavailable" >&2
add_package ADDITIONAL_LIST libnl3-devel || add_package ADDITIONAL_LIST libnl-devel ||
echo "Optional package libnl3-devel|libnl-devel are unavailable" >&2
add_package ADDITIONAL_LIST ilbc-devel ||
echo "Optional package ilbc-devel is unavailable" >&2
# opus-devel: RHEL/CentOS, Fedora
# libopus-devel: OpenSUSE
add_package ADDITIONAL_LIST opus-devel || add_package ADDITIONAL_LIST libopus-devel ||
echo "Optional package opus-devel|libopus-devel is unavailable" >&2
add_package ADDITIONAL_LIST bcg729-devel ||
echo "Optional package bcg729-devel is unavailable" >&2
# RHEL 8 / CentOS 8 are missing the -devel packages for sbc and libsmi due to
# RH deciding not to ship all -devel packages.
# https://wiki.centos.org/FAQ/CentOS8/UnshippedPackages
# There are CentOS bugs filed to add them to the Devel repository and eventually
# RHEL 8 CRB / CentOS PowerTools, but make them optional for now.
# https://bugs.centos.org/view.php?id=16504
# https://bugs.centos.org/view.php?id=17824
add_package ADDITIONAL_LIST sbc-devel ||
echo "Optional package sbc-devel is unavailable"
add_package ADDITIONAL_LIST libsmi-devel ||
echo "Optional package libsmi-devel is unavailable"
add_package ADDITIONAL_LIST opencore-amr-devel ||
echo "Optional package opencore-amr-devel is unavailable" >&2
ACTUAL_LIST=$BASIC_LIST
# Now arrange for optional support libraries
if [ $ADDITIONAL -ne 0 ]
then
ACTUAL_LIST="$ACTUAL_LIST $ADDITIONAL_LIST"
fi
if [ $RPMDEPS -ne 0 ]
then
ACTUAL_LIST="$ACTUAL_LIST $RPMDEPS_LIST"
fi
$PM $PM_OPT install $ACTUAL_LIST $OPTIONS
if [ $ADDITIONAL -eq 0 ]
then
echo -e "\n*** Optional packages not installed. Rerun with --install-optional to have them.\n"
fi
if [ $RPMDEPS -eq 0 ]
then
printf "\n*** RPM packages build deps not installed. Rerun with --install-rpm-deps to have them.\n"
fi |
Python | wireshark/tools/sharkd_shell.py | #!/usr/bin/env python3
# Convenience shell for using sharkd, including history and tab completion.
#
# Copyright (c) 2019 Peter Wu <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
import argparse
import contextlib
import glob
import json
import logging
import os
import readline
import selectors
import signal
import subprocess
import sys
_logger = logging.getLogger(__name__)
# grep -Po 'tok_req, "\K\w+' sharkd_session.c
all_commands = """
load
status
analyse
info
check
complete
frames
tap
follow
iograph
intervals
frame
setcomment
setconf
dumpconf
download
bye
""".split()
all_commands += """
!pretty
!histfile
!debug
""".split()
class SharkdShell:
def __init__(self, pretty, history_file):
self.pretty = pretty
self.history_file = history_file
def ignore_sigint(self):
# Avoid terminating the sharkd child when ^C in the shell.
signal.signal(signal.SIGINT, signal.SIG_IGN)
def sharkd_process(self):
sharkd = 'sharkd'
env = os.environ.copy()
# Avoid loading user preferences which may trigger deprecation warnings.
env['WIRESHARK_CONFIG_DIR'] = '/nonexistent'
proc = subprocess.Popen([sharkd, '-'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
preexec_fn=self.ignore_sigint)
banner = proc.stderr.read1().decode('utf8')
if banner.strip() != 'Hello in child.':
_logger.warning('Unexpected banner: %r', banner)
return proc
def completer(self, text, state):
if state == 0:
origline = readline.get_line_buffer()
line = origline.lstrip()
skipped = len(origline) - len(line)
startpos = readline.get_begidx() - skipped
curpos = readline.get_endidx() - skipped
# _logger.debug('Completing: head=%r cur=%r tail=%r',
# line[:startpos], line[startpos:curpos], line[curpos:])
completions = []
if startpos == 0:
completions = all_commands
elif line[:1] == '!':
cmd = line[1:startpos].strip()
if cmd == 'pretty':
completions = ['jq', 'indent', 'off']
elif cmd == 'histfile':
# spaces in paths are not supported for now.
completions = glob.glob(glob.escape(text) + '*')
elif cmd == 'debug':
completions = ['on', 'off']
completions = [x for x in completions if x.startswith(text)]
if len(completions) == 1:
completions = [completions[0] + ' ']
self.completions = completions
try:
return self.completions[state]
except IndexError:
return None
def wrap_exceptions(self, fn):
# For debugging, any exception in the completion function is usually
# silently ignored by readline.
def wrapper(*args):
try:
return fn(*args)
except Exception as e:
_logger.exception(e)
raise
return wrapper
def add_history(self, line):
# Emulate HISTCONTROL=ignorespace to avoid adding to history.
if line.startswith(' '):
return
# Emulate HISTCONTROL=ignoredups to avoid duplicate history entries.
nitems = readline.get_current_history_length()
lastline = readline.get_history_item(nitems)
if lastline != line:
readline.add_history(line)
def parse_command(self, cmd):
'''Converts a user-supplied command to a sharkd one.'''
# Support 'foo {...}' as alias for '{"req": "foo", ...}'
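        # e.g. 'status {}' becomes {"req": "status"} and
        # 'load {"file": "a.pcap"}' becomes {"req": "load", "file": "a.pcap"}
        # (the file name is illustrative).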
if cmd[0].isalpha():
if ' ' in cmd:
req, cmd = cmd.split(' ', 1)
else:
req, cmd = cmd, '{}'
elif cmd[0] == '!':
return self.parse_special_command(cmd[1:])
else:
req = None
try:
c = json.loads(cmd)
if req is not None:
c['req'] = req
except json.JSONDecodeError as e:
_logger.error('Invalid command: %s', e)
return
        if not isinstance(c, dict) or 'req' not in c:
_logger.error('Missing req key in request')
return
return c
def parse_special_command(self, cmd):
args = cmd.split()
if not args:
_logger.warning('Missing command')
return
if args[0] == 'pretty':
choices = ['jq', 'indent']
if len(args) >= 2:
self.pretty = args[1] if args[1] in choices else None
print('Pretty printing is now', self.pretty or 'disabled')
elif args[0] == 'histfile':
if len(args) >= 2:
self.history_file = args[1] if args[1] != 'off' else None
print('History is now', self.history_file or 'disabled')
elif args[0] == 'debug':
if len(args) >= 2 and args[1] in ('on', 'off'):
_logger.setLevel(
logging.DEBUG if args[1] == 'on' else logging.INFO)
print('Debug logging is now',
['off', 'on'][_logger.level == logging.DEBUG])
else:
_logger.warning('Unsupported command %r', args[0])
@contextlib.contextmanager
def wrap_history(self):
'''Loads history at startup and saves history on exit.'''
readline.set_auto_history(False)
try:
if self.history_file:
readline.read_history_file(self.history_file)
h_len = readline.get_current_history_length()
except FileNotFoundError:
h_len = 0
try:
yield
finally:
new_items = readline.get_current_history_length() - h_len
if new_items > 0 and self.history_file:
open(self.history_file, 'a').close()
readline.append_history_file(new_items, self.history_file)
def shell_prompt(self):
'''Sets up the interactive prompt.'''
readline.parse_and_bind("tab: complete")
readline.set_completer(self.wrap_exceptions(self.completer))
readline.set_completer_delims(' ')
return self.wrap_history()
def read_command(self):
while True:
try:
origline = input('# ')
except EOFError:
raise
except KeyboardInterrupt:
print('^C', file=sys.stderr)
continue
cmd = origline.strip()
if not cmd:
return
self.add_history(origline)
c = self.parse_command(cmd)
if c:
return json.dumps(c)
def want_input(self):
'''Request the prompt to be displayed.'''
os.write(self.user_input_wr, b'x')
def main_loop(self):
sel = selectors.DefaultSelector()
user_input_rd, self.user_input_wr = os.pipe()
self.want_input()
with self.sharkd_process() as proc, self.shell_prompt():
self.process = proc
sel.register(proc.stdout, selectors.EVENT_READ, self.handle_stdout)
sel.register(proc.stderr, selectors.EVENT_READ, self.handle_stderr)
sel.register(user_input_rd, selectors.EVENT_READ, self.handle_user)
interrupts = 0
while True:
try:
events = sel.select()
_logger.debug('got events: %r', events)
if not events:
break
for key, mask in events:
key.data(key)
interrupts = 0
except KeyboardInterrupt:
print('Interrupt again to abort immediately.', file=sys.stderr)
interrupts += 1
if interrupts >= 2:
break
if self.want_command:
self.ask_for_command_and_run_it()
# Process died? Stop the shell.
if proc.poll() is not None:
break
def handle_user(self, key):
'''Received a notification that another prompt can be displayed.'''
os.read(key.fileobj, 4096)
self.want_command = True
def ask_for_command_and_run_it(self):
cmd = self.read_command()
if not cmd:
# Give a chance for the event loop to run again.
self.want_input()
return
self.want_command = False
_logger.debug('Running: %r', cmd)
self.process.stdin.write((cmd + '\n').encode('utf8'))
self.process.stdin.flush()
def handle_stdout(self, key):
resp = key.fileobj.readline().decode('utf8')
_logger.debug('Response: %r', resp)
if not resp:
raise EOFError
self.want_input()
resp = resp.strip()
if resp:
try:
if self.pretty == 'jq':
subprocess.run(['jq', '.'], input=resp,
universal_newlines=True)
elif self.pretty == 'indent':
r = json.loads(resp)
json.dump(r, sys.stdout, indent=' ')
print('')
else:
print(resp)
except Exception as e:
_logger.warning('Dumping output as-is due to: %s', e)
print(resp)
def handle_stderr(self, key):
data = key.fileobj.read1().decode('utf8')
print(data, end="", file=sys.stderr)
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true',
help='Enable verbose logging')
parser.add_argument('--pretty', choices=['jq', 'indent'],
help='Pretty print responses (one of: %(choices)s)')
parser.add_argument('--histfile',
help='Log shell history to this file')
def main(args):
logging.basicConfig()
_logger.setLevel(logging.DEBUG if args.debug else logging.INFO)
shell = SharkdShell(args.pretty, args.histfile)
try:
shell.main_loop()
except EOFError:
print('')
if __name__ == '__main__':
main(parser.parse_args()) |
Shell Script | wireshark/tools/test-captures.sh | #!/bin/bash
# A little script to run tshark on capture file[s] (potentially ones that
# failed fuzz testing). Useful because it sets up ulimits and other environment
# variables for you to ensure things like misused ephemeral memory are caught.
# (I'm writing this after having my machine hang up for like 15 minutes because
# I wasn't paying attention while tshark was running on a fuzzed capture and
# it used all my RAM + swap--which was pretty painful.)
#
# Copyright 2012 Jeff Morriss <jeff.morriss.ws [AT] gmail.com>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
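# Example (binary dir and capture name are illustrative):
#   tools/test-captures.sh -b build/run fuzz-2024-01-01-1234.pcap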
TEST_TYPE="manual"
# shellcheck source=tools/test-common.sh
. "$( dirname "$0" )"/test-common.sh || exit 1
# Run under AddressSanitizer ?
ASAN=$CONFIGURED_WITH_ASAN
while getopts "ab:" OPTCHAR ; do
case $OPTCHAR in
a) ASAN=1 ;;
b) WIRESHARK_BIN_DIR=$OPTARG ;;
*) printf "Unknown option: %s\\n" "$OPTARG"
esac
done
shift $(( OPTIND - 1 ))
if [ $# -lt 1 ]
then
printf "Usage: %s [-b bin_dir] /path/to/file[s].pcap\\n" "$( basename "$0" )"
exit 1
fi
ws_bind_exec_paths
ws_check_exec "$TSHARK"
# Set some limits to the child processes, e.g. stop it if it's running
# longer than MAX_CPU_TIME seconds. (ulimit is not supported well on
# cygwin - it shows some warnings - and the features we use may not all
# be supported on some UN*X platforms.)
ulimit -S -t $MAX_CPU_TIME
# Allow core files to be generated
ulimit -c unlimited
# Don't enable ulimit -v when using ASAN. See
# https://github.com/google/sanitizers/wiki/AddressSanitizer#ulimit--v
if [ $ASAN -eq 0 ]; then
ulimit -S -v $MAX_VMEM
fi
for file in "$@"
do
echo "Testing file $file..."
echo -n " - with tree... "
if $TSHARK -nVxr "$file" > /dev/null
then
echo "OK"
echo -n " - without tree... "
if "$WIRESHARK_BIN_DIR/tshark" -nr "$file" > /dev/null
then
echo "OK"
echo -n " - without tree but with a read filter... "
if "$WIRESHARK_BIN_DIR/tshark" -Yframe -nr "$file" > /dev/null
then
echo "OK"
else
echo "Failed"
exit 1
fi
else
echo "Failed"
exit 1
fi
else
echo "Failed"
exit 1
fi
done |
Shell Script | wireshark/tools/test-common.sh | #!/bin/bash
#
# Copyright 2013 Gerald Combs <[email protected]>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
# Common variables and functions for fuzz and randpkt tests.
# This needs to point to a 'date' that supports %s.
if [ -z "$TEST_TYPE" ] ; then
echo "TEST_TYPE must be defined by the sourcing script."
exit 1
fi
DATE=/bin/date
BASE_NAME=$TEST_TYPE-$($DATE +%Y-%m-%d)-$$
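# e.g. "randpkt-2024-01-31-12345" when sourced from randpkt-test.sh by
# process 12345 on 2024-01-31 (illustrative values).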
# Directory containing binaries. Default: cmake run directory.
if [ -z "$WIRESHARK_BIN_DIR" ]; then
WIRESHARK_BIN_DIR=run
fi
# Temporary file directory and names.
# (had problems with this on cygwin, tried TMP_DIR=./ which worked)
TMP_DIR=/tmp
if [ "$OSTYPE" == "cygwin" ] ; then
TMP_DIR=$(cygpath --windows "$TMP_DIR")
fi
TMP_FILE=$BASE_NAME.pcap
ERR_FILE=$BASE_NAME.err
# Loop this many times (< 1 loops forever)
MAX_PASSES=0
# These may be set to your liking
# Stop the child process if it's running longer than x seconds
MAX_CPU_TIME=600
# Stop the child process if it's using more than y * 1024 bytes
MAX_VMEM=1000000
# Stop the child process if its stack is larger than z * 1024 bytes
# Windows XP: 2033
# Windows 7: 2034
# Mac OS X 10.6: 8192
# Linux 2.6.24: 8192
# Solaris 10: 8192
MAX_STACK=2033
# Insert z times an error into the capture file (0.02 seems to be a good value to find errors)
ERR_PROB=0.02
# Maximum number of packets to fuzz
MAX_FUZZ_PACKETS=50000
# Call *after* any changes to WIRESHARK_BIN_DIR (e.g., via command-line options)
function ws_bind_exec_paths() {
# Tweak the following to your liking. Editcap must support "-E".
TSHARK="$WIRESHARK_BIN_DIR/tshark"
EDITCAP="$WIRESHARK_BIN_DIR/editcap"
CAPINFOS="$WIRESHARK_BIN_DIR/capinfos"
RANDPKT="$WIRESHARK_BIN_DIR/randpkt"
if [ "$WIRESHARK_BIN_DIR" = "." ]; then
export WIRESHARK_RUN_FROM_BUILD_DIRECTORY=1
fi
}
function ws_check_exec() {
NOTFOUND=0
for i in "$@" ; do
if [ ! -x "$i" ]; then
echo "Couldn't find \"$i\""
NOTFOUND=1
fi
done
if [ $NOTFOUND -eq 1 ]; then
exit 1
fi
}
source "$(dirname "$0")"/debug-alloc.env
# Address Sanitizer options
export ASAN_OPTIONS=detect_leaks=0
# See if we were configured with gcc or clang's AddressSanitizer.
CONFIGURED_WITH_ASAN=0
# If tshark is built with ASAN this will generate an error. We could
# also pass help=1 and look for help text.
ASAN_OPTIONS=Invalid_Option_Flag $TSHARK -h > /dev/null 2>&1
if [ $? -ne 0 ] ; then
CONFIGURED_WITH_ASAN=1
fi
export CONFIGURED_WITH_ASAN
# Create an error report
function ws_exit_error() {
echo -e "\n ERROR"
echo -e "Processing failed. Capture info follows:\n"
echo " Input file: $CF"
echo " Output file: $TMP_DIR/$TMP_FILE"
echo " Pass: $PASS"
echo
# Fill in build information
{
if [ -n "$CI_COMMIT_BRANCH" ] ; then
printf "Branch: %s\\n" "$CI_COMMIT_BRANCH"
else
printf "Branch: %s\\n" "$(git rev-parse --abbrev-ref HEAD)"
fi
printf "Input file: %s\\n" "$CF"
if [ -n "$CI_JOB_NAME" ] ; then
printf "CI job name: %s, ID: %s\\n" "$CI_JOB_NAME" "$CI_JOB_ID"
printf "CI job URL: %s\\n" "$CI_JOB_URL"
fi
printf "Return value: %s\\n" "$RETVAL"
printf "Dissector bug: %s\\n" "$DISSECTOR_BUG"
if [ "$VALGRIND" -eq 1 ] ; then
printf "Valgrind error count: %s\\n" "$VG_ERR_CNT"
fi
printf "Date and time: %s\\n" "$( date --utc )"
SINCE_HOURS=48
if [ -d "${GIT_DIR:-.git}" ] ; then
printf "\\nCommits in the last %s hours:\\n" $SINCE_HOURS
git --no-pager log --oneline --no-decorate --since=${SINCE_HOURS}hours
printf "\\n"
fi
printf "Build host information:\\n"
uname -srvm
lsb_release -a 2> /dev/null
printf "\\n"
} > "$TMP_DIR/${ERR_FILE}.header"
# Trim the stderr output if needed
    ERR_SIZE=$(du -sk "$TMP_DIR/$ERR_FILE" | awk '{ print $1 }')
    if [ "$ERR_SIZE" -ge 5000 ] ; then
        mv "$TMP_DIR/$ERR_FILE" "$TMP_DIR/${ERR_FILE}.full"
        head -n 2000 "$TMP_DIR/${ERR_FILE}.full" > "$TMP_DIR/$ERR_FILE"
        echo -e "\n\n[ Output removed ]\n\n" >> "$TMP_DIR/$ERR_FILE"
        tail -n 2000 "$TMP_DIR/${ERR_FILE}.full" >> "$TMP_DIR/$ERR_FILE"
        rm -f "$TMP_DIR/${ERR_FILE}.full"
    fi
    cat "$TMP_DIR/${ERR_FILE}" >> "$TMP_DIR/${ERR_FILE}.header"
    mv "$TMP_DIR/${ERR_FILE}.header" "$TMP_DIR/${ERR_FILE}"
    echo -e "stderr follows:\n"
    cat "$TMP_DIR/$ERR_FILE"
exit 255
} |
Python | wireshark/tools/update-appdata.py | #!/usr/bin/env python3
#
# update-appdata.py - Update the <releases/> section of resources/freedesktop/org.wireshark.Wireshark.metainfo.xml.
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
'''Update the <release> tag in resources/freedesktop/org.wireshark.Wireshark.metainfo.xml
According to https://www.freedesktop.org/software/appstream/docs/chap-Metadata.html
the <releases/> tag in resources/freedesktop/org.wireshark.Wireshark.metainfo.xml should contain release
information sorted newest to oldest.
As part of our release process, when we create release tag x.y.z, we tag
the next commit x.y.z+1rc0, e.g.
v3.0.0 2019-02-28 release tag
v3.0.1rc0 2019-02-28 next commit after v3.0.0
v3.0.1 2019-04-08 release tag
v3.0.2rc0 2019-04-08 next commit after v3.0.1
Find a list of release versions based on our most recent rc0 tag and
update the <releases/> section of resources/freedesktop/org.wireshark.Wireshark.metainfo.xml accordingly.
Assume that the tag for the most recent release doesn't exist and use
today's date for it.
'''
from datetime import date
import io
import os.path
import re
import subprocess
import sys
import time
def main():
if sys.version_info[0] < 3:
print("This requires Python 3")
sys.exit(2)
this_dir = os.path.dirname(__file__)
appdata_xml = os.path.join(this_dir, '..', 'resources', 'freedesktop', 'org.wireshark.Wireshark.metainfo.xml')
try:
tag_cp = subprocess.run(
['git', 'tag', '-l', 'wireshark-*'],
encoding='UTF-8',
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if 'wireshark-' not in tag_cp.stdout:
print('Wireshark release tag not found')
sys.exit(1)
except Exception:
        # tag_cp may be unbound if subprocess.run itself raised, so don't
        # reference it here.
        print('Unable to run `git tag`.')
raise
try:
cur_rc0 = subprocess.run(
['git', 'describe', '--match', 'v*rc0'],
check=True,
encoding='UTF-8',
stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout
except Exception:
print('Unable to fetch most recent rc0.')
raise
try:
        ver_m = re.match(r'v(\d+\.\d+)\.(\d+)rc0.*', cur_rc0)
maj_min = ver_m.group(1)
next_micro = ver_m.group(2)
except Exception:
print('Unable to fetch major.minor version.')
raise
# https://www.freedesktop.org/software/appstream/docs/chap-Metadata.html#tag-releases
release_tag_fmt = '''\
<release version="{0}.{1}" date="{2}">
<url>https://www.wireshark.org/docs/relnotes/wireshark-{0}.{1}.html</url>
</release>
'''
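    # Rendered example for version 3.0.1 (the date is illustrative):
    #   <release version="3.0.1" date="2019-04-08">
    #     <url>https://www.wireshark.org/docs/relnotes/wireshark-3.0.1.html</url>
    #   </release>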
release_tag_l = [
release_tag_fmt.format(maj_min, next_micro, date.fromtimestamp(time.time()).isoformat())
]
for micro in range(int(next_micro) - 1, -1, -1):
try:
tag_date = subprocess.run(
['git', 'log', '-1', '--format=%cd', '--date=format:%F', 'v{}.{}'.format(maj_min, micro)],
check=True,
encoding='UTF-8',
stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.strip()
release_tag_l.append(release_tag_fmt.format(maj_min, micro, tag_date))
except Exception:
print('Unable to fetch release tag')
raise
ax_lines = []
with io.open(appdata_xml, 'r', encoding='UTF-8') as ax_fd:
in_releases = False
for line in ax_fd:
if '</releases>' in line:
in_releases = False
if in_releases:
continue
ax_lines.append(line)
if '<releases>' in line:
in_releases = True
ax_lines.extend(release_tag_l)
with io.open(appdata_xml, 'w', encoding='UTF-8') as ax_fd:
ax_fd.write(''.join(ax_lines))
if __name__ == '__main__':
main() |
Python | wireshark/tools/update-tools-help.py | #!/usr/bin/env python3
#
# update-tools-help.py - Update the command line help output in docbook/wsug_src.
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
'''Update tools help
For each file that matches docbook/wsug_src/<command>-<flag>.txt, run
that command and flag. Update the file if the output differs.
'''
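# For example, docbook/wsug_src/tshark-h.txt is regenerated from the output of
# "tshark -h" (assuming that file exists in the tree).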
import argparse
import difflib
import glob
import io
import os
import re
import subprocess
import sys
def main():
parser = argparse.ArgumentParser(description='Update Wireshark tools help')
parser.add_argument('-p', '--program-path', nargs=1, default=os.path.curdir, help='Path to Wireshark executables.')
args = parser.parse_args()
this_dir = os.path.dirname(__file__)
wsug_src_dir = os.path.join(this_dir, '..', 'docbook', 'wsug_src')
tools_help_files = glob.glob(os.path.join(wsug_src_dir, '*-*.txt'))
tools_help_files.sort()
    tool_pat = re.compile(r'(\w+)(-\w).txt')
# If tshark is present, assume that our other executables are as well.
program_path = args.program_path[0]
if not os.path.isfile(os.path.join(program_path, 'tshark')):
print('tshark not found at {}\n'.format(program_path))
parser.print_usage()
sys.exit(1)
null_fd = open(os.devnull, 'w')
for thf in tools_help_files:
thf_base = os.path.basename(thf)
m = tool_pat.match(thf_base)
thf_command = os.path.join(program_path, m.group(1))
thf_flag = m.group(2)
if not os.path.isfile(thf_command):
print('{} not found. Skipping.'.format(thf_command))
continue
with io.open(thf, 'r', encoding='UTF-8') as fd:
cur_help = fd.read()
try:
new_help_data = subprocess.check_output((thf_command, thf_flag), stderr=null_fd)
        except subprocess.CalledProcessError as e:
            if thf_flag == '-h':
                raise
            # Some tools exit with a nonzero status even though they print
            # usable help output; fall back to whatever they wrote.
            new_help_data = e.output
new_help = new_help_data.decode('UTF-8', 'replace')
cur_lines = cur_help.splitlines()
new_lines = new_help.splitlines()
# Assume we have an extended version. Strip it.
        cur_lines[0] = re.split(r' \(v\d+\.\d+\.\d+', cur_lines[0])[0]
        new_lines[0] = re.split(r' \(v\d+\.\d+\.\d+', new_lines[0])[0]
diff = list(difflib.unified_diff(cur_lines, new_lines))
if (len(diff) > 0):
print('Updating {} {}'.format(thf_command, thf_flag))
with io.open(thf, 'w', encoding='UTF-8') as fd:
fd.write(new_help)
else:
print('{} {} output unchanged.'.format(thf_command, thf_flag))
if __name__ == '__main__':
main() |
wireshark/tools/update-tx | #!/bin/bash
# Copyright 2015, Alexis La Goutte (See AUTHORS file)
#
# Resync translation between Gerrit repo and Transifex
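# Usage: tools/update-tx [-n]
#   -n: resync translations locally but do not push the result to Transifex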
NO_PUSH="False"
while getopts "n" OPTCHAR ; do
case $OPTCHAR in
n) NO_PUSH="True" ;;
*) printf "Unknown option: %s\\n" "$OPTARG"
esac
done
shift $((OPTIND - 1))
TOP_LEVEL=$(git rev-parse --show-toplevel)
if ! cd "$TOP_LEVEL" ; then
echo "Can't change to the top-level source directory."
exit 1
fi
LUPDATE_INCLUDES=(-I .)
while read -r ; do
LUPDATE_INCLUDES+=(-I "$REPLY")
done < <(find "$TOP_LEVEL/ui/qt" -type d)
# All .cpp, .h, and .ui files under ui/qt
LUPDATE_FILES=()
while read -r ; do
LUPDATE_FILES+=("$REPLY")
done < <(find ui/qt -name '*.cpp' -o -name '*.h' -o -name '*.ui')
# Add line numbers
for i in ui/qt/*.ts ; do
lupdate -locations absolute "${LUPDATE_INCLUDES[@]}" "${LUPDATE_FILES[@]}" -ts "$i"
done
# Pull the latest translations from Transifex
tx pull -f
# Regenerate the translation files for the repo
for i in ui/qt/*.ts ; do
lupdate -locations absolute "${LUPDATE_INCLUDES[@]}" "${LUPDATE_FILES[@]}" -ts "$i"
done
# Push the latest translation changes to Transifex
if [ "$NO_PUSH" != "True" ]; then
tx push -t -s
fi
# Remove line numbers
for i in ui/qt/*.ts ; do
lupdate -locations none -no-ui-lines "${LUPDATE_INCLUDES[@]}" "${LUPDATE_FILES[@]}" -ts "$i"
done
# Add a new commit with the latest translation update:
#git commit -a -m "TX: Update Translations (sync)"
# Push the updated translation to Gerrit:
#git push origin HEAD:refs/for/master/tx
#
# Editor modelines
#
# Local Variables:
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
#
# ex: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
# |
|
Shell Script | wireshark/tools/valgrind-wireshark.sh | #!/bin/bash
# A small script to export some variables and run tshark or wireshark in
# valgrind on a given capture file.
#
# Copyright 2012 Jeff Morriss <jeff.morriss.ws [AT] gmail.com>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
# Directory containing tshark or wireshark. Default: cmake run directory.
if [ -z "$WIRESHARK_BIN_DIR" ]; then
WIRESHARK_BIN_DIR=run
fi
# Use tshark by default
COMMAND=tshark
COMMAND_ARGS="-nr"
COMMAND_ARGS2=
VALID=0
PCAP=""
TOOL="memcheck"
while getopts ":2a:b:C:lmnpP:rstTYwcevWdG" OPTCHAR ; do
case $OPTCHAR in
2) COMMAND_ARGS="-2 $COMMAND_ARGS" ;;
a) ADDITIONAL_SUPPRESSION_FILE="$ADDITIONAL_SUPPRESSION_FILE --suppressions=$OPTARG" ;;
b) WIRESHARK_BIN_DIR=$OPTARG ;;
C) COMMAND_ARGS="-C $OPTARG $COMMAND_ARGS" ;;
l) LEAK_CHECK="--leak-check=full" ;;
m) TOOL="massif" ;;
n) COMMAND_ARGS="-v"
VALID=1 ;;
p) TOOL="callgrind" ;;
P) TOOL="callgrind"
CALLGRIND_OUT_FILE="--callgrind-out-file=$OPTARG" ;;
r) REACHABLE="--show-reachable=yes" ;;
s) GEN_SUPPRESSIONS="--gen-suppressions=yes" ;;
t) TRACK_ORIGINS="--track-origins=yes" ;;
T) COMMAND_ARGS="-Vx $COMMAND_ARGS" ;; # "build the Tree"
Y) COMMAND_ARGS="-Y frame $COMMAND_ARGS" ;; # Run with a read filter (but no tree)
w) COMMAND=wireshark
COMMAND_ARGS="-nr" ;;
c) COMMAND=capinfos
COMMAND_ARGS="" ;;
e) COMMAND=editcap
COMMAND_ARGS="-E 0.02"
# We don't care about the output of editcap
COMMAND_ARGS2="/dev/null" ;;
v) VERBOSE="--num-callers=256 -v" ;;
W) COMMAND=wireshark
COMMAND_ARGS=""
VALID=1 ;;
d) COMMAND=dumpcap
COMMAND_ARGS="-i eth1 -c 3000"
VALID=1 ;;
*) printf "Unknown option: %s\\n" "$OPTARG"
exit ;;
esac
done
shift $(( OPTIND - 1 ))
# Sanitize parameters
if [ "$COMMAND" != "tshark" ] && [[ $COMMAND_ARGS =~ Vx ]]
then
printf "\\nYou can't use -T if you're not using tshark\\n\\n" >&2
exit 1
fi
if [ $# -ge 1 ]
then
PCAP=$1
VALID=1
fi
if [ $VALID -eq 0 ]
then
printf "\\nUsage: %s [-2] [-a file] [-b bin_dir] [-c] [-e] [-C config_profile] " "$(basename "$0")"
printf "[-l] [-m] [-n] [-p] [-r] [-s] [-t] [-T] [-w] [-v] /path/to/file.pcap\\n"
printf "\\n"
printf "[-2]: run tshark with 2-pass analysis\\n"
printf "[-a]: additional valgrind suppression file\\n"
printf "[-b]: tshark binary dir\\n"
printf "[-e]: use 'editcap -E 0.02' instead of tshark\\n"
printf "[-c]: use capinfos instead of tshark\\n"
printf "[-C]: binary profile file\\n"
printf "[-l]: add valgrind option --leak-check=full\\n"
printf "[-m]: use valgrind massif tool\\n"
printf "[-n]: print binary version\\n"
printf "[-p]: use callgrind massif tool\\n"
printf "[-r]: add valgrind option --show-reachable=yes\\n"
printf "[-s]: add valgrind option --gen-suppressions=yes\\n"
printf "[-t]: add valgrind option --track-origins=yes\\n"
printf "[-T]: build the tshark tree (-Vx)\\n"
printf "[-w]: use wireshark instead of tshark\\n"
printf "[-v]: run in verbose mode (--num-callers=256)\\n"
exit 1
fi
if [ "$WIRESHARK_BIN_DIR" = "." ]; then
export WIRESHARK_RUN_FROM_BUILD_DIRECTORY=
fi
if [ "$TOOL" != "callgrind" ]; then
export WIRESHARK_DEBUG_WMEM_OVERRIDE=simple
export G_SLICE=always-malloc # or debug-blocks
fi
COMMAND="$WIRESHARK_BIN_DIR/$COMMAND"
cmdline="valgrind --suppressions=$( dirname "$0" )/vg-suppressions $ADDITIONAL_SUPPRESSION_FILE \
--tool=$TOOL $CALLGRIND_OUT_FILE $VERBOSE $LEAK_CHECK $REACHABLE $GEN_SUPPRESSIONS $TRACK_ORIGINS \
$COMMAND $COMMAND_ARGS $PCAP $COMMAND_ARGS2"
if [ "$VERBOSE" != "" ];then
echo -e "\\n$cmdline\\n"
fi
# shellcheck disable=SC2086
exec $cmdline > /dev/null |
Shell Script | wireshark/tools/validate-clang-check.sh | #!/bin/bash
# Copyright 2018, Alexis La Goutte (See AUTHORS file)
#
# Verifies last commit with clang-check (like scan-build) for Petri Dish
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
COMMIT_FILES=$( git diff-index --cached --name-status HEAD^ | grep -v "^D" | cut -f2 | grep "\\.c$\|cpp$" )
CLANG_CHECK_CMD=clang-check
while getopts c: OPTCHAR
do
case $OPTCHAR in
c)
CLANG_CHECK_CMD="clang-check-$OPTARG"
;;
*)
echo "Usage: $( basename "$0" ) [ -c <clang version> ]"
exit 0
esac
done
for FILE in $COMMIT_FILES; do
# Skip some special cases
FILE_BASENAME="$( basename "$FILE" )"
# If we don't have a build rule for this file, it's probably because we're missing
# necessary includes.
for BUILD_RULE_FILE in compile_commands.json build.ninja ; do
if [[ -f $BUILD_RULE_FILE ]] && ! grep "/$FILE_BASENAME\." $BUILD_RULE_FILE &> /dev/null ; then
echo "Don't know how to build $FILE_BASENAME. Skipping."
continue 2
fi
done
# wsutil/file_util.c is Windows-only.
if test "$FILE_BASENAME" = "file_util.c"
then
continue
fi
# iLBC: the file is not even compiled when ilbc is not installed
if test "$FILE_BASENAME" = "iLBCdecode.c"
then
continue
fi
# This is a template file, not a final '.c' file.
if echo "$FILE_BASENAME" | grep -Eq "packet-.*-template.c"
then
continue
fi
"$CLANG_CHECK_CMD" "../$FILE"
"$CLANG_CHECK_CMD" -analyze "../$FILE"
done |
Python | wireshark/tools/validate-commit.py | #!/usr/bin/env python3
# Verifies whether commit messages adhere to the standards.
# Checks the author name and email and invokes the tools/commit-msg script.
# Copy this into .git/hooks/post-commit
#
# Copyright (c) 2018 Peter Wu <[email protected]>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
from __future__ import print_function
import argparse
import difflib
import json
import os
import subprocess
import sys
import tempfile
import urllib.request
import re
parser = argparse.ArgumentParser()
parser.add_argument('commit', nargs='?', default='HEAD',
help='Commit ID to be checked (default %(default)s)')
parser.add_argument('--commitmsg', help='commit-msg check', action='store')
def print_git_user_instructions():
print('To configure your name and email for git, run:')
print('')
print(' git config --global user.name "Your Name"')
print(' git config --global user.email "[email protected]"')
print('')
print('After that update the author of your latest commit with:')
print('')
print(' git commit --amend --reset-author --no-edit')
print('')
def verify_name(name):
name = name.lower().strip()
forbidden_names = ('unknown', 'root', 'user', 'your name')
if name in forbidden_names:
return False
# Warn about names without spaces. Sometimes it is a mistake where the
# developer accidentally committed using the system username.
if ' ' not in name:
print("WARNING: name '%s' does not contain a space." % (name,))
print_git_user_instructions()
return True
def verify_email(email):
email = email.lower().strip()
try:
user, host = email.split('@')
except ValueError:
# Lacks a '@' (e.g. a plain domain or "foo[AT]example.com")
return False
tld = host.split('.')[-1]
# localhost, localhost.localdomain, my.local etc.
if 'local' in tld:
return False
# Possibly an IP address
if tld.isdigit():
return False
# forbid code.wireshark.org. Submissions could be submitted by other
# addresses if one would like to remain anonymous.
if host.endswith('.wireshark.org'):
return False
# For documentation purposes only.
if host == 'example.com':
return False
# 'peter-ubuntu32.(none)'
if '(none)' in host:
return False
return True
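# Illustrative outcomes of the checks above (not executed):
#   verify_email('[email protected]')  -> True
#   verify_email('root@localhost')       -> False ('local' appears in the TLD)
#   verify_email('[email protected]')     -> False (documentation-only domain)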
def tools_dir():
if __file__.endswith('.py'):
# Assume direct invocation from tools directory
return os.path.dirname(__file__)
# Otherwise it is a git hook. To support git worktrees, do not manually look
# for the .git directory, but query the actual top level instead.
cmd = ['git', 'rev-parse', '--show-toplevel']
srcdir = subprocess.check_output(cmd, universal_newlines=True).strip()
return os.path.join(srcdir, 'tools')
def extract_subject(subject):
'''Extracts the original subject (ignoring the Revert prefix).'''
subject = subject.rstrip('\r\n')
prefix = 'Revert "'
suffix = '"'
while subject.startswith(prefix) and subject.endswith(suffix):
subject = subject[len(prefix):-len(suffix)]
return subject
def verify_body(body):
bodynocomments = re.sub('^#.*$', '', body, flags=re.MULTILINE)
old_lines = bodynocomments.splitlines(True)
is_good = True
if len(old_lines) >= 2 and old_lines[1].strip():
print('ERROR: missing blank line after the first subject line.')
is_good = False
cleaned_subject = extract_subject(old_lines[0])
if len(cleaned_subject) > 80:
# Note that this check is also invoked by the commit-msg hook.
print('Warning: keep lines in the commit message under 80 characters.')
is_good = False
if not is_good:
print('''
Please rewrite your commit message to our standards, matching this format:
component: a very brief summary of the change
A commit message should start with a brief summary, followed by a single
blank line and an optional longer description. If the change is specific to
a single protocol, start the summary line with the abbreviated name of the
protocol and a colon.
Use paragraphs to improve readability. Limit each line to 80 characters.
''')
if any(line.startswith('Bug:') or line.startswith('Ping-Bug:') for line in old_lines):
sys.stderr.write('''
To close an issue, use "Closes #1234" or "Fixes #1234" instead of "Bug: 1234".
To reference an issue, use "related to #1234" instead of "Ping-Bug: 1234". See
https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically
for details.
''')
return False
# Cherry-picking can add an extra newline, which we'll allow.
cp_line = '\n(cherry picked from commit'
body = body.replace('\n' + cp_line, cp_line)
try:
cmd = ['git', 'stripspace']
newbody = subprocess.check_output(cmd, input=body, universal_newlines=True)
except OSError as ex:
print('Warning: unable to invoke git stripspace: %s' % (ex,))
return is_good
if newbody != body:
new_lines = newbody.splitlines(True)
diff = difflib.unified_diff(old_lines, new_lines,
fromfile='OLD/.git/COMMIT_EDITMSG',
tofile='NEW/.git/COMMIT_EDITMSG')
# Clearly mark trailing whitespace (GNU patch supports such comments).
diff = [
'# NOTE: trailing space on the next line\n%s' % (line,)
if len(line) > 2 and line[-2].isspace() else line
for line in diff
]
print('The commit message does not follow our standards.')
print('Please rewrite it (there are likely whitespace issues):')
print('')
print(''.join(diff))
return False
return is_good
def verify_merge_request():
# Not needed if/when https://gitlab.com/gitlab-org/gitlab/-/issues/23308 is fixed.
gitlab_api_pfx = "https://gitlab.com/api/v4"
# gitlab.com/wireshark/wireshark = 7898047
project_id = os.getenv('CI_MERGE_REQUEST_PROJECT_ID')
ansi_csi = '\x1b['
ansi_codes = {
'black_white': ansi_csi + '30;47m',
'bold_red': ansi_csi + '31;1m', # gitlab-runner errors
'reset': ansi_csi + '0m'
}
m_r_iid = os.getenv('CI_MERGE_REQUEST_IID')
if project_id is None or m_r_iid is None:
print("This doesn't appear to be a merge request. CI_MERGE_REQUEST_PROJECT_ID={}, CI_MERGE_REQUEST_IID={}".format(project_id, m_r_iid))
return True
m_r_url = '{}/projects/{}/merge_requests/{}'.format(gitlab_api_pfx, project_id, m_r_iid)
req = urllib.request.Request(m_r_url)
# print('req', repr(req), m_r_url)
with urllib.request.urlopen(req) as resp:
resp_json = resp.read().decode('utf-8')
# print('resp', resp_json)
m_r_attrs = json.loads(resp_json)
try:
if not m_r_attrs['allow_collaboration']:
print('''\
{bold_red}ERROR:{reset} Please edit your merge request and make sure the setting
{black_white}✅ Allow commits from members who can merge to the target branch{reset}
is checked so that maintainers can rebase your change and make minor edits.\
'''.format(**ansi_codes))
return False
except KeyError:
sys.stderr.write('This appears to be a merge request, but we were not able to fetch the "Allow commits" status\n')
return True
def main():
args = parser.parse_args()
commit = args.commit
# If called from commit-msg script, just validate that part and return.
if args.commitmsg:
try:
with open(args.commitmsg) as f:
return 0 if verify_body(f.read()) else 1
except OSError as ex:
print("Couldn't verify body of message from file '%s': %s" % (args.commitmsg, ex))
return 1
if os.getenv('CI_MERGE_REQUEST_EVENT_TYPE') == 'merge_train':
print("If we were on the love train, people all over the world would be joining hands for this merge request.\nInstead, we're on a merge train so we're skipping commit validation checks. ")
return 0
cmd = ['git', 'show', '--no-patch',
'--format=%h%n%an%n%ae%n%B', commit, '--']
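# Format fields: %h = abbreviated hash, %an = author name, %ae = author email, %B = raw body (subject + body).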
output = subprocess.check_output(cmd, universal_newlines=True)
# For some reason there is always an additional LF in the output, drop it.
if output.endswith('\n\n'):
output = output[:-1]
abbrev, author_name, author_email, body = output.split('\n', 3)
subject = body.split('\n', 1)[0]
# If called directly (from the tools directory), print the commit that was
# being validated. If called from a git hook (without .py extension), try to
# remain silent unless there are issues.
if __file__.endswith('.py'):
print('Checking commit: %s %s' % (abbrev, subject))
exit_code = 0
if not verify_name(author_name):
print('Disallowed author name: {}'.format(author_name))
exit_code = 1
if not verify_email(author_email):
print('Disallowed author email address: {}'.format(author_email))
exit_code = 1
if exit_code:
print_git_user_instructions()
if not verify_body(body):
exit_code = 1
if not verify_merge_request():
exit_code = 1
return exit_code
if __name__ == '__main__':
try:
sys.exit(main())
except subprocess.CalledProcessError as ex:
print('\n%s' % ex)
sys.exit(ex.returncode)
except KeyboardInterrupt:
sys.exit(130) |
Shell Script | wireshark/tools/validate-diameter-xml.sh | #!/bin/bash
# A small script to run xmllint on the Diameter XML files (after doing some
# fixups to those files).
#
# Copyright 2016 Jeff Morriss <jeff.morriss.ws [AT] gmail.com>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
# SPDX-License-Identifier: GPL-2.0-or-later
if ! type -p sed > /dev/null
then
echo "'sed' is needed to run $0." 1>&2
# Exit cleanly because we don't want pre-commit to fail just because
# someone doesn't have the tools...
exit 0
fi
if ! type -p xmllint > /dev/null
then
echo "'xmllint' is needed to run $0." 1>&2
# Exit cleanly because we don't want pre-commit to fail just because
# someone doesn't have the tools...
exit 0
fi
src_dir="$(dirname "$0")/.."
diameter_dir="$src_dir/resources/protocols/diameter"
# Ideally this would work regardless of our cwd
if [ ! -r "$diameter_dir/dictionary.xml" ]
then
echo "Couldn't find $diameter_dir/dictionary.xml" 1>&2
exit 1
fi
if [ ! -r "$diameter_dir/dictionary.dtd" ]
then
echo "Couldn't find $diameter_dir/dictionary.dtd" 1>&2
exit 1
fi
if ! tmpdir=$(mktemp -d); then
echo "Could not create temporary directory" >&2
exit 1
fi
trap 'rm -rf "$tmpdir"' EXIT
# First edit all the AVP names that start with "3GPP" to indicate "TGPP".
# XML doesn't allow ID's to start with a digit but:
# 1) We don't *really* care if it's valid XML
# 2) (but) we do want to use xmllint to find problems
# 3) (and) users see the AVP names. Showing them "TGPP" instead of "3GPP"
# is annoying enough to warrant this extra work.
# Declare and populate associative exceptions array
declare -A exceptions=(
["3GPP"]="TGPP"
["5QI"]="FiveQI"
)
# Loop through the exceptions, building the sed options
sedopts=
for e in ${!exceptions[@]}; do
sedopts="${sedopts}s/name=\"$e/name=\"${exceptions[$e]}/;"
done
# Delete the last character, i.e., the trailing semicolon
sedopts=${sedopts%?}
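# At this point sedopts is e.g. (associative-array key order is unspecified):
# s/name="3GPP/name="TGPP/;s/name="5QI/name="FiveQI/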
cp "$diameter_dir/dictionary.dtd" "$tmpdir" || exit 1
for f in "$diameter_dir"/*.xml
do
sed "${sedopts}" "$f" > "$tmpdir/${f##*/}" || exit 1
done
xmllint --noout --noent --postvalid "$tmpdir/dictionary.xml" &&
echo "Diameter dictionary is (mostly) valid XML."
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 8
# tab-width: 8
# indent-tabs-mode: t
# End:
#
# vi: set shiftwidth=8 tabstop=8 noexpandtab:
# :indentSize=8:tabSize=8:noTabs=false:
# |
wireshark/tools/vg-suppressions | # This file lists suppressions to hide valgrind errors in libraries we don't
# control. Be careful adding to it, since overly-broad suppressions may hide
# real errors in Wireshark!
#
# This is primarily targeted towards the set of libraries on the fuzz-bot (which
# runs a valgrind step) but other entries are welcome as long as they are
# sufficiently commented.
{
Libgcrypt leak (gcry_control)
Memcheck:Leak
match-leak-kinds: reachable
fun:malloc
...
fun:gcry_control
fun:epan_init
fun:main
}
{
Libgcrypt leak (gcry_check_version)
Memcheck:Leak
match-leak-kinds: reachable
fun:malloc
...
fun:epan_get_runtime_version_info
fun:get_tshark_runtime_version_info
fun:get_runtime_version_info
fun:main
}
{
Glib Leak (g_get_charset)
Memcheck:Leak
match-leak-kinds: reachable
fun:*alloc
...
fun:g_get_charset
}
{
Glib Leak (g_get_filename_charsets)
Memcheck:Leak
match-leak-kinds: reachable
fun:*alloc
...
fun:g_get_filename_charsets
}
{
Glib Leak (g_strerror)
Memcheck:Leak
match-leak-kinds: reachable
fun:*alloc
...
fun:g_strerror
}
{
Glib leak (g_get_home_dir)
Memcheck:Leak
match-leak-kinds: reachable
...
fun:g_get_home_dir
}
{
Glib leak (get_global_random) - requires glib debug symbols
Memcheck:Leak
match-leak-kinds: reachable
...
fun:get_global_random
fun:g_random_*
}
{
Glib leak (g_get_user_config_dir)
Memcheck:Leak
match-leak-kinds: reachable
...
fun:g_get_user_config_dir
}
{
Glib leak (g_module)
Memcheck:Leak
match-leak-kinds: reachable
...
fun:g_module_*
...
}
{
Glib leak (g_private_get) - requires glib debugging symbols installed
Memcheck:Leak
match-leak-kinds: reachable
fun:malloc
...
fun:g_private_get*
}
{
Glib leak (g_log)
Memcheck:Leak
match-leak-kinds: reachable
fun:malloc
fun:g_malloc
...
fun:g_log_set_handler_full
}
{
Libc and GLib leak (dl_init)
Memcheck:Leak
fun:*alloc
...
fun:call_init.part.0
...
fun:_dl_init
} |
|
wireshark/tools/win-setup.ps1 | #
# win-setup - Prepare a Windows development environment for building Wireshark.
#
# Copyright 2015 Gerald Combs <[email protected]>
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#requires -version 2
# To do:
# - Use Expand-Archive instead of `cmake -E tar`? That requires PS >= 5.0
<#
.SYNOPSIS
Prepare a Windows development environment for building Wireshark.
.DESCRIPTION
This script downloads and extracts third-party libraries required to compile
Wireshark.
.PARAMETER Destination
Specifies the destination directory for the downloaded files. The path must
contain the pattern "wireshark-*-libs".
.PARAMETER Platform
Target platform. Must be one of "x64" or "arm64".
.PARAMETER CMakeExecutable
Specifies the path to the CMake executable, which is used to extract archives.
.INPUTS
-Destination Destination directory.
-Platform Target platform.
-CMakeExecutable Path to CMake.
.OUTPUTS
A set of libraries required to compile Wireshark on Windows, along with
their compressed archives.
A manifest file (library-manifest.xml)
.EXAMPLE
C:\PS> .\tools\win-setup.ps1 -Destination C:\wireshark-master-64-libs -Platform x64
#>
Param(
[Parameter(Mandatory=$true, Position=0)]
[ValidateScript({$_ -like "*[/\]wireshark-*-libs"})]
[String]
$Destination,
[Parameter(Mandatory=$true, Position=1)]
[ValidateSet("x64", "arm64")]
[String]
$Platform,
[Parameter(Mandatory=$false, Position=3)]
[ValidateScript({$_ | Test-Path -Type leaf })]
[String]
$CMakeExecutable = "CMake"
)
# Variables
# We create and delete files and directories. Bail out at the first sign of
# trouble instead of trying to catch exceptions everywhere.
$ErrorActionPreference = "Stop"
# Archive file / SHA256
$X64Archives = @{
"AirPcap/AirPcap_Devpack_4_1_0_1622.zip" = "09d637f28a79b1d2ecb09f35436271a90c0f69bd0a1ee82b803abaaf63c18a69";
"bcg729/bcg729-1.0.4-win64ws.zip" = "9a095fda4c39860d96f0c568830faa6651cd17635f68e27aa6de46c689aa0ee2";
"brotli/brotli-1.0.9-1-win64ws.zip" = "3f8d24aec8668201994327ff8d8542fe507d1d468a500a1aec50d0415f695aab";
"c-ares/c-ares-1.18.1-1-win64ws.zip" = "61183970996150e2eb137dfa7f5842ffa6e0eec2819634d5bdadc84013f8411d";
"gnutls/gnutls-3.7.9-1-x64-mingw-dynamic-ws.zip" = "d60148df60ac8dfde59bc89d4141fe3ee5735a0bd53f7d28dbb3f6f69149c23a";
"krb5/krb5-1.20.1-1-x64-windows-ws.zip" = "a1e5c582afce6e2f72f0f5bd66df2c0f3cc984532a1da5314fc89d7b7f29cdbf";
"libgcrypt/libgcrypt-1.10.2-2-x64-mingw-dynamic-ws.zip" = "477cfce91d791b34df75a5ad83626f1ac2ee147eff7965e52266a4fc3da0f920";
"libilbc/libilbc-2.0.2-4-x64-windows-ws.zip" = "4f35a1ffa03c89bf473f38249282a7867b203988d2b6d3d2f0924764619fd5f5";
"libmaxminddb/libmaxminddb-1.4.3-1-win64ws.zip" = "ee89944a19ab6e1c873bdecb9fc6205d317c41e6da6ec1d30bc892fddfd143da";
"libpcap/libpcap-1.10.1-1-win64ws.zip" = "59f8e0e90a3ab5671df561266ed2b02870a6f8f3a895b80c9db19fea9a12ffb2";
"libsmi/libsmi-2021-01-15-1-x64-windows-ws.zip" = "54a40c061132edaf1725a6141f88e02f607a90c3d577eacae7f8c16c7757a84c";
"libssh/libssh-0.10.5-1-x64-mingw-dynamic-ws.zip" = "9c1410d1033a540d118e17938905144956291b4c6ca7a9b7af6959b2632a1aaa";
"lua/lua-5.2.4-unicode-win64-vc14.zip" = "e8968d2c7871ce1ea82cbd29ac1b3a2c59d3dec25e483c5e12de85df66f5d928";
"lz4/lz4-1.9.3-1-win64ws.zip" = "7129515893ffdc439f4ffe9673c4bc43f9042e910bb2607e68dde6b99a1ab058";
"minizip/minizip-1.2.11-4-win64ws.zip" = "dd6bf24e2d946465ad19aa4f8c38e0db91da6585887935de68011982cd6fb2cb";
"nghttp2/nghttp2-1.49.0-1-win64ws.zip" = "215919ec20be62101d4704ec2464bfb72c5677126c5245b92ba495a3d30642ca";
"opus/opus-1.3.1-3-win64ws.zip" = "1f7a55a6d2d7215dffa4a43bca8ca05024bd4ba1ac3d0d0c405fd38b09cc2205";
"pcre2/pcre2-10.40-1-win64ws.zip" = "17eee615990b23bc859a862c19f5ac10c61776587603bc452285abe073a0fad9";
"sbc/sbc-2.0-1-x64-windows-ws.zip" = "d1a58f977dcffa168b11b280bd10228191582d263b7c901e50cde7c1c43d9c04";
"snappy/snappy-1.1.9-1-win64ws.zip" = "fa907724be019bcc55d27ebe88257ba8898b5c38b719099b8164ac78600d81cc";
"spandsp/spandsp-0.0.6-5-x64-windows-ws.zip" = "cbb18310876ec6f081662253a2d37f5174ac60c58b0b7cd6759852fbcfaa7d7f";
"speexdsp/speexdsp-1.21.1-1-win64ws.zip" = "d36db62e64ffaee38d9f607bef07d3778d8957ad29757f3eba169eb135f1a4e5";
"vcpkg-export/vcpkg-export-20220726-1-win64ws.zip" = "b1eaa8124802532fa8d30789219906f90fb80908844e4458327b3f73995a44b0";
"WinSparkle/WinSparkle-0.8.0-4-gb320893.zip" = "3ae42326bcd34594bc21b1e7948863a839ee76e87d9f4cf6b59b9d9f9a083881";
"zstd/zstd-1.5.2-1-win64ws.zip" = "d920afe636951cfcf144824d9c075d1f2c13387f4739152fe185fd9c09fc58f2";
}
$Arm64Archives = @{
"bcg729/bcg729-1.1.1-1-win64armws.zip" = "f4d76b9acf0d0e12e87a020e9805d136a0e8775e061eeec23910a10828153625";
"brotli/brotli-1.0.9-1-win64armws.zip" = "5ba1b62ebc514d55c3eae85a00ff107e587b6e7cb1275e2d33fcddcd49f8e2af";
"c-ares/c-ares-1.19.0-1-win64armws.zip" = "3e02db0c77303fcd5e9b85f2abe7b48ed79b0ed5d3bdada291a71842e91a6215";
"gnutls/gnutls-3.7.9-1-arm64-mingw-dynamic-ws.zip" = "932f07fbb33bf1125dbd7be2806cd0e84fd3fc957f3dbc1245b47699d10982c7";
"krb5/krb5-1.20.1-1-arm64-windows-ws.zip" = "6afe3185ea7621224544683a89d7c724d32bef6f1b552738dbc713ceb2151437";
"libgcrypt/libgcrypt-1.10.2-2-arm64-mingw-dynamic-ws.zip" = "cd42fa2739a204e129d655e1b0dda83ceb27399812b8b2eccddae4a9ecd8d0ce";
"libilbc/libilbc-2.0.2-4-arm64-windows-ws.zip" = "00a506cc1aac8a2e31856e463a555d899b5a6ccf376485a124104858ccf0be6d";
"libmaxminddb/libmaxminddb-1.4.3-1-win64armws.zip" = "9996327f301cb4a4de797bc024ad0471acd95c1850a2afc849c57fcc93360610";
"libpcap/libpcap-1.10.1-1-win64armws.zip" = "c0c5d42d96cc407303d71ba5afd06615c660228fa2260d7ecbc8453140529137";
"libsmi/libsmi-2021-01-15-1-arm64-windows-ws.zip" = "938249047575aaab2a4f99945b44a46fe3e7190f7a75d6e985d8a6f49dec9ceb";
"libssh/libssh-0.10.5-1-arm64-mingw-dynamic-ws.zip" = "b99c9573d9a30ba2898ce6ac131b23b1699009761d5dbe351a1a958cca0f85ca";
"lua/lua-5.2.4-unicode-arm64-windows-vc17.zip" = "5848e23352e35b69f4cdabaca3754c2c5fb11e5461bb92b71e059e558e4b2d12";
"lz4/lz4-1.9.4-1-win64armws.zip" = "59a3ed3f9161be7614a89afd2ca21c43f26dd916afd4aa7bfdc4b148fb10d485";
"minizip/minizip-1.2.13-1-win64armws.zip" = "b1e79d8feb01b89cebc1e9fed7765d29f5eb412d11bfcf07217fb645863deb2c";
"nghttp2/nghttp2-1.51.0-1-win64armws.zip" = "ede5c53fd46ab12b15ff9758cdc2891731bc1475c589681aa10e6aaf2217656c";
"opus/opus-1.4-1-win64armws.zip" = "51d10381360d5691b2022dde5b284266d9b0ce9a3c9bd7e86f9a4ff1a4f7d904";
"pcre2/pcre2-10.40-1-win64armws.zip" = "e8fc7542845900e7dbecfa4a10d7ec17edf72bc0e8d433268bee111f1d4947d3";
"sbc/sbc-2.0-1-arm64-windows-ws.zip" = "83cfe4a8b6fa5bae253ecacc1c02e6e4c61b4ad9ad0e5e63f0f30422fb6eac96";
"snappy/snappy-1.1.9-1-win64armws.zip" = "f3f6ec841024d18df06934ff70f44068a4e8f1008eca1f363257645647f74d4a";
"spandsp/spandsp-0.0.6-5-arm64-windows-ws.zip" = "fdf01e3c33e739ff9399b7d42cd8230c97cb27ce51865a0f06285a8f68206b6c";
"speexdsp/speexdsp-1.2.1-1-win64armws.zip" = "1759a9193065f27e50dd79dbb1786d24031ac43ccc48c40dca46d8a48552e3bb";
"vcpkg-export/vcpkg-export-20230502-1-win64armws.zip" = "94bc2d98bcb86e79569c7bf638cde8d63175cd65cf07cc219890cdc713707ce9";
"WinSparkle/WinSparkle-0.8.0-4-gb320893.zip" = "3ae42326bcd34594bc21b1e7948863a839ee76e87d9f4cf6b59b9d9f9a083881";
"zstd/zstd-1.5.5-1-win64armws.zip" = "0e448875380cc5d5f5539d994062201bfa564e4a27466bc3fdfec84d9008e51d";
}
# Subdirectory to extract an archive to
$ArchivesSubDirectory = @{
"AirPcap/AirPcap_Devpack_4_1_0_1622.zip" = "AirPcap_Devpack_4_1_0_1622";
}
# Plain file downloads
$X64Files = @{
# Nothing here
}
$Arm64Files = @{
# Nothing here
}
$Archives = $X64Archives;
$Files = $X64Files;
if ($Platform -eq "arm64") {
$Archives = $Arm64Archives;
$Files = $Arm64Files;
}
$CurrentManifest = $Archives + $Files
$CleanupItems = @(
"bcg729-1.0.4-win??ws"
"brotli-1.0.*-win??ws"
"c-ares-1.9.1-1-win??ws"
"c-ares-1.1*-win??ws"
"gnutls-3.?.*-*-win??ws"
"krb5-*-win??ws"
"libgcrypt-*-win??ws"
"libilbc-2.0.2-3-win??ws"
"libmaxminddb-1.4.3-1-win??ws"
"libpcap-1.9.1-1-win??ws"
"libsmi-0.4.8"
"libsmi-svn-40773-win??ws"
"libssh-0.*-win??ws"
"libxml2-*-win??ws"
"lua5.1.4"
"lua5.2.?"
"lua5.2.?-win??"
"lua-5.?.?-unicode-win??-vc??"
"lz4-*-win??ws"
"MaxMindDB-1.3.2-win??ws"
"minizip-*-win??ws"
"nghttp2-*-win??ws"
"opus-1.3.1-?-win??ws"
"pcre2-*-win??ws"
"sbc-1.3-win??ws"
"snappy-1.1.*-win??ws"
"spandsp-0.0.6-win??ws"
"speexdsp-*-win??ws"
"user-guide"
"vcpkg-export-*-win??ws"
"zstd-*-win??ws"
"AirPcap_Devpack_4_1_0_1622"
"WinSparkle-0.3-44-g2c8d9d3-win??ws"
"WinSparkle-0.5.?"
"current-tag.txt"
"library-manifest.xml"
)
# The dev-libs site repository is at
# https://gitlab.com/wireshark/wireshark-development-libraries
[Uri] $DownloadPrefix = "https://dev-libs.wireshark.org/windows/packages"
$proxy = $null
# Functions
# Verifies the contents of a file against a SHA256 hash.
# Returns success (0) if the file exists and verifies.
# Returns error (1) if the file does not exist.
# Returns error (2) if the integrity check fails (an error is also printed).
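# Example (illustrative): if ((VerifyIntegrity "lua.zip" $expectedHash) -ne 0) { <re-download or abort> }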
function VerifyIntegrity($filename, $hash) {
# Use absolute path because PS and .NET may have different working directories.
$filepath = Convert-Path -Path $filename -ErrorAction SilentlyContinue
if (-not ($filepath)) {
return 1
}
# may throw due to permission error, I/O error, etc.
try { $stream = [IO.File]::OpenRead($filepath) } catch { throw }
try {
$sha256 = New-Object Security.Cryptography.SHA256Managed
$binaryHash = $sha256.ComputeHash([IO.Stream]$stream)
$hexHash = ([System.BitConverter]::ToString($binaryHash) -Replace "-").ToLower()
$hash = $hash.ToLower()
if ($hexHash -ne $hash) {
Write-Warning "$($filename): computed file hash $hexHash did NOT match $hash"
return 2
}
return 0
} finally {
$stream.Close()
}
}
# Downloads a file and checks its integrity. If a corrupt file already exists,
# it is removed and re-downloaded. Succeeds only if the SHA256 hash matches.
function DownloadFile($fileName, $fileHash, [Uri] $fileUrl = $null) {
if ([string]::IsNullOrEmpty($fileUrl)) {
$fileUrl = "$DownloadPrefix/$fileName"
}
$destinationFile = "$Destination\" + [string](Split-Path -Leaf $fileName)
if (Test-Path $destinationFile -PathType 'Leaf') {
if ((VerifyIntegrity $destinationFile $fileHash) -ne 0) {
Write-Output "$fileName is corrupt, removing and retrying download."
Remove-Item $destinationFile
} else {
Write-Output "$fileName already there; not retrieving."
return
}
}
if (-not ($Script:proxy)) {
$Script:proxy = [System.Net.WebRequest]::GetSystemWebProxy()
$Script:proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials
}
Write-Output "Downloading $fileUrl into $Destination"
$webClient = New-Object System.Net.WebClient
$webClient.proxy = $Script:proxy
$webClient.DownloadFile($fileUrl, "$destinationFile")
Write-Output "Verifying $destinationFile"
if ((VerifyIntegrity $destinationFile $fileHash) -ne 0) {
Write-Output "Download is corrupted, aborting!"
exit 1
}
}
function DownloadArchive($fileName, $fileHash, $subDir) {
DownloadFile $fileName $fileHash
$archiveFile = "$Destination\" + [string](Split-Path -Leaf $fileName)
$archiveDir = "$Destination\$subDir"
if ($subDir -and -not (Test-Path $archiveDir -PathType 'Container')) {
New-Item -ItemType Directory -Path $archiveDir > $null
}
$activity = "Extracting into $($archiveDir)"
Write-Progress -Activity "$activity" -Status "Extracting $archiveFile using CMake ..."
Push-Location "$archiveDir"
& "$CMakeExecutable" -E tar xf "$archiveFile" 2>&1 | Set-Variable -Name CMakeOut
$cmStatus = $LASTEXITCODE
Pop-Location
Write-Progress -Activity "$activity" -Status "Done" -Completed
if ($cmStatus -gt 0) {
Write-Output $CMakeOut
exit 1
}
}
# On with the show
# Make sure $Destination exists and do our work there.
if ( -not (Test-Path $Destination -PathType 'Container') ) {
New-Item -ItemType 'Container' "$Destination" > $null
}
# CMake's file TO_NATIVE_PATH passive-aggressively omits the drive letter.
Set-Location "$Destination"
$Destination = $(Get-Item -Path ".\")
Write-Output "Working in $Destination"
# Check our last known state
$destinationManifest = @{ "INVALID" = "INVALID" }
$manifestFile = "library-manifest.xml"
if ((Test-Path $manifestFile -PathType 'Leaf') -and -not ($Force)) {
$destinationManifest = Import-Clixml $manifestFile
}
function ManifestList($manifestHash) {
$manifestHash.keys | Sort-Object | ForEach-Object { "$_ : $($manifestHash[$_])" }
}
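# ManifestList turns the hash into sorted "name : hash" lines, e.g. (illustrative):
# "lua/lua-5.2.4-unicode-win64-vc14.zip : e8968d2c..."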
if (Compare-Object -ReferenceObject (ManifestList($destinationManifest)) -DifferenceObject (ManifestList($CurrentManifest))) {
Write-Output "Current library manifest not found. Refreshing."
$activity = "Removing directories"
foreach ($oldItem in $CleanupItems) {
if (Test-Path $oldItem) {
Write-Progress -Activity "$activity" -Status "Removing $oldItem"
Remove-Item -force -recurse $oldItem
}
}
Write-Progress -Activity "$activity" -Status "Done" -Completed
} else {
Write-Output "Current library manifest found. Skipping download."
exit 0
}
# Download files
foreach ($item in $Files.GetEnumerator() | Sort-Object -property key) {
DownloadFile $item.Name $item.Value
}
# Download and extract archives
foreach ($item in $Archives.GetEnumerator() | Sort-Object -property key) {
$subDir = $ArchivesSubDirectory[$item.Name]
DownloadArchive $item.Name $item.Value $subDir
}
# Save our last known state
$CurrentManifest | Export-Clixml -Path $manifestFile -Encoding utf8 |
|
Python | wireshark/tools/WiresharkXML.py | """
Routines for reading PDML produced from TShark.
Copyright (c) 2003, 2013 by Gilbert Ramirez <[email protected]>
SPDX-License-Identifier: GPL-2.0-or-later
"""
import sys
import xml.sax
from xml.sax.saxutils import quoteattr
from io import StringIO
class CaptureFile:
pass
class FoundItException(Exception):
"""Used internally for exiting a tree search"""
pass
class PacketList:
"""Holds Packet objects, and has methods for finding
items within it."""
def __init__(self, children=None):
if children is None:
self.children = []
else:
self.children = children
def __getitem__(self, index):
"""We act like a list."""
return self.children[index]
def __len__(self):
return len(self.children)
def item_exists(self, name):
"""Does an item with name 'name' exist in this
PacketList? Returns True or False."""
for child in self.children:
if child.name == name:
return True
try:
for child in self.children:
child._item_exists(name)
except FoundItException:
return True
return False
def _item_exists(self, name):
for child in self.children:
if child.name == name:
raise FoundItException
child._item_exists(name)
def get_items(self, name, items=None):
"""Return all items that match the name 'name'.
They are returned in order of a depth-first search."""
if items is None:
top_level = 1
items = []
else:
top_level = 0
for child in self.children:
if child.name == name:
items.append(child)
child.get_items(name, items)
if top_level:
return PacketList(items)
def get_items_before(self, name, before_item, items=None):
"""Return all items that match the name 'name' that
exist before the before_item. The before_item is an object.
They results are returned in order of a depth-first-search.
This function allows you to find fields from protocols that occur
before other protocols. For example, if you have an HTTP
protocol, you can find all tcp.dstport fields *before* that HTTP
protocol. This helps analyze in the presence of tunneled protocols."""
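# e.g. (illustrative): ports = pkt.get_items_before('tcp.dstport', http_proto_node)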
if items is None:
top_level = 1
items = []
else:
top_level = 0
for child in self.children:
if top_level == 1 and child == before_item:
break
if child.name == name:
items.append(child)
# Call get_items because the 'before_item' applies
# only to the top level search.
child.get_items(name, items)
if top_level:
return PacketList(items)
class ProtoTreeItem(PacketList):
def __init__(self, xmlattrs):
PacketList.__init__(self)
self.name = xmlattrs.get("name", "")
self.showname = xmlattrs.get("showname", "")
self.pos = xmlattrs.get("pos", "")
self.size = xmlattrs.get("size", "")
self.value = xmlattrs.get("value", "")
self.show = xmlattrs.get("show", "")
self.hide = xmlattrs.get("hide", "")
def add_child(self, child):
self.children.append(child)
def get_name(self):
return self.name
def get_showname(self):
return self.showname
def get_pos(self):
return self.pos
def get_size(self):
return self.size
def get_value(self):
return self.value
def get_show(self):
return self.show
def get_hide(self):
return self.hide
def dump(self, fh=sys.stdout):
if self.name:
print(" name=%s" % (quoteattr(self.name),), end=' ', file=fh)
if self.showname:
print("showname=%s" % (quoteattr(self.showname),), end=' ', file=fh)
if self.pos:
print("pos=%s" % (quoteattr(self.pos),), end=' ', file=fh)
if self.size:
print("size=%s" % (quoteattr(self.size),), end=' ', file=fh)
if self.value:
print("value=%s" % (quoteattr(self.value),), end=' ', file=fh)
if self.show:
print("show=%s" % (quoteattr(self.show),), end=' ', file=fh)
if self.hide:
print("hide=%s" % (quoteattr(self.hide),), end=' ', file=fh)
class Packet(ProtoTreeItem, PacketList):
def dump(self, fh=sys.stdout, indent=0):
print(" " * indent, "<packet>", file=fh)
indent += 1
for child in self.children:
child.dump(fh, indent)
print(" " * indent, "</packet>", file=fh)
class Protocol(ProtoTreeItem):
def dump(self, fh=sys.stdout, indent=0):
print("%s<proto " % (" " * indent,), end=' ', file=fh)
ProtoTreeItem.dump(self, fh)
print('>', file=fh)
indent += 1
for child in self.children:
child.dump(fh, indent)
print(" " * indent, "</proto>", file=fh)
class Field(ProtoTreeItem):
def dump(self, fh=sys.stdout, indent=0):
print("%s<field " % (" " * indent,), end=' ', file=fh)
ProtoTreeItem.dump(self, fh)
if self.children:
print(">", file=fh)
indent += 1
for child in self.children:
child.dump(fh, indent)
print(" " * indent, "</field>", file=fh)
else:
print("/>", file=fh)
class ParseXML(xml.sax.handler.ContentHandler):
ELEMENT_FILE = "pdml"
ELEMENT_FRAME = "packet"
ELEMENT_PROTOCOL = "proto"
ELEMENT_FIELD = "field"
def __init__(self, cb):
self.cb = cb
self.chars = ""
self.element_stack = []
def startElement(self, name, xmlattrs):
self.chars = ""
if name == self.ELEMENT_FILE:
# Eventually, we should check version number of pdml here
elem = CaptureFile()
elif name == self.ELEMENT_FRAME:
elem = Packet(xmlattrs)
elif name == self.ELEMENT_PROTOCOL:
elem = Protocol(xmlattrs)
elif name == self.ELEMENT_FIELD:
elem = Field(xmlattrs)
else:
sys.exit("Unknown element: %s" % (name,))
self.element_stack.append(elem)
def endElement(self, name):
elem = self.element_stack.pop()
# if isinstance(elem, Field):
# if elem.get_name() == "frame.number":
# print("Packet:", elem.get_show(), file=sys.stderr)
# Add element as child to previous element as long
# as there is more than 1 element in the stack. Only
# one element in the stack means that the element in
# the stack is the single CaptureFile element, and we don't
# want to add this element to that, as we only want one
# Packet element in memory at a time.
if len(self.element_stack) > 1:
parent_elem = self.element_stack[-1]
parent_elem.add_child(elem)
self.chars = ""
# If we just finished a Packet element, hand it to the
# user's callback.
if isinstance(elem, Packet):
self.cb(elem)
def characters(self, chars):
self.chars = self.chars + chars
def _create_parser(cb):
"""Internal function for setting up the SAX parser."""
# Create a parser
parser = xml.sax.make_parser()
# Create the handler
handler = ParseXML(cb)
# Tell the parser to use our handler
parser.setContentHandler(handler)
# Don't fetch the DTD, in case it is listed
parser.setFeature(xml.sax.handler.feature_external_ges, False)
return parser
def parse_fh(fh, cb):
"""Parse a PDML file, given filehandle, and call the callback function (cb),
once for each Packet object."""
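# e.g. (illustrative): parse_fh(open('capture.pdml'), lambda packet: packet.dump())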
parser = _create_parser(cb)
# Parse the file
parser.parse(fh)
# Close the parser; this errors out, but I'm not sure why.
#parser.close()
def parse_string(text, cb):
"""Parse the PDML contained in a string."""
stream = StringIO(text)
parse_fh(stream, cb)
def _test():
import sys
def test_cb(obj):
pass
filename = sys.argv[1]
fh = open(filename, "r")
parse_fh(fh, test_cb)
if __name__ == '__main__':
_test() |
Python | wireshark/tools/wireshark_be.py | # -*- python -*-
#
# File : wireshark_be.py
#
# Author : Frank Singleton ([email protected])
#
# Copyright (C) 2001 Frank Singleton, Ericsson Inc.
#
# This file is a backend to "omniidl", used to generate "Wireshark"
# dissectors from IDL descriptions. The output language generated
# is "C". It will generate code to use the GIOP/IIOP get_CDR_XXX API.
#
# Please see packet-giop.h in Wireshark distro for API description.
# Wireshark is available at https://www.wireshark.org/
#
# Omniidl is part of the OmniOrb distribution, and is available at
# http://omniorb.sourceforge.net
#
# SPDX-License-Identifier: GPL-2.0-or-later
# Description:
#
# Omniidl Back-end which parses an IDL data structure provided by the frontend
# and generates packet-idl-xxx.[ch] for compiling as a dissector in Wireshark.
#
#
# Strategy.
#
# Crawl all the way down all branches until I hit "Operation", "Enum", "Attribute",
# "Struct" and "Union" nodes. Then store these nodes in lists.
#
# Pass these lists (via an object ref) to the src code
# generator (wireshark_gen) class and let it do the hard work !
#
#
# Don't forget structs can contain embedded structs etc .. so don't forget
# to peek inside and check :-)
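#
# Typical invocation (illustrative; assumes omniidl can find this back-end):
#
# omniidl -p ./ -b wireshark_be echo.idl > packet-echo-idl.c
#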
"""Wireshark IDL compiler back-end."""
from __future__ import print_function
import string
import sys
from os import path
from omniidl import idlast, idltype, output
from wireshark_gen import wireshark_gen_C
class WiresharkVisitor:
"""This class finds the "Operation" nodes ,Enum Nodes, "Attribute" nodes, Struct Nodes
and Union Nodes. Then it hands them off to an instance of the source code generator
class "wireshark_gen" """
def __init__(self, st, debug=False):
self.DEBUG = debug
self.st = st
self.oplist = [] # list of operation nodes
self.enlist = [] # list of enum nodes
self.atlist = [] # list of attribute nodes
self.stlist = [] # list of struct nodes
self.unlist = [] # list of union nodes
def visitAST(self, node):
if self.DEBUG:
print("XXX visitAST() node = ", node)
for n in node.declarations():
if isinstance(n, idlast.Module):
self.visitModule(n)
if isinstance(n, idlast.Interface):
self.visitInterface(n)
if isinstance(n, idlast.Operation):
self.visitOperation(n)
if isinstance(n, idlast.Attribute):
self.visitAttribute(n)
if isinstance(n, idlast.Enum):
self.visitEnum(n)
if isinstance(n, idlast.Struct):
self.visitStruct(n)
if isinstance(n, idlast.Union):
self.visitUnion(n)
# Check for Typedef structs and unions
if isinstance(n, idlast.Typedef):
self.visitTypedef(n) # who are you ?
def visitModule(self, node):
if self.DEBUG:
print("XXX visitModule() node = ", node)
for n in node.definitions():
if isinstance(n, idlast.Module):
self.visitModule(n)
if isinstance(n, idlast.Interface):
self.visitInterface(n)
if isinstance(n, idlast.Operation):
self.visitOperation(n)
if isinstance(n, idlast.Attribute):
self.visitAttribute(n)
if isinstance(n, idlast.Enum):
self.visitEnum(n)
if isinstance(n, idlast.Struct):
self.visitStruct(n)
if isinstance(n, idlast.Union):
self.visitUnion(n)
# Check for Typedef structs and unions
if isinstance(n, idlast.Typedef):
self.visitTypedef(n) # who are you ?
def visitInterface(self, node):
if self.DEBUG:
print("XXX visitInterface() node = ", node)
for c in node.callables():
if isinstance(c, idlast.Operation):
self.visitOperation(c)
if isinstance(c, idlast.Attribute):
self.visitAttribute(c)
for d in node.contents():
if isinstance(d, idlast.Enum):
self.visitEnum(d)
if isinstance(d, idlast.Struct):
self.visitStruct(d)
if isinstance(d, idlast.Union):
self.visitUnion(d)
# Check for Typedef structs and unions
if isinstance(d, idlast.Typedef):
self.visitTypedef(d) # who are you ?
def visitOperation(self, opnode):
"""populates the operations node list "oplist" """
if opnode not in self.oplist:
self.oplist.append(opnode) # store operation node
def visitAttribute(self, atnode):
"""populates the attribute node list "atlist" """
if atnode not in self.atlist:
self.atlist.append(atnode) # store attribute node
def visitEnum(self, enode):
"""populates the Enum node list "enlist" """
if enode not in self.enlist:
self.enlist.append(enode) # store enum node if unique
def visitTypedef(self, td):
"""Search to see if its a typedef'd struct, union, or enum
eg: typdef enum colors {red, green, blue } mycolors;
"""
d = td.aliasType() # get Type, possibly Declared
if isinstance(d, idltype.Declared):
self.visitDeclared(d)
def visitDeclared(self, d):
"""Search to see if its a struct, union, or enum"""
if isinstance(d, idltype.Declared):
sue = d.decl() # grab the struct or union or enum
if isinstance(sue, idlast.Struct):
self.visitStruct(sue)
if isinstance(sue, idlast.Union):
self.visitUnion(sue)
if isinstance(sue, idlast.Enum):
self.visitEnum(sue)
def visitStruct(self, stnode):
# populates the struct node list "stlist"
# and checks its members also
if stnode not in self.stlist:
self.stlist.append(stnode) # store struct node if unique and avoid recursive loops
# if we come across recursive structs
for m in stnode.members(): # find embedded struct definitions within this
mt = m.memberType()
if isinstance(mt, idltype.Declared):
self.visitDeclared(mt) # if declared, then check it out
def visitUnion(self, unnode):
# populates the struct node list "unlist"
# and checks its members also
if unnode not in self.unlist:
self.unlist.append(unnode) # store union node if unique
if unnode.constrType(): # enum defined within switch type
if isinstance(unnode.switchType(), idltype.Declared):
self.visitDeclared(unnode.switchType())
for c in unnode.cases():
ct = c.caseType()
if isinstance(ct, idltype.Declared):
self.visitDeclared(ct) # if declared, then check it out
def run(tree, args):
DEBUG = "debug" in args
AGGRESSIVE = "aggressive" in args
st = output.Stream(sys.stdout, 4) # set indent for stream
ev = WiresharkVisitor(st, DEBUG) # create visitor object
ev.visitAST(tree) # go find some operations
# Grab name of main IDL file being compiled.
#
# Assumption: Name is of the form abcdefg.xyz (eg: CosNaming.idl)
fname = path.basename(tree.file()) # grab basename only, don't care about path
nl = fname.split(".")[0] # split name of main IDL file using "." as separator
# and grab first field (eg: CosNaming)
if DEBUG:
for i in ev.oplist:
print("XXX - Operation node ", i, " repoId() = ", i.repoId())
for i in ev.atlist:
print("XXX - Attribute node ", i, " identifiers() = ", i.identifiers())
for i in ev.enlist:
print("XXX - Enum node ", i, " repoId() = ", i.repoId())
for i in ev.stlist:
print("XXX - Struct node ", i, " repoId() = ", i.repoId())
for i in ev.unlist:
print("XXX - Union node ", i, " repoId() = ", i.repoId())
# create a C generator object
# and generate some C code
eg = wireshark_gen_C(ev.st,
nl.upper(),
nl.lower(),
nl.capitalize() + " Dissector Using GIOP API",
debug=DEBUG,
aggressive=AGGRESSIVE)
eg.genCode(ev.oplist, ev.atlist, ev.enlist, ev.stlist, ev.unlist) # pass them onto the C generator
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
# |
Python | wireshark/tools/wireshark_gen.py | # -*- python -*-
#
# wireshark_gen.py (part of idl2wrs)
#
# Author : Frank Singleton ([email protected])
#
# Copyright (C) 2001 Frank Singleton, Ericsson Inc.
#
# This file is a backend to "omniidl", used to generate "Wireshark"
# dissectors from CORBA IDL descriptions. The output language generated
# is "C". It will generate code to use the GIOP/IIOP get_CDR_XXX API.
#
# Please see packet-giop.h in Wireshark distro for API description.
# Wireshark is available at https://www.wireshark.org/
#
# Omniidl is part of the OmniOrb distribution, and is available at
# http://omniorb.sourceforge.net
#
# SPDX-License-Identifier: GPL-2.0-or-later
# Description:
#
# Omniidl Back-end which parses an IDL list of "Operation" nodes
# passed from wireshark_be2.py and generates "C" code for compiling
# as a dissector for Wireshark.
#
#
# Strategy (sneaky but ...)
#
# problem: I don't know what variables to declare until AFTER the helper functions
# have been built, so ...
#
# There are 2 passes through genHelpers, the first one is there just to
# make sure the fn_hash data struct is populated properly.
# The second pass is the real thing, generating code and declaring
# variables (from the 1st pass) properly.
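#
# Illustratively (hypothetical operation name), after pass 1 fn_hash might hold
# fn_hash["Echo_echoString"] = ["guint32 u_octet4;", "const gchar *seq = NULL;"]
# so pass 2 can emit those declarations at the top of that helper function.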
"""Wireshark IDL compiler back-end."""
from __future__ import print_function
import collections
import tempfile
import string
import random
from omniidl import idlast, idltype, idlutil, output
# Output class, generates "C" src code for the sub-dissector
#
# in:
#
#
# self - me
# st - output stream
# node - a reference to an Operations object.
# name - scoped name (Module::Module::Interface:: .. ::Operation
# TODO -- FS
#
# 1. generate hf[] data for searchable fields (but what is searchable?) [done, could be improved]
# 2. add item instead of add_text() [done]
# 3. sequence handling [done]
# 4. User Exceptions [done]
# 5. Fix arrays, and structs containing arrays [done]
# 6. Handle pragmas.
# 7. Exception can be common to many operations, so handle them outside the
# operation helper functions [done]
# 8. Automatic variable declaration [done, could be improved; still get some collisions. Add a variable delegator function]
# For example, multidimensional arrays.
# 9. wchar and wstring handling [giop API needs improving]
# 10. Support Fixed [done]
# 11. Support attributes (get/set) [started, needs language mapping option, perhaps wireshark GUI option
# to set the attribute function prefix or suffix ? ] For now the prefix is "_get" and "_set"
# eg: attribute string apple => _get_apple and _set_apple
#
# 12. Implement IDL "union" code [done]
# 13. Implement support for plugins [done]
# 14. Don't generate code for empty operations (cf: exceptions without members)
# 15. Generate code to display Enums numerically and symbolically [done]
# 16. Place structs/unions in subtrees [done]
# 17. Recursive struct and union handling [done]
# 18. Improve variable naming for display (eg: structs, unions etc) [done]
#
# Also test, Test, TEST
# Strategy:
# For every operation and attribute do
# For return val and all parameters do
# find basic IDL type for each parameter
# output get_CDR_xxx
# output exception handling code
# output attribute handling code
class wireshark_gen_C:
# Some string constants for our templates
c_u_octet8 = "guint64 u_octet8;"
c_s_octet8 = "gint64 s_octet8;"
c_u_octet4 = "guint32 u_octet4;"
c_s_octet4 = "gint32 s_octet4;"
c_u_octet2 = "guint16 u_octet2;"
c_s_octet2 = "gint16 s_octet2;"
c_u_octet1 = "guint8 u_octet1;"
c_s_octet1 = "gint8 s_octet1;"
c_float = "gfloat my_float;"
c_double = "gdouble my_double;"
c_seq = "const gchar *seq = NULL;" # pointer to buffer of gchars
c_i = "guint32 i_" # loop index
c_i_lim = "guint32 u_octet4_loop_" # loop limit
c_u_disc = "guint32 disc_u_" # unsigned int union discriminant variable name (enum)
c_s_disc = "gint32 disc_s_" # signed int union discriminant variable name (other cases, except Enum)
def __init__(self, st, protocol_name, dissector_name, description, debug=False, aggressive=False):
self.DEBUG = debug
self.AGGRESSIVE = aggressive
self.st = output.Stream(tempfile.TemporaryFile(mode="w"), 4) # for first pass only
self.st_save = st # where 2nd pass should go
self.protoname = protocol_name # Protocol Name (eg: ECHO)
self.dissname = dissector_name # Dissector name (eg: echo)
self.description = description # Detailed Protocol description (eg: Echo IDL Example)
self.exlist = [] # list of exceptions used in operations.
#self.curr_sname # scoped name of current opnode or exnode I am visiting, used for generating "C" var declares
self.fn_hash = {} # top level hash to contain key = function/exception and val = list of variable declarations
# ie a hash of lists
self.fn_hash_built = 0 # flag to indicate the 1st pass is complete, and the fn_hash is correctly
# populated with operations/vars and exceptions/vars
def genCode(self, oplist, atlist, enlist, stlist, unlist): # operation, attribute, enums, struct and union lists
"""Main entry point, controls sequence of generated code."""
# sneaky .. call it now, to populate the fn_hash
# so when I come to that exception later, I have the variables to
# declare already.
# need to reverse the lists, so that the functions of the current IDL
# is properly processed, otherwise the first name wise declaration of
# an include is taken for the function generation. Same counts for
# structs and unions.
oplist = oplist[::-1]
stlist = stlist[::-1]
enlist = enlist[::-1]
unlist = unlist[::-1]
self.genHelpers(oplist, stlist, unlist)
self.genExceptionHelpers(oplist)
self.genAttributeHelpers(atlist)
self.fn_hash_built = 1 # DONE, so now I know , see genOperation()
self.st = self.st_save
self.genHeader() # initial dissector comments
self.genWrsCopyright()
self.genGPL()
self.genIncludes()
self.genPrototype()
self.genProtocol()
self.genDeclares(oplist, atlist, enlist, stlist, unlist)
if len(atlist) > 0:
self.genAtList(atlist) # string constant declares for Attributes
if len(enlist) > 0:
self.genEnList(enlist) # string constant declares for Enums
if len(unlist) > 0:
self.genUnList(unlist)
self.genExceptionHelpers(oplist) # helper function to decode user exceptions that have members
self.genExceptionDelegator(oplist) # finds the helper function to decode a user exception
if len(atlist) > 0:
self.genAttributeHelpers(atlist) # helper function to decode "attributes"
self.genHelpers(oplist, stlist, unlist) # operation, struct and union decode helper functions
self.genMainEntryStart(oplist)
self.genOpDelegator(oplist)
self.genAtDelegator(atlist)
self.genMainEntryEnd()
self.gen_proto_register(oplist, atlist, stlist, unlist)
self.gen_proto_reg_handoff(oplist)
# All the dissectors are now built-in
#self.gen_plugin_register()
if self.DEBUG:
self.dumpvars() # debug
self.genModelines()
def genHeader(self):
"""Generate Standard Wireshark Header Comments"""
self.st.out(self.template_Header, dissector_name=self.dissname)
if self.DEBUG:
print("//XXX genHeader")
def genWrsCopyright(self):
if self.DEBUG:
print("//XXX genWrsCopyright")
self.st.out(self.template_wireshark_copyright)
def genModelines(self):
if self.DEBUG:
print("//XXX genModelines")
self.st.out(self.template_Modelines)
def genGPL(self):
if self.DEBUG:
print("//XXX genGPL")
self.st.out(self.template_GPL)
def genIncludes(self):
if self.DEBUG:
print("//XXX genIncludes")
self.st.out(self.template_Includes)
def genOpDeclares(self, op):
"""" Generate hf variables for operation filters
in: opnode ( an operation node)
"""
if self.DEBUG:
print("//XXX genOpDeclares")
print("//XXX return type = ", op.returnType().kind())
sname = self.namespace(op, "_")
rt = op.returnType()
if rt.kind() != idltype.tk_void:
if rt.kind() == idltype.tk_alias: # a typedef return val possibly?
#self.get_CDR_alias(rt, rt.name())
if rt.unalias().kind() == idltype.tk_sequence:
self.st.out(self.template_hf, name=sname + "_return_loop")
if self.isSeqNativeType(rt.unalias().seqType()) or self.AGGRESSIVE:
self.st.out(self.template_hf, name=sname + "_return")
elif (rt.unalias().kind() != idltype.tk_struct and
rt.unalias().kind() != idltype.tk_objref and
rt.unalias().kind() != idltype.tk_any):
self.st.out(self.template_hf, name=sname + "_return")
elif (rt.kind() != idltype.tk_struct and
rt.kind() != idltype.tk_objref and
rt.kind() != idltype.tk_union and
rt.kind() != idltype.tk_any):
self.st.out(self.template_hf, name=sname + "_return")
for p in op.parameters():
if p.paramType().unalias().kind() == idltype.tk_sequence:
self.st.out(self.template_hf, name=sname + "_" + p.identifier() + "_loop")
if (self.isSeqNativeType(p.paramType().unalias().seqType())) or self.AGGRESSIVE:
self.st.out(self.template_hf, name=sname + "_" + p.identifier())
elif (p.paramType().unalias().kind() != idltype.tk_any and
p.paramType().unalias().kind() != idltype.tk_struct and
p.paramType().unalias().kind() != idltype.tk_objref and
p.paramType().unalias().kind() != idltype.tk_union):
if p.paramType().unalias().kind() == idltype.tk_wchar:
self.st.out(self.template_hf, name=sname + "_" + p.identifier() + "_len")
self.st.out(self.template_hf, name=sname + "_" + p.identifier())
def genAtDeclares(self, at):
"""Generate hf variables for attributes
in: at ( an attribute)
"""
if self.DEBUG:
print("//XXX genAtDeclares")
for decl in at.declarators():
sname = self.namespace(decl, "_")
self.st.out(self.template_hf, name="get" + "_" + sname + "_" + decl.identifier())
if self.AGGRESSIVE:
self.st.out(self.template_hf, name="get" + "_" + sname + "_" + decl.identifier()+"_loop")
if not at.readonly():
self.st.out(self.template_hf, name="set" + "_" + sname + "_" + decl.identifier())
if self.AGGRESSIVE:
self.st.out(self.template_hf, name="set" + "_" + sname + "_" + decl.identifier()+"_loop")
def genStDeclares(self, st):
"""Generate hf variables for structs
in: st ( a struct)
"""
if self.DEBUG:
print("//XXX genStDeclares")
sname = self.namespace(st, "_")
for m in st.members():
if (self.isSeqNativeType(m.memberType())
or m.memberType().unalias().kind() == idltype.tk_sequence
or m.memberType().unalias().kind() == idltype.tk_alias):
for decl in m.declarators():
if m.memberType().unalias().kind() == idltype.tk_sequence:
self.st.out(self.template_hf, name=sname + "_" + decl.identifier() + "_loop")
if (self.isSeqNativeType(m.memberType().unalias().seqType())) or self.AGGRESSIVE:
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
else:
if m.memberType().unalias().kind() == idltype.tk_wchar:
self.st.out(self.template_hf, name=sname + "_" + decl.identifier() + "_len")
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
def genExDeclares(self, ex):
"""Generate hf variables for user exception filters
in: exnode ( an exception node)
"""
if self.DEBUG:
print("//XXX genExDeclares")
sname = self.namespace(ex, "_")
for m in ex.members():
for decl in m.declarators():
if m.memberType().unalias().kind() == idltype.tk_sequence:
if self.isSeqNativeType(m.memberType().unalias().seqType()):
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
self.st.out(self.template_hf, name=sname + "_" + decl.identifier() + "_loop")
elif m.memberType().unalias().kind() != idltype.tk_struct:
self.st.out(self.template_hf, name=sname + "_" + decl.identifier())
def genUnionDeclares(self, un):
"""Generate hf variables for union filters
in: un ( an union)
"""
if self.DEBUG:
print("//XXX genUnionDeclares")
sname = self.namespace(un, "_")
self.st.out(self.template_hf, name=sname + "_" + un.identifier())
for uc in un.cases(): # for all UnionCase objects in this union
# TODO: Is this loop necessary? cl is not used
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
if uc.caseType().unalias().kind() == idltype.tk_sequence:
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier() + "_loop")
if self.isSeqNativeType(uc.caseType().unalias().seqType()):
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier())
elif self.isSeqNativeType(uc.caseType()):
if uc.caseType().unalias().kind() == idltype.tk_wchar:
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier() + "_len")
self.st.out(self.template_hf, name=sname + "_" + uc.declarator().identifier())
def genExpertInfoDeclares(self):
"""Generate ei variables for expert info filters"""
if self.DEBUG:
print("//XXX genExpertInfoDeclares")
self.st.out(self.template_proto_register_ei_filters, dissector_name=self.dissname)
def genDeclares(self, oplist, atlist, enlist, stlist, unlist):
"""generate function prototypes if required
Currently this is used for struct and union helper function declarations.
"""
if self.DEBUG:
print("//XXX genDeclares")
# prototype for operation filters
self.st.out(self.template_hf_operations)
# operation specific filters
if len(oplist) > 0:
self.st.out(self.template_proto_register_op_filter_comment)
for op in oplist:
self.genOpDeclares(op)
# attribute filters
if len(atlist) > 0:
self.st.out(self.template_proto_register_at_filter_comment)
for at in atlist:
self.genAtDeclares(at)
# struct filters
if len(stlist) > 0:
self.st.out(self.template_proto_register_st_filter_comment)
for st in stlist:
self.genStDeclares(st)
# exception List filters
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
if len(exlist) > 0:
self.st.out(self.template_proto_register_ex_filter_comment)
for ex in exlist:
if ex.members(): # only if has members
self.genExDeclares(ex)
# union filters
if len(unlist) > 0:
self.st.out(self.template_proto_register_un_filter_comment)
for un in unlist:
self.genUnionDeclares(un)
# expert info filters
self.genExpertInfoDeclares()
# prototype for start_dissecting()
self.st.out(self.template_prototype_start_dissecting)
# struct prototypes
if len(stlist):
self.st.out(self.template_prototype_struct_start)
for st in stlist:
#print st.repoId()
sname = self.namespace(st, "_")
self.st.out(self.template_prototype_struct_body, stname=st.repoId(), name=sname)
self.st.out(self.template_prototype_struct_end)
# union prototypes
if len(unlist):
self.st.out(self.template_prototype_union_start)
for un in unlist:
sname = self.namespace(un, "_")
self.st.out(self.template_prototype_union_body, unname=un.repoId(), name=sname)
self.st.out(self.template_prototype_union_end)
def genPrototype(self):
self.st.out(self.template_prototype, dissector_name=self.dissname)
def genProtocol(self):
self.st.out(self.template_protocol, dissector_name=self.dissname)
self.st.out(self.template_init_boundary)
def genMainEntryStart(self, oplist):
self.st.out(self.template_main_dissector_start, dissname=self.dissname, disprot=self.protoname)
self.st.inc_indent()
self.st.out(self.template_main_dissector_switch_msgtype_start)
self.st.out(self.template_main_dissector_switch_msgtype_start_request_reply)
self.st.inc_indent()
def genMainEntryEnd(self):
self.st.out(self.template_main_dissector_switch_msgtype_end_request_reply)
self.st.dec_indent()
self.st.out(self.template_main_dissector_switch_msgtype_all_other_msgtype)
self.st.dec_indent()
self.st.out(self.template_main_dissector_end)
# NOTE: Mapping of attributes to operation(function) names is tricky.
#
# The actual accessor function names are language-mapping specific. The attribute name
# is subject to OMG IDL's name scoping rules; the accessor function names are
# guaranteed not to collide with any legal operation names specifiable in OMG IDL.
#
# eg:
#
# static const char get_Penguin_Echo_get_width_at[] = "get_width" ;
# static const char set_Penguin_Echo_set_width_at[] = "set_width" ;
#
# or:
#
# static const char get_Penguin_Echo_get_width_at[] = "_get_width" ;
# static const char set_Penguin_Echo_set_width_at[] = "_set_width" ;
#
# TODO: Implement some language dependent templates to handle naming conventions
# language <=> attribute. for C, C++. Java etc
#
# OR, just add a runtime GUI option to select language binding for attributes -- FS
def genAtList(self, atlist):
"""in: atlist
out: C code for IDL attribute declarations.
ie: def genAtlist(self,atlist,language)
"""
self.st.out(self.template_comment_attributes_start)
for n in atlist:
for i in n.declarators(): #
sname = self.namespace(i, "_")
atname = i.identifier()
self.st.out(self.template_attributes_declare_Java_get, sname=sname, atname=atname)
if not n.readonly():
self.st.out(self.template_attributes_declare_Java_set, sname=sname, atname=atname)
self.st.out(self.template_comment_attributes_end)
def genEnList(self, enlist):
"""in: enlist
out: C code for IDL Enum declarations using the "static const value_string" template
"""
self.st.out(self.template_comment_enums_start)
for enum in enlist:
sname = self.namespace(enum, "_")
self.st.out(self.template_comment_enum_comment, ename=enum.repoId())
self.st.out(self.template_value_string_start, valstringname=sname)
for enumerator in enum.enumerators():
self.st.out(self.template_value_string_entry,
intval=str(self.valFromEnum(enum, enumerator)),
description=enumerator.identifier())
#atname = n.identifier()
self.st.out(self.template_value_string_end, valstringname=sname)
self.st.out(self.template_comment_enums_end)
def genUnList(self, unlist):
"""in: unlist
out: C code for IDL Union declarations using the "static const value_string" template
"""
for un in unlist:
if un.switchType().kind() == idltype.tk_enum:
continue # skip enums since they already have value-strings
sname = self.namespace(un, "_")
self.st.out(self.template_value_string_start, valstringname=sname)
for uc in un.cases():
for cl in uc.labels():
val = cl.value()
self.st.out(self.template_value_string_entry,
intval=str(val),
description=uc.declarator().identifier())
self.st.out(self.template_value_string_end, valstringname=sname)
def genExceptionDelegator(self, oplist):
"""in: oplist
out: C code for User exception delegator
"""
self.st.out(self.template_main_exception_delegator_start)
self.st.inc_indent()
exlist = self.get_exceptionList(oplist) # grab list of ALL UNIQUE exception nodes
for ex in exlist:
if self.DEBUG:
print("//XXX Exception ", ex.repoId())
print("//XXX Exception Identifier", ex.identifier())
print("//XXX Exception Scoped Name", ex.scopedName())
if ex.members(): # only if has members
sname = self.namespace(ex, "_")
self.st.out(self.template_ex_delegate_code, sname=sname, exname=ex.repoId())
self.st.dec_indent()
self.st.out(self.template_main_exception_delegator_end)
def genAttributeHelpers(self, atlist):
"""Generate private helper functions to decode Attributes.
in: atlist
For readonly attribute - generate get_xxx()
If NOT readonly attribute - also generate set_xxx()
"""
if self.DEBUG:
print("//XXX genAttributeHelpers: atlist = ", atlist)
self.st.out(self.template_attribute_helpers_start)
for attrib in atlist:
for decl in attrib.declarators():
self.genAtHelper(attrib, decl, "get") # get accessor
if not attrib.readonly():
self.genAtHelper(attrib, decl, "set") # set accessor
self.st.out(self.template_attribute_helpers_end)
def genAtHelper(self, attrib, decl, order):
"""Generate private helper functions to decode an attribute
in: at - attribute node
in: decl - declarator belonging to this attribute
in: order - to generate a "get" or "set" helper
"""
if self.DEBUG:
print("//XXX genAtHelper")
sname = order + "_" + self.namespace(decl, "_") # must use set or get prefix to avoid collision
self.curr_sname = sname # update current opnode/exnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_attribute_helper_function_start, sname=sname, atname=decl.repoId())
self.st.inc_indent()
attr_type = attrib.attrType()
if self.DEBUG:
print("//XXX attrib = ", attrib)
print("//XXX attrib.attrType.unalias.kind = ", attr_type.unalias().kind())
if self.isItemVarType(attr_type):
self.st.out(self.template_proto_item)
if len(self.fn_hash[sname]) > 0:
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end_item)
self.getCDR(attr_type, sname + "_" + decl.identifier())
self.st.dec_indent()
self.st.out(self.template_attribute_helper_function_end)
def genExceptionHelpers(self, oplist):
"""Generate private helper functions to decode Exceptions used
within operations
in: oplist
"""
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
if self.DEBUG:
print("//XXX genExceptionHelpers: exlist = ", exlist)
self.st.out(self.template_exception_helpers_start)
for ex in exlist:
if ex.members(): # only if has members
#print("//XXX Exception = " + ex.identifier())
self.genExHelper(ex)
self.st.out(self.template_exception_helpers_end)
def genExHelper(self, ex):
"""Generate private helper functions to decode User Exceptions
in: exnode ( an exception node)
"""
if self.DEBUG:
print("//XXX genExHelper")
# check to see if we need an item
need_item = False
for m in ex.members():
if self.isItemVarType(m.memberType()):
need_item = True
break
sname = self.namespace(ex, "_")
self.curr_sname = sname # update current opnode/exnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_exception_helper_function_start, sname=sname, exname=ex.repoId())
self.st.inc_indent()
if need_item:
self.st.out(self.template_proto_item)
if len(self.fn_hash[sname]) > 0:
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
if need_item:
self.st.out(self.template_helper_function_vars_end_item)
else:
self.st.out(self.template_helper_function_vars_end)
for m in ex.members():
if self.DEBUG:
print("//XXX genExhelper, member = ", m, "member type = ", m.memberType())
for decl in m.declarators():
if self.DEBUG:
print("//XXX genExhelper, d = ", decl)
if decl.sizes(): # an array
arr_nonce = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(12))
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=decl.identifier(), asize=string_indices)
self.st.out(self.template_get_CDR_array_start, nonce=arr_nonce, aname=decl.identifier(), aval=string_indices)
self.st.inc_indent()
self.addvar(self.c_i + decl.identifier() + ";")
self.st.inc_indent()
self.getCDR(m.memberType(), sname + "_" + decl.identifier())
self.st.dec_indent()
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end, nonce=arr_nonce)
else:
self.getCDR(m.memberType(), sname + "_" + decl.identifier())
self.st.dec_indent()
self.st.out(self.template_exception_helper_function_end)
def genHelpers(self, oplist, stlist, unlist):
"""Generate private helper functions
Generate private helper functions for each IDL operation.
Generate private helper functions for each IDL struct.
Generate private helper functions for each IDL union.
in: oplist, stlist, unlist
"""
for op in oplist:
self.genOperation(op)
for st in stlist:
self.genStructHelper(st)
for un in unlist:
self.genUnionHelper(un)
def genOperation(self, opnode):
"""Generate private helper functions for a specific IDL operation.
in: opnode
"""
if self.DEBUG:
print("//XXX genOperation called")
print("//opnode =", opnode)
print("//repoid =", opnode.repoId())
sname = self.namespace(opnode, "_")
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.curr_sname = sname # update current opnode's scoped name
opname = opnode.identifier()
self.st.out(self.template_helper_function_comment, repoid=opnode.repoId())
self.st.out(self.template_helper_function_start, sname=sname)
self.st.inc_indent()
if len(self.fn_hash[sname]) > 0:
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end_item)
self.st.out(self.template_helper_switch_msgtype_start)
self.st.out(self.template_helper_switch_msgtype_request_start)
self.st.inc_indent()
self.genOperationRequest(opnode)
self.st.out(self.template_helper_switch_msgtype_request_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_start)
self.st.inc_indent()
self.st.out(self.template_helper_switch_rep_status_start)
self.st.out(self.template_helper_switch_msgtype_reply_no_exception_start)
self.st.inc_indent()
self.genOperationReply(opnode)
self.st.out(self.template_helper_switch_msgtype_reply_no_exception_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_user_exception_start)
self.st.inc_indent()
self.genOpExceptions(opnode)
self.st.out(self.template_helper_switch_msgtype_reply_user_exception_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_reply_default_start, dissector_name=self.dissname)
self.st.out(self.template_helper_switch_msgtype_reply_default_end)
self.st.out(self.template_helper_switch_rep_status_end)
self.st.dec_indent()
self.st.out(self.template_helper_switch_msgtype_default_start, dissector_name=self.dissname)
self.st.out(self.template_helper_switch_msgtype_default_end)
self.st.out(self.template_helper_switch_msgtype_end)
self.st.dec_indent()
self.st.out(self.template_helper_function_end, sname=sname)
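# Illustrative sketch (not emitted verbatim): for an operation scoped as
# Penguin_Echo_echoWString, the templates used above combine into a C helper of
# roughly this shape:
#
#     static void decode_Penguin_Echo_echoWString(...) {
#         switch (header->message_type) {
#         case Request:
#             /* decode "in" (and "inout") parameters */
#             break;
#         case Reply:
#             switch (header->rep_status) {
#             case NO_EXCEPTION: /* return value, then "out"/"inout" params */ break;
#             case USER_EXCEPTION: /* per-exception decoding */ break;
#             default: /* expert info: unknown reply status */ break;
#             }
#             break;
#         default: /* expert info: unknown GIOP message */ break;
#         }
#     }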
def genOperationRequest(self, opnode):
"""Decode function parameters for a GIOP request message"""
for p in opnode.parameters():
if p.is_in():
if self.DEBUG:
print("//XXX parameter = ", p)
print("//XXX parameter type = ", p.paramType())
print("//XXX parameter type kind = ", p.paramType().kind())
self.getCDR(p.paramType(), self.curr_sname + "_" + p.identifier())
def genOperationReply(self, opnode):
"""Decode function parameters for a GIOP reply message"""
rt = opnode.returnType() # get return type
if self.DEBUG:
print("//XXX genOperationReply")
print("//XXX opnode = ", opnode)
print("//XXX return type = ", rt)
print("//XXX return type.unalias = ", rt.unalias())
print("//XXX return type.kind() = ", rt.kind())
sname = self.namespace(opnode, "_")
if rt.kind() == idltype.tk_alias: # possibly a typedef return value?
#self.getCDR(rt.decl().alias().aliasType(),"dummy") # return value maybe a typedef
self.get_CDR_alias(rt, sname + "_return")
#self.get_CDR_alias(rt, rt.name())
else:
self.getCDR(rt, sname + "_return") # return value is NOT an alias
for p in opnode.parameters():
if p.is_out(): # out or inout
self.getCDR(p.paramType(), self.curr_sname + "_" + p.identifier())
#self.st.dec_indent()
# TODO: this method seems unnecessary
def genOpExceptions(self, opnode):
for ex in opnode.raises():
if ex.members():
#print ex.members()
for m in ex.members():
pass # placeholder; nothing is generated for exception members here (see TODO above)
#print m.memberType(), m.memberType().kind()
def genOpDelegator(self, oplist):
"""Delegator for Operations"""
if len(oplist) == 0:
self.st.out(self.template_no_ops_to_delegate)
for op in oplist:
iname = "/".join(op.scopedName()[:-1])
opname = op.identifier()
sname = self.namespace(op, "_")
self.st.out(self.template_op_delegate_code, interface=iname, sname=sname, opname=opname)
def genAtDelegator(self, atlist):
"""Delegator for Attributes"""
for a in atlist:
for i in a.declarators():
sname = self.namespace(i, "_")
self.st.out(self.template_at_delegate_code_get, sname=sname)
if not a.readonly():
self.st.out(self.template_at_delegate_code_set, sname=sname)
def addvar(self, var):
"""Add a variable declaration to the hash of list"""
if var not in self.fn_hash[self.curr_sname]:
self.fn_hash[self.curr_sname].append(var)
def dumpvars(self):
"""Print the variable declaration from the hash of list"""
for fn in self.fn_hash.keys():
print("FN = " + fn)
for v in self.fn_hash[fn]:
print("-> " + v)
def dumpCvars(self, sname):
"""Print the "C" variable declaration from the hash of list
for a given scoped operation name (eg: tux_penguin_eat)"""
for v in self.fn_hash[sname]:
self.st.out(v)
def valFromEnum(self, enumNode, enumeratorNode):
"""Given an enum node, and a enumerator node, return the enumerator's numerical value.
eg: enum Color {red,green,blue} should return
val = 1 for green
"""
if self.DEBUG:
print("//XXX valFromEnum, enumNode = ", enumNode, " from ", enumNode.repoId())
print("//XXX valFromEnum, enumeratorNode = ", enumeratorNode, " from ", enumeratorNode.repoId())
if isinstance(enumeratorNode, idlast.Enumerator):
value = enumNode.enumerators().index(enumeratorNode)
return value
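# Worked example: for IDL "enum Color {red, green, blue}" omniidl represents each
# label as an idlast.Enumerator, and its position in enumNode.enumerators() is the
# value encoded on the wire, so valFromEnum(colorNode, greenEnumerator) returns 1.
# (colorNode/greenEnumerator are illustrative names, not variables used above.)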
# tk_null = 0
# tk_void = 1
# tk_short = 2
# tk_long = 3
# tk_ushort = 4
# tk_ulong = 5
# tk_float = 6
# tk_double = 7
# tk_boolean = 8
# tk_char = 9
# tk_octet = 10
# tk_any = 11
# tk_TypeCode = 12
# tk_Principal = 13
# tk_objref = 14
# tk_struct = 15
# tk_union = 16
# tk_enum = 17
# tk_string = 18
# tk_sequence = 19
# tk_array = 20
# tk_alias = 21
# tk_except = 22
# tk_longlong = 23
# tk_ulonglong = 24
# tk_longdouble = 25
# tk_wchar = 26
# tk_wstring = 27
# tk_fixed = 28
# tk_value = 29
# tk_value_box = 30
# tk_native = 31
# tk_abstract_interface = 32
def isSeqNativeType(self, type):
"""Return true for "native" datatypes that will generate a direct proto_tree_add_xxx
call for a sequence. Used to determine if a separate hf variable is needed for
the loop over the sequence"""
pt = type.unalias().kind() # param CDR type
if self.DEBUG:
print("//XXX isSeqNativeType: kind = ", pt)
native_kinds = [
idltype.tk_ulong, idltype.tk_longlong, idltype.tk_ulonglong,
idltype.tk_short, idltype.tk_long, idltype.tk_ushort,
idltype.tk_float, idltype.tk_double, idltype.tk_boolean,
idltype.tk_octet, idltype.tk_enum, idltype.tk_string,
idltype.tk_wstring, idltype.tk_wchar, idltype.tk_char,
]
return 1 if pt in native_kinds else 0
def isItemVarType(self, type):
"""Return true if this type needs a proto_item: a struct, fixed or any,
either directly or reached through an alias, sequence or array."""
pt = type.unalias().kind() # param CDR type
if self.DEBUG:
print("//XXX isItemVarType: kind = ", pt)
if pt in [idltype.tk_struct, idltype.tk_fixed, idltype.tk_any]:
return 1
inner_type = None
if pt == idltype.tk_alias:
inner_type = type.decl().alias().aliasType().unalias()
elif pt == idltype.tk_sequence:
inner_type = type.unalias().seqType().unalias()
elif pt == idltype.tk_array:
inner_type = type.decl().alias().aliasType().unalias()
if inner_type is not None:
inner_pt = inner_type.kind()
if inner_pt in [idltype.tk_struct, idltype.tk_fixed, idltype.tk_any]:
return 1
if inner_pt in [idltype.tk_alias, idltype.tk_sequence, idltype.tk_array]:
return self.isItemVarType(inner_type) # recurse on the type node, not its kind
return 0
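# Example: an assumed IDL "typedef sequence<SomeStruct> StructSeq" unaliases to a
# sequence whose element kind is tk_struct, so isItemVarType() returns 1 and the
# caller emits template_proto_item; a plain "sequence<long>" resolves to tk_long
# and returns 0, needing no proto_item.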
def getCDR(self, type, name="fred"):
"""This is the main "iterator" function. It takes a node, and tries to output
a get_CDR_XXX accessor method(s). It can call itself multiple times
if it finds nested structures etc."""
pt = type.unalias().kind() # param CDR type
pn = name # param name
if self.DEBUG:
print("//XXX getCDR: kind = ", pt)
print("//XXX getCDR: name = ", pn)
if pt == idltype.tk_ulong:
self.get_CDR_ulong(pn)
elif pt == idltype.tk_longlong:
self.get_CDR_longlong(pn)
elif pt == idltype.tk_ulonglong:
self.get_CDR_ulonglong(pn)
elif pt == idltype.tk_void:
self.get_CDR_void(pn)
elif pt == idltype.tk_short:
self.get_CDR_short(pn)
elif pt == idltype.tk_long:
self.get_CDR_long(pn)
elif pt == idltype.tk_ushort:
self.get_CDR_ushort(pn)
elif pt == idltype.tk_float:
self.get_CDR_float(pn)
elif pt == idltype.tk_double:
self.get_CDR_double(pn)
elif pt == idltype.tk_fixed:
self.get_CDR_fixed(type.unalias(), pn)
elif pt == idltype.tk_boolean:
self.get_CDR_boolean(pn)
elif pt == idltype.tk_char:
self.get_CDR_char(pn)
elif pt == idltype.tk_octet:
self.get_CDR_octet(pn)
elif pt == idltype.tk_any:
self.get_CDR_any(pn)
elif pt == idltype.tk_string:
self.get_CDR_string(pn)
elif pt == idltype.tk_wstring:
self.get_CDR_wstring(pn)
elif pt == idltype.tk_wchar:
self.get_CDR_wchar(pn)
elif pt == idltype.tk_enum:
#print type.decl()
self.get_CDR_enum(pn, type)
#self.get_CDR_enum(pn)
elif pt == idltype.tk_struct:
self.get_CDR_struct(type, pn)
elif pt == idltype.tk_TypeCode: # will I ever get here ?
self.get_CDR_TypeCode(pn)
elif pt == idltype.tk_sequence:
if type.unalias().seqType().kind() == idltype.tk_octet:
self.get_CDR_sequence_octet(type, pn)
else:
self.get_CDR_sequence(type, pn)
elif pt == idltype.tk_objref:
self.get_CDR_objref(type, pn)
elif pt == idltype.tk_array:
pass # Supported elsewhere
elif pt == idltype.tk_union:
self.get_CDR_union(type, pn)
elif pt == idltype.tk_alias:
if self.DEBUG:
print("//XXXXX Alias type XXXXX ", type)
self.get_CDR_alias(type, pn)
else:
self.genWARNING("Unknown typecode = " + '%i ' % pt) # put comment in source code
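# Example dispatch: for an assumed IDL struct member "unsigned long count;" in a
# struct scoped as Foo, getCDR(memberType, "Foo_count") takes the tk_ulong branch
# and emits template_get_CDR_ulong with hfname="Foo_count", i.e. a C
# proto_tree_add_uint(..., hf_Foo_count, ...) call reading 4 octets via get_CDR_ulong().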
def get_CDR_ulong(self, pn):
self.st.out(self.template_get_CDR_ulong, hfname=pn)
def get_CDR_short(self, pn):
self.st.out(self.template_get_CDR_short, hfname=pn)
def get_CDR_void(self, pn):
self.st.out(self.template_get_CDR_void, hfname=pn)
def get_CDR_long(self, pn):
self.st.out(self.template_get_CDR_long, hfname=pn)
def get_CDR_ushort(self, pn):
self.st.out(self.template_get_CDR_ushort, hfname=pn)
def get_CDR_float(self, pn):
self.st.out(self.template_get_CDR_float, hfname=pn)
def get_CDR_double(self, pn):
self.st.out(self.template_get_CDR_double, hfname=pn)
def get_CDR_longlong(self, pn):
self.st.out(self.template_get_CDR_longlong, hfname=pn)
def get_CDR_ulonglong(self, pn):
self.st.out(self.template_get_CDR_ulonglong, hfname=pn)
def get_CDR_boolean(self, pn):
self.st.out(self.template_get_CDR_boolean, hfname=pn)
def get_CDR_fixed(self, type, pn):
if self.DEBUG:
print("//XXXX calling get_CDR_fixed, type = ", type)
print("//XXXX calling get_CDR_fixed, type.digits() = ", type.digits())
print("//XXXX calling get_CDR_fixed, type.scale() = ", type.scale())
string_digits = '%i ' % type.digits() # convert int to string
string_scale = '%i ' % type.scale() # convert int to string
string_length = '%i ' % self.dig_to_len(type.digits()) # how many octets to highlight for a number of digits
self.st.out(self.template_get_CDR_fixed, hfname=pn, digits=string_digits, scale=string_scale, length=string_length)
self.addvar(self.c_seq)
def get_CDR_char(self, pn):
self.st.out(self.template_get_CDR_char, hfname=pn)
def get_CDR_octet(self, pn):
self.st.out(self.template_get_CDR_octet, hfname=pn)
def get_CDR_any(self, pn):
self.st.out(self.template_get_CDR_any, varname=pn)
def get_CDR_enum(self, pn, type):
#self.st.out(self.template_get_CDR_enum, hfname=pn)
sname = self.namespace(type.unalias(), "_")
self.st.out(self.template_get_CDR_enum_symbolic, valstringarray=sname, hfname=pn)
self.addvar(self.c_u_octet4)
def get_CDR_string(self, pn):
self.st.out(self.template_get_CDR_string, hfname=pn)
def get_CDR_wstring(self, pn):
self.st.out(self.template_get_CDR_wstring, hfname=pn)
self.addvar(self.c_u_octet4)
self.addvar(self.c_seq)
def get_CDR_wchar(self, pn):
self.st.out(self.template_get_CDR_wchar, hfname=pn)
self.addvar(self.c_s_octet1)
self.addvar(self.c_seq)
def get_CDR_TypeCode(self, pn):
self.st.out(self.template_get_CDR_TypeCode, varname=pn)
self.addvar(self.c_u_octet4)
def get_CDR_objref(self, type, pn):
self.st.out(self.template_get_CDR_object)
def get_CDR_union(self, type, pn):
if self.DEBUG:
print("//XXX Union type =", type, " pn = ", pn)
print("//XXX Union type.decl()", type.decl())
print("//XXX Union Scoped Name", type.scopedName())
# If I am a typedef union {..}; node then find the union node
if isinstance(type.decl(), idlast.Declarator):
ntype = type.decl().alias().aliasType().decl()
else:
ntype = type.decl() # I am a union node
if self.DEBUG:
print("//XXX Union ntype =", ntype)
sname = self.namespace(ntype, "_")
self.st.out(self.template_union_start, name=sname)
# Output a call to the union helper function so recursive unions are handled too.
self.st.out(self.template_decode_union, name=sname)
self.st.out(self.template_union_end, name=sname)
def getCDR_hf(self, type, desc, filter, hf_name="fred", value_str=None):
"""This takes a node, and tries to output the appropriate item for the
hf array."""
pt = type.unalias().kind() # param CDR type
pn = hf_name # param name
if self.DEBUG:
print("//XXX getCDR_hf: kind = ", pt)
print("//XXX getCDR_hf: name = ", pn)
if pt == idltype.tk_ulong:
self.get_CDR_ulong_hf(pn, desc, filter, self.dissname, value_str)
elif pt == idltype.tk_longlong:
self.get_CDR_longlong_hf(pn, desc, filter, self.dissname, value_str)
elif pt == idltype.tk_ulonglong:
self.get_CDR_ulonglong_hf(pn, desc, filter, self.dissname, value_str)
elif pt == idltype.tk_void:
pass # no hf_ variables needed
elif pt == idltype.tk_short:
self.get_CDR_short_hf(pn, desc, filter, self.dissname, value_str)
elif pt == idltype.tk_long:
self.get_CDR_long_hf(pn, desc, filter, self.dissname, value_str)
elif pt == idltype.tk_ushort:
self.get_CDR_ushort_hf(pn, desc, filter, self.dissname, value_str)
elif pt == idltype.tk_float:
self.get_CDR_float_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_double:
self.get_CDR_double_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_fixed:
self.get_CDR_fixed_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_boolean:
self.get_CDR_boolean_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_char:
self.get_CDR_char_hf(pn, desc, filter, self.dissname, value_str)
elif pt == idltype.tk_octet:
self.get_CDR_octet_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_any:
pass # no hf_ variables needed
elif pt == idltype.tk_string:
self.get_CDR_string_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_wstring:
self.get_CDR_wstring_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_wchar:
self.get_CDR_wchar_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_enum:
self.get_CDR_enum_hf(pn, type, desc, filter, self.dissname)
elif pt == idltype.tk_struct:
pass # no hf_ variables needed (should be already contained in struct members)
elif pt == idltype.tk_TypeCode: # will I ever get here ?
self.get_CDR_TypeCode_hf(pn, desc, filter, self.dissname)
elif pt == idltype.tk_sequence:
if type.unalias().seqType().kind() == idltype.tk_octet:
self.get_CDR_sequence_octet_hf(type, pn, desc, filter, self.dissname)
else:
self.get_CDR_sequence_hf(type, pn, desc, filter, self.dissname)
elif pt == idltype.tk_objref:
pass # no object specific hf_ variables used, use generic ones from giop dissector
elif pt == idltype.tk_array:
pass # Supported elsewhere
elif pt == idltype.tk_union:
pass # no hf_ variables needed (should be already contained in union members)
elif pt == idltype.tk_alias:
if self.DEBUG:
print("//XXXXX Alias type hf //XXXXX ", type)
self.get_CDR_alias_hf(type, desc, filter, pn)
else:
self.genWARNING("Unknown typecode = " + '%i ' % pt) # put comment in source code
def get_CDR_ulong_hf(self, pn, desc, filter, diss, value_str=None):
if value_str:
self.st.out(self.template_get_CDR_ulong_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
else:
self.st.out(self.template_get_CDR_ulong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_short_hf(self, pn, desc, filter, diss, value_str=None):
if value_str:
self.st.out(self.template_get_CDR_short_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
else:
self.st.out(self.template_get_CDR_short_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_long_hf(self, pn, desc, filter, diss, value_str=None):
if value_str:
self.st.out(self.template_get_CDR_long_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
else:
self.st.out(self.template_get_CDR_long_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_ushort_hf(self, pn, desc, filter, diss, value_str=None):
if value_str:
self.st.out(self.template_get_CDR_ushort_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
else:
self.st.out(self.template_get_CDR_ushort_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_float_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_float_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_double_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_double_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_fixed_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_fixed_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_longlong_hf(self, pn, desc, filter, diss, value_str=None):
if value_str:
self.st.out(self.template_get_CDR_longlong_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
else:
self.st.out(self.template_get_CDR_longlong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_ulonglong_hf(self, pn, desc, filter, diss, value_str=None):
if value_str:
self.st.out(self.template_get_CDR_ulonglong_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
else:
self.st.out(self.template_get_CDR_ulonglong_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_boolean_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_boolean_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_char_hf(self, pn, desc, filter, diss, value_str=None):
if value_str:
self.st.out(self.template_get_CDR_char_symbolic_hf, valstringarray=value_str, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
else:
self.st.out(self.template_get_CDR_char_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_octet_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_octet_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_enum_hf(self, pn, type, desc, filter, diss):
sname = self.namespace(type.unalias(), "_")
self.st.out(self.template_get_CDR_enum_symbolic_hf, valstringarray=sname, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_string_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_string_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_wstring_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_wstring_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
# self.addvar(self.c_u_octet4)
# self.addvar(self.c_seq)
def get_CDR_wchar_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_wchar_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
# self.addvar(self.c_s_octet1)
# self.addvar(self.c_seq)
def get_CDR_TypeCode_hf(self, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_TypeCode_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_sequence_octet_hf(self, type, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_sequence_octet_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
def get_CDR_sequence_hf(self, type, pn, desc, filter, diss):
self.st.out(self.template_get_CDR_sequence_hf, hfname=pn, dissector_name=diss, descname=desc, filtername=filter)
if self.isSeqNativeType(type.unalias().seqType()):
self.getCDR_hf(type.unalias().seqType(), desc, filter, pn)
def get_CDR_alias_hf(self, type, desc, filter, pn):
if self.DEBUG:
print("//XXX get_CDR_alias_hf, type = ", type, " pn = ", pn)
print("//XXX get_CDR_alias_hf, type.decl() = ", type.decl())
print("//XXX get_CDR_alias_hf, type.decl().alias() = ", type.decl().alias())
decl = type.decl() # get declarator object
if decl.sizes(): # a typedef array
#indices = self.get_indices_from_sizes(decl.sizes())
#string_indices = '%i ' % indices # convert int to string
#self.st.out(self.template_get_CDR_array_comment, aname=pn, asize=string_indices)
#self.st.out(self.template_get_CDR_array_start, aname=pn, aval=string_indices)
#self.addvar(self.c_i + pn + ";")
#self.st.inc_indent()
self.getCDR_hf(type.decl().alias().aliasType(), desc, filter, pn)
#self.st.dec_indent()
#self.st.out(self.template_get_CDR_array_end)
else: # a simple typedef
if self.DEBUG:
print("//XXX get_CDR_alias_hf, type = ", type, " pn = ", pn)
print("//XXX get_CDR_alias_hf, type.decl() = ", type.decl())
#self.getCDR_hf(type.unalias(), desc, filter, decl.identifier() )
self.getCDR_hf(type.unalias(), desc, filter, pn)
def genUnionHelper(self, un):
"""Code to generate Union Helper functions
in: un - a union node
"""
if self.DEBUG:
print("//XXX genUnionHelper called")
print("//XXX Union type =", un)
print("//XXX Union type.switchType()", un.switchType())
print("//XXX Union Scoped Name", un.scopedName())
print("//XXX Union switchType.unalias", un.switchType().unalias())
print("//XXX Union switchType.unalias.kind", un.switchType().unalias().kind())
# check to see if we need an item
un_need_item = False
if un.switchType().unalias().kind() == idltype.tk_enum:
for uc in un.cases(): # for all UnionCase objects in this union
if self.DEBUG:
print("//XXX checking", uc)
if self.isItemVarType(uc.caseType()):
if uc.caseType().unalias().kind() == idltype.tk_sequence:
if uc.caseType().unalias().seqType().kind() == idltype.tk_struct:
un_need_item = True
else:
un_need_item = True
if self.AGGRESSIVE:
un_need_item = True
if self.DEBUG:
print("//XXX need_item =", un_need_item)
sname = self.namespace(un, "_")
self.curr_sname = sname # update current opnode/exnode/stnode/unnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
if un_need_item:
self.st.out(self.template_union_helper_function_start_with_item, sname=sname, unname=un.repoId())
else:
self.st.out(self.template_union_helper_function_start, sname=sname, unname=un.repoId())
self.st.inc_indent()
if len(self.fn_hash[sname]) > 0:
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end_item)
st = un.switchType().unalias() # may be typedef switch type, so find real type
self.st.out(self.template_comment_union_code_start, uname=un.repoId())
self.getCDR(st, sname + "_" + un.identifier())
# Depending on what kind of discriminant I come across (enum, integer, char,
# short, boolean), make sure I cast the return value of the get_XXX accessor
# to an appropriate value. The omniidl idlast.CaseLabel.value() accessor will
# return an integer, or an Enumerator object that is then converted to its
# integer equivalent.
#
# NOTE - May be able to skip some of this stuff, but leave it in for now -- FS
#
if st.kind() == idltype.tk_enum:
std = st.decl()
self.st.out(self.template_comment_union_code_discriminant, uname=std.repoId())
# count the number of cases to ensure variable is needed
num = 0
num_defaults = 0
for uc in un.cases(): # for all UnionCase objects in this union
num += len(uc.labels())
for cl in uc.labels():
if cl.default():
num_defaults += 1
if num != 1 or num_defaults != 1:
self.st.out(self.template_union_code_save_discriminant_enum, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
elif st.kind() == idltype.tk_long:
self.st.out(self.template_union_code_save_discriminant_long, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
elif st.kind() == idltype.tk_ulong:
self.st.out(self.template_union_code_save_discriminant_ulong, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
elif st.kind() == idltype.tk_short:
self.st.out(self.template_union_code_save_discriminant_short, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
elif st.kind() == idltype.tk_ushort:
self.st.out(self.template_union_code_save_discriminant_ushort, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
elif st.kind() == idltype.tk_boolean:
self.st.out(self.template_union_code_save_discriminant_boolean, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
elif st.kind() == idltype.tk_char:
self.st.out(self.template_union_code_save_discriminant_char, discname=un.identifier())
self.addvar(self.c_s_disc + un.identifier() + ";")
else:
print("//XXX Unknown st.kind() = ", st.kind())
# Loop over all cases in this union
for uc in un.cases(): # for all UnionCase objects in this union
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
# get integer value, even if discriminant is
# an Enumerator node
if isinstance(cl.value(), idlast.Enumerator):
if self.DEBUG:
print("//XXX clv.identifier()", cl.value().identifier())
print("//XXX clv.repoId()", cl.value().repoId())
print("//XXX clv.scopedName()", cl.value().scopedName())
# find index of enumerator in enum declaration
# eg: RED is index 0 in enum Colors { RED, BLUE, GREEN }
clv = self.valFromEnum(std, cl.value())
else:
clv = cl.value()
#print "//XXX clv = ",clv
# if char, don't convert to int, but put inside single quotes so that it is understood by C.
# eg: if (disc == 'b')..
#
# TODO : handle \xxx chars generically from a function or table lookup rather than
# a whole bunch of "if" statements. -- FS
if st.kind() == idltype.tk_char:
if clv == '\n':
string_clv = "'\\n'"
elif clv == '\t':
string_clv = "'\\t'"
else:
string_clv = "'" + clv + "'"
else:
string_clv = '%i ' % clv
# If default case, then skip comparison with discriminant
if not cl.default():
self.st.out(self.template_comment_union_code_label_compare_start,
discname=un.identifier(), labelval=string_clv)
self.st.inc_indent()
else:
self.st.out(self.template_comment_union_code_label_default_start)
self.getCDR(uc.caseType(), sname + "_" + uc.declarator().identifier())
if not cl.default():
self.st.dec_indent()
self.st.out(self.template_comment_union_code_label_compare_end)
else:
self.st.out(self.template_comment_union_code_label_default_end)
self.st.dec_indent()
self.st.out(self.template_union_helper_function_end)
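# Illustrative sketch for an assumed IDL "union U switch (short) { case 1: long a; };":
# the generated C decodes and saves the discriminant (variable name built from
# self.c_s_disc, defined elsewhere), then guards each labelled member with a
# comparison such as
#     if (disc_s_U == 1) { /* decode member a */ }
# while a default label is decoded without any comparison.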
def get_CDR_alias(self, type, pn):
"""Currently, get_CDR_alias is geared to finding typedef"""
if self.DEBUG:
print("//XXX get_CDR_alias, type = ", type, " pn = ", pn)
print("//XXX get_CDR_alias, type.decl() = ", type.decl())
print("//XXX get_CDR_alias, type.decl().alias() = ", type.decl().alias())
decl = type.decl() # get declarator object
if decl.sizes(): # a typedef array
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=pn, asize=string_indices)
arr_nonce = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(12))
self.st.out(self.template_get_CDR_array_start, nonce=arr_nonce, aname=pn, aval=string_indices)
self.st.inc_indent()
self.addvar(self.c_i + pn + ";")
self.st.inc_indent()
self.getCDR(type.decl().alias().aliasType(), pn)
self.st.dec_indent()
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end, nonce=arr_nonce)
else: # a simple typedef
if self.DEBUG:
print("//XXX type", type.__dict__)
print("//XXX type.unalias()", type.unalias().__dict__)
print("//XXX type.unalias().kind()", type.unalias().kind())
print("//XXX type.decl()", type.decl().__dict__)
self.getCDR(type.unalias(), pn)
def get_CDR_struct(self, type, pn):
"""Handle structs, including recursive"""
# If I am a typedef struct {..}; node then find the struct node
if isinstance(type.decl(), idlast.Declarator):
ntype = type.decl().alias().aliasType().decl()
else:
ntype = type.decl() # I am a struct node
sname = self.namespace(ntype, "_")
self.st.out(self.template_structure_start, name=sname)
# Output a call to the struct helper function so I can handle recursive structs also.
self.st.out(self.template_decode_struct, name=sname)
self.st.out(self.template_structure_end, name=sname)
def genStructHelper(self, st):
"""Generate private helper functions to decode a struct
in: stnode ( a struct node)
"""
if self.DEBUG:
print("//XXX genStructHelper")
sname = self.namespace(st, "_")
self.curr_sname = sname # update current opnode/exnode/stnode scoped name
if not self.fn_hash_built:
self.fn_hash[sname] = [] # init empty list as val for this sname key
# but only if the fn_hash is not already built
self.st.out(self.template_struct_helper_function_start, sname=sname, stname=st.repoId())
self.st.inc_indent()
if len(self.fn_hash[sname]) > 0:
self.st.out(self.template_helper_function_vars_start)
self.dumpCvars(sname)
self.st.out(self.template_helper_function_vars_end_item)
for m in st.members():
for decl in m.declarators():
if decl.sizes(): # an array
arr_nonce = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(12))
indices = self.get_indices_from_sizes(decl.sizes())
string_indices = '%i ' % indices # convert int to string
self.st.out(self.template_get_CDR_array_comment, aname=decl.identifier(), asize=string_indices)
self.st.out(self.template_get_CDR_array_start, nonce=arr_nonce, aname=decl.identifier(), aval=string_indices)
self.st.inc_indent()
self.addvar(self.c_i + decl.identifier() + ";")
self.st.inc_indent()
self.getCDR(m.memberType(), sname + "_" + decl.identifier())
self.st.dec_indent()
self.st.dec_indent()
self.st.out(self.template_get_CDR_array_end, nonce=arr_nonce)
else:
self.getCDR(m.memberType(), sname + "_" + decl.identifier())
self.st.dec_indent()
self.st.out(self.template_struct_helper_function_end)
def get_CDR_sequence(self, type, pn):
"""Generate code to access a sequence of a type"""
if self.DEBUG:
print("//XXX get_CDR_sequence")
self.st.out(self.template_get_CDR_sequence_length, seqname=pn)
seq_nonce = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(12))
self.st.out(self.template_get_CDR_sequence_loop_start, nonce=seq_nonce, seqname=pn)
self.addvar(self.c_i_lim + pn + ";")
self.addvar(self.c_i + pn + ";")
self.st.inc_indent()
self.st.inc_indent()
self.getCDR(type.unalias().seqType(), pn) # and start all over with the type
self.st.dec_indent()
self.st.dec_indent()
self.st.out(self.template_get_CDR_sequence_loop_end, nonce=seq_nonce)
def get_CDR_sequence_octet(self, type, pn):
"""Generate code to access a sequence of octet"""
if self.DEBUG:
print("//XXX get_CDR_sequence_octet")
self.st.out(self.template_get_CDR_sequence_length, seqname=pn)
self.st.out(self.template_get_CDR_sequence_octet, seqname=pn)
self.addvar(self.c_i_lim + pn + ";")
self.addvar("const guint8 * binary_seq_" + pn + ";")
self.addvar("gchar * text_seq_" + pn + ";")
@staticmethod
def namespace(node, sep):
"""in - op node
out - scoped operation name, using sep character instead of "::"
eg: Penguin::Echo::echoWString => Penguin_Echo_echoWString if sep = "_"
"""
sname = idlutil.ccolonName(node.scopedName()).replace('::', sep)
#print("//XXX namespace: sname = " + sname)
return sname
def gen_plugin_register(self):
"""generate code for plugin initialisation"""
self.st.out(self.template_plugin_register, description=self.description,
protocol_name=self.protoname, dissector_name=self.dissname)
# TODO - make this a command line option
#
# -e explicit
# -h heuristic
def gen_proto_reg_handoff(self, oplist):
"""generate register_giop_user_module code, and register only
unique interfaces that contain operations. Also output
a heuristic register in case we want to use that."""
self.st.out(self.template_proto_reg_handoff_start, dissector_name=self.dissname)
self.st.inc_indent()
for iname in self.get_intlist(oplist):
self.st.out(self.template_proto_reg_handoff_body, dissector_name=self.dissname,
protocol_name=self.protoname, interface=iname)
self.st.out(self.template_proto_reg_handoff_heuristic, dissector_name=self.dissname,
protocol_name=self.protoname)
self.st.dec_indent()
self.st.out(self.template_proto_reg_handoff_end)
def genOp_hf(self, op):
"""generate hf_ array element for operation, attribute, enums, struct and union lists"""
sname = self.namespace(op, "_")
opname = sname[sname.find("_")+1:]
opname = opname[:opname.find("_")]
rt = op.returnType()
if rt.kind() != idltype.tk_void:
if rt.kind() == idltype.tk_alias: # possibly a typedef return value?
self.getCDR_hf(rt, rt.name(),
opname + "." + op.identifier() + ".return", sname + "_return")
else:
self.getCDR_hf(rt, "Return value",
opname + "." + op.identifier() + ".return", sname + "_return")
for p in op.parameters():
self.getCDR_hf(p.paramType(),
p.identifier(),
opname + "." + op.identifier() + "." + p.identifier(),
sname + "_" + p.identifier())
def genAt_hf(self, at):
for decl in at.declarators():
sname = self.namespace(decl, "_")
atname = sname[sname.find("_")+1:]
atname = atname[:atname.find("_")]
self.getCDR_hf(at.attrType(), decl.identifier(),
atname + "." + decl.identifier() + ".get", "get" + "_" + sname + "_" + decl.identifier())
if not at.readonly():
self.getCDR_hf(at.attrType(), decl.identifier(),
atname + "." + decl.identifier() + ".set", "set" + "_" + sname + "_" + decl.identifier())
def genSt_hf(self, st):
sname = self.namespace(st, "_")
stname = sname[sname.find("_")+1:]
stname = stname[:stname.find("_")]
for m in st.members():
for decl in m.declarators():
self.getCDR_hf(m.memberType(), st.identifier() + "_" + decl.identifier(),
st.identifier() + "." + decl.identifier(), sname + "_" + decl.identifier())
def genEx_hf(self, ex):
sname = self.namespace(ex, "_")
exname = sname[sname.find("_")+1:]
exname = exname[:exname.find("_")]
for m in ex.members():
for decl in m.declarators():
self.getCDR_hf(m.memberType(), ex.identifier() + "_" + decl.identifier(),
exname + "." + ex.identifier() + "_" + decl.identifier(), sname + "_" + decl.identifier())
def genUnion_hf(self, un):
sname = self.namespace(un, "_")
unname = sname[:sname.rfind("_")]
unname = unname.replace("_", ".")
if self.DEBUG:
print("//XXX genUnion_hf")
print("// sname =", sname)
print("// uname =", unname)
self.getCDR_hf(un.switchType().unalias(), un.identifier(),
unname + "." + un.identifier(), sname + "_" + un.identifier(), sname)
for uc in un.cases(): # for all UnionCase objects in this union
# TODO: is this loop necessary?
for cl in uc.labels(): # for all Caselabel objects in this UnionCase
self.getCDR_hf(uc.caseType(), un.identifier() + "_" + uc.declarator().identifier(),
unname + "." + un.identifier() + "." + uc.declarator().identifier(),
sname + "_" + uc.declarator().identifier())
def gen_proto_register(self, oplist, atlist, stlist, unlist):
"""generate proto_register_<protoname> code,
in - oplist[], atlist[], stlist[], unlist[]
"""
self.st.out(self.template_proto_register_start, dissector_name=self.dissname)
# operation specific filters
self.st.out(self.template_proto_register_op_filter_comment)
for op in oplist:
self.genOp_hf(op)
# attribute filters
self.st.out(self.template_proto_register_at_filter_comment)
for at in atlist:
self.genAt_hf(at)
# struct filters
self.st.out(self.template_proto_register_st_filter_comment)
for st in stlist:
if st.members(): # only if has members
self.genSt_hf(st)
# exception List filters
exlist = self.get_exceptionList(oplist) # grab list of exception nodes
self.st.out(self.template_proto_register_ex_filter_comment)
for ex in exlist:
if ex.members(): # only if has members
self.genEx_hf(ex)
# Union filters
self.st.out(self.template_proto_register_un_filter_comment)
for un in unlist:
self.genUnion_hf(un)
self.st.out(self.template_proto_register_end, description=self.description,
protocol_name=self.protoname, dissector_name=self.dissname)
@staticmethod
def get_intlist(oplist):
"""in - oplist[]
out - a list of unique interface names. This will be used in
register_giop_user_module(dissect_giop_auto, "TEST IDL", "Penguin/Echo" ); so the operation
name must be removed from the scope. And we also only want unique interfaces.
"""
int_hash = {} # holds a hash of unique interfaces
for op in oplist:
sc = op.scopedName() # eg: penguin,tux,bite
sc1 = sc[:-1]
sn = idlutil.slashName(sc1) # penguin/tux
if sn not in int_hash:
int_hash[sn] = 0 # dummy val, but at least key is unique
ret = list(int_hash.keys())
ret.sort()
return ret
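# Worked example: an operation with scopedName() ["Penguin", "tux", "bite"] drops
# the trailing operation name and contributes the interface "Penguin/tux";
# duplicates collapse into the int_hash keys and the result is returned sorted.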
def get_exceptionList(self, oplist):
"""in - oplist[]
out - a list of exception nodes (unique). This will be used in
to generate dissect_exception_XXX functions.
"""
ex_hash = collections.OrderedDict() # holds a hash of unique exceptions.
for op in oplist:
for ex in op.raises():
if ex not in ex_hash:
ex_hash[ex] = 0 # dummy val, but at least key is unique
if self.DEBUG:
print("//XXX Exception = " + ex.identifier())
ret = list(ex_hash.keys())
return ret
@staticmethod
def get_indices_from_sizes(sizelist):
"""Simple function to take a list of array sizes and find the total number of elements
eg: temp[4][3] = 12 elements
"""
val = 1
for i in sizelist:
val = val * i
return val
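# e.g. get_indices_from_sizes([4, 3]) returns 12, matching the temp[4][3] example above.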
@staticmethod
def dig_to_len(dignum):
"""Determine how many octets hold the requested number
of digits for a "fixed" IDL type "on the wire" """
return (dignum // 2) + 1 # integer division; plain "/" would yield a float on Python 3
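# Worked example: CDR encodes two decimal digits per octet, with the final
# half-octet holding the sign, so a fixed<7,2> (7 digits) needs 7 // 2 + 1 = 4 octets.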
def genTODO(self, message):
self.st.out(self.template_debug_TODO, message=message)
def genWARNING(self, message):
self.st.out(self.template_debug_WARNING, message=message)
# Templates for C code
template_helper_function_comment = """\
/*
* @repoid@
*/"""
template_helper_function_vars_start = """\
/* Operation specific Variable declarations Begin */"""
template_helper_function_vars_end = """\
/* Operation specific Variable declarations End */
"""
template_helper_function_vars_end_item = """\
/* Operation specific Variable declarations End */
"""
template_helper_function_start = """\
static void
decode_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{"""
template_helper_function_end = """\
}
"""
template_proto_reg_handoff_start = """\
/* register me as handler for these interfaces */
void proto_reg_handoff_giop_@dissector_name@(void)
{"""
template_proto_reg_handoff_body = """\
/* Register for Explicit Dissection */
register_giop_user_module(dissect_@dissector_name@, \"@protocol_name@\", \"@interface@\", proto_@dissector_name@ ); /* explicit dissector */
"""
template_proto_reg_handoff_heuristic = """\
/* Register for Heuristic Dissection */
register_giop_user(dissect_@dissector_name@, \"@protocol_name@\" ,proto_@dissector_name@); /* heuristic dissector */
"""
template_proto_reg_handoff_end = """\
}
"""
template_prototype = """
void proto_register_giop_@dissector_name@(void);
void proto_reg_handoff_giop_@dissector_name@(void);"""
# Initialize the protocol
# template_protocol = """
#/* Initialise the protocol and subtree pointers */
#static int proto_@dissector_name@ = -1;
#static gint ett_@dissector_name@ = -1;
#"""
template_protocol = """
/* Initialise the protocol and subtree pointers */
static int proto_@dissector_name@ = -1;
static gint ett_@dissector_name@ = -1;
static int ett_giop_struct = -1;
static int ett_giop_sequence = -1;
static int ett_giop_array = -1;
static int ett_giop_union = -1;
"""
template_init_boundary = """
/* Initialise the initial Alignment */
static guint32 boundary = GIOP_HEADER_SIZE; /* initial value */"""
# plugin_register and plugin_reg_handoff templates
template_plugin_register = """
#if 0
WS_DLL_PUBLIC_DEF void
plugin_register(void)
{
if (proto_@dissector_name@ == -1) {
proto_register_giop_@dissector_name@();
}
}
WS_DLL_PUBLIC_DEF void
plugin_reg_handoff(void){
proto_register_handoff_giop_@dissector_name@();
}
#endif
"""
template_proto_register_start = """
/* Register the protocol with Wireshark */
void proto_register_giop_@dissector_name@(void)
{
/* setup list of header fields */
static hf_register_info hf[] = {
/* field that indicates the currently ongoing request/reply exchange */
{&hf_operationrequest, {"Request_Operation","giop-@[email protected]_Operation",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_proto_register_end = """
};
static ei_register_info ei[] = {
{ &ei_@dissector_name@_unknown_giop_msg, { "giop-@[email protected]_giop_msg", PI_PROTOCOL, PI_WARN, "Unknown GIOP message", EXPFILL }},
{ &ei_@dissector_name@_unknown_exception, { "giop-@[email protected]_exception", PI_PROTOCOL, PI_WARN, "Unknown exception", EXPFILL }},
{ &ei_@dissector_name@_unknown_reply_status, { "giop-@[email protected]_reply_status", PI_PROTOCOL, PI_WARN, "Unknown reply status", EXPFILL }},
};
/* setup protocol subtree array */
static gint *ett[] = {
&ett_@dissector_name@,
&ett_giop_struct,
&ett_giop_sequence,
&ett_giop_array,
&ett_giop_union,
};
expert_module_t* expert_@dissector_name@;
/* Register the protocol name and description */
proto_@dissector_name@ = proto_register_protocol(\"@description@\" , \"GIOP/@protocol_name@\", \"giop-@dissector_name@\" );
proto_register_field_array(proto_@dissector_name@, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
expert_@dissector_name@ = expert_register_protocol(proto_@dissector_name@);
expert_register_field_array(expert_@dissector_name@, ei, array_length(ei));
}
"""
template_proto_register_op_filter_comment = """\
/* Operation filters */"""
template_proto_register_at_filter_comment = """\
/* Attribute filters */"""
template_proto_register_st_filter_comment = """\
/* Struct filters */"""
template_proto_register_ex_filter_comment = """\
/* User exception filters */"""
template_proto_register_un_filter_comment = """\
/* Union filters */"""
template_proto_register_ei_filters = """\
/* Expert info filters */
static expert_field ei_@dissector_name@_unknown_giop_msg = EI_INIT;
static expert_field ei_@dissector_name@_unknown_exception = EI_INIT;
static expert_field ei_@dissector_name@_unknown_reply_status = EI_INIT;
"""
# template for delegation code
template_op_delegate_code = """\
if (strcmp(operation, "@opname@") == 0
&& (!idlname || strcmp(idlname, \"@interface@\") == 0)) {
item = process_RequestOperation(tvb, pinfo, ptree, header, operation); /* fill-up Request_Operation field & info column */
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_@sname@(tvb, pinfo, tree, item, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
template_no_ops_to_delegate = """\
// NOTE: this should only appear if your IDL has absolutely no operations
if (!idlname) {
return FALSE;
}
"""
# Templates for the helper functions
template_helper_switch_msgtype_start = """\
switch(header->message_type) {"""
template_helper_switch_msgtype_default_start = """\
default:
/* Unknown GIOP Message */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_giop_msg, "Unknown GIOP message %d", header->message_type);"""
template_helper_switch_msgtype_default_end = """\
break;"""
template_helper_switch_msgtype_end = """\
} /* switch(header->message_type) */"""
template_helper_switch_msgtype_request_start = """\
case Request:"""
template_helper_switch_msgtype_request_end = """\
break;"""
template_helper_switch_msgtype_reply_start = """\
case Reply:"""
template_helper_switch_msgtype_reply_no_exception_start = """\
case NO_EXCEPTION:"""
template_helper_switch_msgtype_reply_no_exception_end = """\
break;"""
template_helper_switch_msgtype_reply_user_exception_start = """\
case USER_EXCEPTION:"""
template_helper_switch_msgtype_reply_user_exception_end = """\
break;"""
template_helper_switch_msgtype_reply_default_start = """\
default:
/* Unknown Exception */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_exception, "Unknown exception %d", header->rep_status);"""
template_helper_switch_msgtype_reply_default_end = """\
break;"""
template_helper_switch_msgtype_reply_end = """\
break;"""
template_helper_switch_rep_status_start = """\
switch(header->rep_status) {"""
template_helper_switch_rep_status_default_start = """\
default:
/* Unknown Reply Status */
expert_add_info_format(pinfo, item, &ei_@dissector_name@_unknown_reply_status, "Unknown reply status %d", header->rep_status);"""
template_helper_switch_rep_status_default_end = """\
break;"""
template_helper_switch_rep_status_end = """\
} /* switch(header->rep_status) */
break;"""
# Templates for get_CDR_xxx accessors
template_get_CDR_ulong = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_ulong(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_short = """\
proto_tree_add_int(tree, hf_@hfname@, tvb, *offset-2, 2, get_CDR_short(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_void = """\
/* Function returns void */
"""
template_get_CDR_long = """\
proto_tree_add_int(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_ushort = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-2, 2, get_CDR_ushort(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_float = """\
proto_tree_add_float(tree, hf_@hfname@, tvb, *offset-4, 4, get_CDR_float(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_double = """\
proto_tree_add_double(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_double(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_longlong = """\
proto_tree_add_int64(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_long_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_ulonglong = """\
proto_tree_add_uint64(tree, hf_@hfname@, tvb, *offset-8, 8, get_CDR_ulong_long(tvb,offset,stream_is_big_endian, boundary));
"""
template_get_CDR_boolean = """\
proto_tree_add_boolean(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_boolean(tvb,offset));
"""
template_get_CDR_char = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_char(tvb,offset));
"""
template_get_CDR_octet = """\
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-1, 1, get_CDR_octet(tvb,offset));
"""
template_get_CDR_any = """\
get_CDR_any(tvb, pinfo, tree, item, offset, stream_is_big_endian, boundary, header);
"""
template_get_CDR_fixed = """\
get_CDR_fixed(tvb, pinfo, item, &seq, offset, @digits@, @scale@);
proto_tree_add_string_format_value(tree, hf_@hfname@, tvb, *offset-@length@, @length@, seq, "< @digits@, @scale@> = %s", seq);
"""
template_get_CDR_enum_symbolic = """\
u_octet4 = get_CDR_enum(tvb,offset,stream_is_big_endian, boundary);
proto_tree_add_uint(tree, hf_@hfname@, tvb, *offset-4, 4, u_octet4);
"""
template_get_CDR_string = """\
giop_add_CDR_string(tree, tvb, offset, stream_is_big_endian, boundary, hf_@hfname@);
"""
template_get_CDR_wstring = """\
u_octet4 = get_CDR_wstring(tvb, &seq, offset, stream_is_big_endian, boundary, header);
proto_tree_add_string(tree, hf_@hfname@, tvb, *offset-u_octet4, u_octet4, (u_octet4 > 0) ? seq : \"\");
"""
template_get_CDR_wchar = """\
s_octet1 = get_CDR_wchar(tvb, &seq, offset, header);
if (tree) {
if (s_octet1 > 0)
proto_tree_add_uint(tree, hf_@hfname@_len, tvb, *offset-1-s_octet1, 1, s_octet1);
if (s_octet1 < 0)
s_octet1 = -s_octet1;
if (s_octet1 > 0)
proto_tree_add_string(tree, hf_@hfname@, tvb, *offset-s_octet1, s_octet1, seq);
}
"""
template_get_CDR_TypeCode = """\
u_octet4 = get_CDR_typeCode(tvb, pinfo, tree, offset, stream_is_big_endian, boundary, header);
"""
template_get_CDR_object = """\
get_CDR_object(tvb, pinfo, tree, offset, stream_is_big_endian, boundary);
"""
template_get_CDR_sequence_length = """\
u_octet4_loop_@seqname@ = get_CDR_ulong(tvb, offset, stream_is_big_endian, boundary);
proto_tree_add_uint(tree, hf_@seqname@_loop, tvb,*offset-4, 4, u_octet4_loop_@seqname@);
"""
template_get_CDR_sequence_length_item = """\
u_octet4_loop_@seqname@ = get_CDR_ulong(tvb, offset, stream_is_big_endian, boundary);
item = proto_tree_add_uint(tree, hf_@seqname@_loop, tvb,*offset-4, 4, u_octet4_loop_@seqname@);
"""
template_get_CDR_sequence_loop_start = """\
{
proto_tree *tree_bak_@nonce@ = tree;
tree = proto_tree_add_subtree(tree, tvb, *offset, -1, ett_giop_sequence, NULL, "sequence @seqname@");
for (i_@seqname@=0; i_@seqname@ < u_octet4_loop_@seqname@; i_@seqname@++) {
"""
template_get_CDR_sequence_loop_end = """\
}
tree = tree_bak_@nonce@;
}
"""
template_get_CDR_sequence_octet = """\
if (u_octet4_loop_@seqname@ > 0 && tree) {
get_CDR_octet_seq(tvb, &binary_seq_@seqname@, offset,
u_octet4_loop_@seqname@);
text_seq_@seqname@ = make_printable_string(binary_seq_@seqname@,
u_octet4_loop_@seqname@);
proto_tree_add_bytes_format_value(tree, hf_@seqname@, tvb, *offset - u_octet4_loop_@seqname@,
u_octet4_loop_@seqname@, binary_seq_@seqname@, \"%s\", text_seq_@seqname@);
}
"""
template_get_CDR_array_start = """\
{
proto_tree *tree_bak_@nonce@ = tree;
tree = proto_tree_add_subtree(tree, tvb, *offset, -1, ett_giop_array, NULL, "array @aname@");
for (i_@aname@=0; i_@aname@ < @aval@; i_@aname@++) {
"""
template_get_CDR_array_end = """\
}
tree = tree_bak_@nonce@;
}
"""
template_get_CDR_array_comment = """\
/* Array: @aname@[ @asize@] */
"""
template_structure_start = """\
{ /* Begin struct \"@name@\" */
proto_tree *struct_tree = proto_tree_add_subtree(tree, tvb, *offset, -1, ett_giop_struct, NULL, "struct @name@");
"""
template_structure_end = """\
} /* End struct \"@name@\" */"""
template_union_start = """\
{ /* Begin union \"@name@\" */
proto_tree *union_tree = proto_tree_add_subtree(tree, tvb, *offset, -1, ett_giop_union, NULL, "union @name@");
"""
template_union_end = """\
} /* End union \"@name@\" */"""
# Templates for get_CDR_xxx_hf accessors
template_get_CDR_ulong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_ulong_symbolic_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
template_get_CDR_short_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_short_symbolic_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT16,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
template_get_CDR_long_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_long_symbolic_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT32,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
template_get_CDR_ushort_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT16,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_ushort_symbolic_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT16,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
template_get_CDR_float_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_FLOAT,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_double_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_DOUBLE,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_fixed_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_longlong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT64,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_longlong_symbolic_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_INT64,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
template_get_CDR_ulonglong_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT64,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_ulonglong_symbolic_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT64,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
template_get_CDR_boolean_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_BOOLEAN,8,NULL,0x01,NULL,HFILL}},"""
template_get_CDR_char_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_char_symbolic_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
template_get_CDR_octet_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT8,BASE_HEX,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_enum_symbolic_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,VALS(@valstringarray@),0x0,NULL,HFILL}},"""
template_get_CDR_string_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_wstring_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_wchar_hf = """\
{&hf_@hfname@_len, {"@descname@ Length","giop-@dissector_name@.@[email protected]",FT_UINT8,BASE_DEC,NULL,0x0,NULL,HFILL}},
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_STRING,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_TypeCode_hf = """\
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_sequence_hf = """\
{&hf_@hfname@_loop, {"Seq length of @descname@","giop-@dissector_name@.@[email protected]",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},"""
template_get_CDR_sequence_octet_hf = """\
{&hf_@hfname@_loop, {"Seq length of @descname@","giop-@dissector_name@.@[email protected]",FT_UINT32,BASE_DEC,NULL,0x0,NULL,HFILL}},
{&hf_@hfname@, {"@descname@","giop-@dissector_name@.@filtername@",FT_BYTES,BASE_NONE,NULL,0x0,NULL,HFILL}},"""
template_Header = """\
/* packet-@[email protected]
*
* Routines for IDL dissection
*
* Autogenerated from idl2wrs
* Copyright 2001 Frank Singleton <frank.singleton@@ericsson.com>
*/
"""
template_wireshark_copyright = """\
/*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@@wireshark.org>
* Copyright 1998 Gerald Combs
*/
"""
template_GPL = """\
/*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
"""
template_Modelines = """\
/*
* Editor modelines - https://www.wireshark.org/tools/modelines.html
*
* Local Variables:
* c-basic-offset: 4
* tab-width: 8
* indent-tabs-mode: nil
* End:
*
* ex: set shiftwidth=4 tabstop=8 expandtab:
* :indentSize=4:tabSize=8:noTabs=true:
*/"""
template_Includes = """\
#include "config.h"
#include <string.h>
#include <epan/packet.h>
#include <epan/proto.h>
#include <epan/dissectors/packet-giop.h>
#include <epan/expert.h>
#include "ws_diag_control.h"
#include "ws_compiler_tests.h"
#ifdef _MSC_VER
/* disable warning: "unreference local variable" */
#pragma warning(disable:4101)
#endif
/* XXX this should be autogenerated, or the warnings fixed in the generator */
DIAG_OFF(unused-function)
DIAG_OFF(unused-variable)
#if WS_IS_AT_LEAST_GNUC_VERSION(6,0)
DIAG_OFF(unused-const-variable)
#endif"""
template_main_dissector_start = """\
/*
* Called once we accept the packet as being for us; it sets the
* Protocol and Info columns and creates the top-level protocol
* tree item.
*/
static proto_tree *
start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset)
{
proto_item *ti = NULL;
proto_tree *tree = NULL; /* init later, inside if (ptree) */
col_set_str(pinfo->cinfo, COL_PROTOCOL, \"@disprot@\");
/*
* Do not clear COL_INFO, as nothing is being written there by
* this dissector yet. So leave it as is from the GIOP dissector.
* TODO: add something useful to COL_INFO
* col_clear(pinfo->cinfo, COL_INFO);
*/
if (ptree) {
ti = proto_tree_add_item(ptree, proto_@dissname@, tvb, *offset, tvb_reported_length_remaining(tvb, *offset), ENC_NA);
tree = proto_item_add_subtree(ti, ett_@dissname@);
}
return tree;
}
static proto_item*
process_RequestOperation(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, MessageHeader *header, const gchar *operation)
{
proto_item *pi;
if(header->message_type == Reply) {
/* fill-up info column */
col_append_fstr(pinfo->cinfo, COL_INFO, " op = %s",operation);
}
/* fill-up the field */
pi=proto_tree_add_string(ptree, hf_operationrequest, tvb, 0, 0, operation);
proto_item_set_generated(pi);
return pi;
}
static gboolean
dissect_@dissname@(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset, MessageHeader *header, const gchar *operation, gchar *idlname)
{
proto_item *item _U_;
proto_tree *tree _U_;
gboolean stream_is_big_endian = is_big_endian(header); /* get endianness */
/* If we have a USER Exception, then decode it and return */
if ((header->message_type == Reply) && (header->rep_status == USER_EXCEPTION)) {
return decode_user_exception(tvb, pinfo, ptree, offset, header, operation, stream_is_big_endian);
}
"""
template_main_dissector_switch_msgtype_start = """\
switch(header->message_type) {
"""
template_main_dissector_switch_msgtype_start_request_reply = """\
case Request:
case Reply:
"""
template_main_dissector_switch_msgtype_end_request_reply = """\
break;
"""
template_main_dissector_switch_msgtype_all_other_msgtype = """\
case CancelRequest:
case LocateRequest:
case LocateReply:
case CloseConnection:
case MessageError:
case Fragment:
return FALSE; /* not handled yet */
default:
return FALSE; /* not handled yet */
} /* switch */
"""
template_main_dissector_end = """\
return FALSE;
} /* End of main dissector */
"""
#-------------------------------------------------------------#
# Exception handling templates #
#-------------------------------------------------------------#
template_exception_helpers_start = """\
/* Begin Exception Helper Functions */
"""
template_exception_helpers_end = """\
/* End Exception Helper Functions */
"""
template_main_exception_delegator_start = """\
/*
* Main delegator for exception handling
*
*/
static gboolean
decode_user_exception(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *ptree _U_, int *offset _U_, MessageHeader *header, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_tree *tree _U_;
if (!header->exception_id)
return FALSE;
"""
template_ex_delegate_code = """\
if (strcmp(header->exception_id, "@exname@") == 0) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_ex_@sname@(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian); /* @exname@ */
return TRUE;
}
"""
template_main_exception_delegator_end = """
return FALSE; /* user exception not found */
}
"""
template_exception_helper_function_start = """\
/* Exception = @exname@ */
static void
decode_ex_@sname@(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
"""
template_exception_helper_function_end = """\
}
"""
template_struct_helper_function_start = """\
/* Struct = @stname@ */
static void
decode_@sname@_st(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
"""
template_struct_helper_function_end = """\
}
"""
template_union_helper_function_start = """\
/* Union = @unname@ */
static void
decode_@sname@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
"""
template_union_helper_function_start_with_item = """\
/* Union = @unname@ */
static void
decode_@sname@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
proto_item* item = NULL;
"""
template_union_helper_function_end = """\
}
"""
#-------------------------------------------------------------#
# Value string templates #
#-------------------------------------------------------------#
template_value_string_start = """\
static const value_string @valstringname@[] = {
"""
template_value_string_entry = """\
{ @intval@, \"@description@\" },"""
template_value_string_end = """\
{ 0, NULL },
};
"""
#-------------------------------------------------------------#
# Enum handling templates #
#-------------------------------------------------------------#
template_comment_enums_start = """\
/*
* IDL Enums Start
*/
"""
template_comment_enums_end = """\
/*
* IDL Enums End
*/
"""
template_comment_enum_comment = """\
/*
* Enum = @ename@
*/"""
#-------------------------------------------------------------#
# Attribute handling templates #
#-------------------------------------------------------------#
template_comment_attributes_start = """\
/*
* IDL Attributes Start
*/
"""
# get/set accessor method names are language mapping dependent.
template_attributes_declare_Java_get = """static const char get_@sname@_at[] = \"_get_@atname@\" ;"""
template_attributes_declare_Java_set = """static const char set_@sname@_at[] = \"_set_@atname@\" ;"""
template_comment_attributes_end = """
/*
* IDL Attributes End
*/
"""
# template for Attribute delegation code
#
# Note: _get_xxx() should only be called for Reply with NO_EXCEPTION
# Note: _set_xxx() should only be called for Request
template_at_delegate_code_get = """\
if (strcmp(operation, get_@sname@_at) == 0 && (header->message_type == Reply) && (header->rep_status == NO_EXCEPTION) ) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_get_@sname@_at(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
template_at_delegate_code_set = """\
if (strcmp(operation, set_@sname@_at) == 0 && (header->message_type == Request) ) {
tree = start_dissecting(tvb, pinfo, ptree, offset);
decode_set_@sname@_at(tvb, pinfo, tree, offset, header, operation, stream_is_big_endian);
return TRUE;
}
"""
template_attribute_helpers_start = """\
/* Begin Attribute Helper Functions */
"""
template_attribute_helpers_end = """\
/* End Attribute Helper Functions */
"""
template_attribute_helper_function_start = """\
/* Attribute = @atname@ */
static void
decode_@sname@_at(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_)
{
"""
template_attribute_helper_function_end = """\
}
"""
#-------------------------------------------------------------#
# Debugging templates #
#-------------------------------------------------------------#
# Template for outputting TODO "C" comments,
# so the user knows something still needs improvement.
template_debug_TODO = """\
/* TODO - @message@ */
"""
# Template for outputting WARNING "C" comments,
# so the user knows a problem has been found.
template_debug_WARNING = """\
/* WARNING - @message@ */
"""
#-------------------------------------------------------------#
# IDL Union templates #
#-------------------------------------------------------------#
template_comment_union_code_start = """\
/*
* IDL Union Start - @uname@
*/
"""
template_comment_union_code_end = """
/*
* IDL union End - @uname@
*/
"""
template_comment_union_code_discriminant = """\
/*
* IDL Union - Discriminant - @uname@
*/
"""
# Cast Union types to something appropriate:
# Enum values are cast to guint32, all others to gint32,
# as the omniidl accessor returns an integer or an Enum.
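# Most of the templates below rewind *offset by the size just consumed by the
# preceding get_CDR_*() call and re-read the value, so the raw discriminant
# can be saved into the generated disc_s_* variable; the enum case instead
# reuses the u_octet4 value the caller has already fetched.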
template_union_code_save_discriminant_enum = """\
disc_s_@discname@ = (gint32) u_octet4; /* save Enum Value discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_long = """\
*offset -= 4; // rewind
disc_s_@discname@ = (gint32) get_CDR_long(tvb,offset,stream_is_big_endian, boundary); /* save gint32 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_ulong = """\
*offset -= 4; // rewind
disc_s_@discname@ = (gint32) get_CDR_ulong(tvb,offset,stream_is_big_endian, boundary); /* save guint32 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_short = """\
*offset -= 2; // rewind
disc_s_@discname@ = (gint32) get_CDR_short(tvb,offset,stream_is_big_endian, boundary); /* save gint16 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_ushort = """\
*offset -= 2; // rewind
disc_s_@discname@ = (gint32) get_CDR_ushort(tvb,offset,stream_is_big_endian, boundary); /* save guint16 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_char = """\
*offset -= 1; // rewind
disc_s_@discname@ = (gint32) get_CDR_char(tvb,offset); /* save guint8 discriminant and cast to gint32 */
"""
template_union_code_save_discriminant_boolean = """\
*offset -= 1; // rewind
disc_s_@discname@ = (gint32) get_CDR_boolean(tvb, offset); /* save guint8 discriminant and cast to gint32 */
"""
template_comment_union_code_label_compare_start = """\
if (disc_s_@discname@ == @labelval@) {
"""
template_comment_union_code_label_compare_end = """\
return; /* End Compare for this discriminant type */
}
"""
template_comment_union_code_label_default_start = """
/* Default Union Case Start */
"""
template_comment_union_code_label_default_end = """\
/* Default Union Case End */
"""
# Templates for function prototypes.
# This is used in genDeclares() for declaring function prototypes
# for structs and union helper functions.
template_hf_operations = """
static int hf_operationrequest = -1; /* Request_Operation field */
"""
template_hf = """\
static int hf_@name@ = -1;"""
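# The @name@ placeholders in these templates are filled in by plain string
# substitution when the backend emits code. A minimal sketch (the field name
# "giop_myfield" is illustrative, not taken from any IDL):
#
#     template_hf.replace("@name@", "giop_myfield")
#     # -> 'static int hf_giop_myfield = -1;'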
template_prototype_start_dissecting = """
static proto_tree *start_dissecting(tvbuff_t *tvb, packet_info *pinfo, proto_tree *ptree, int *offset);
"""
template_prototype_struct_start = """\
/* Struct prototype declaration Start */
"""
template_prototype_struct_end = """\
/* Struct prototype declaration End */
"""
template_prototype_struct_body = """\
/* Struct = @stname@ */
static void decode_@name@_st(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, proto_item *item _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_);
"""
template_decode_struct = """\
decode_@name@_st(tvb, pinfo, struct_tree, item, offset, header, operation, stream_is_big_endian);"""
template_prototype_union_start = """\
/* Union prototype declaration Start */"""
template_prototype_union_end = """\
/* Union prototype declaration End */"""
template_prototype_union_body = """
/* Union = @unname@ */
static void decode_@name@_un(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, int *offset _U_, MessageHeader *header _U_, const gchar *operation _U_, gboolean stream_is_big_endian _U_);
"""
template_decode_union = """\
decode_@name@_un(tvb, pinfo, union_tree, offset, header, operation, stream_is_big_endian);
"""
template_proto_item = """\
proto_item *item = (proto_item*) wmem_alloc0(wmem_packet_scope(), sizeof(proto_item));
"""
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 expandtab:
# :indentSize=4:noTabs=true:
# |
Text | wireshark/tools/wireshark_words.txt | 0x%02x
0x%08x
10base
10gig
16apsk
1xrtt
3gpp2
3pcap
5views
80211n
80mhz
abbrev
accelerometer
acceptor
accessor
accessors
accuracies
acked
acknack
acknowledgement
acp133
activations
actuator
acyclic
addba
additionals
additionsr
adjacency
adlink
administrable
adpclk
adspec
advatek
adwin
aes128
aes256
aethra
aggregations
aggregator
agnss
aifsn
aironet
airpcap
airtel
alcap
alcatel
alljoyn
alloc
allocators
alteon
ampdu
amperage
ampere
amperes
anacap
analyzers
analyzes
annexc
annunc
anonsvn
anonymization
aperiodic
appdata
appid
appkey
applicability
appset
arbitrated
arduino
arfcn
arista
asciidoc
ashrae
asn1
asn1cnf
asn2deb
asn2wrs
assignee
assignor
assoc
assymetric
async
asynchronously
asyncmap
atheros
atomically
atsss
attendee
attrib
attrs
audigy
authcitrix
authen
authenticates
authenticator
authenticators
authgss
authn
authntransitioning
authorizer
authtoken
authtype
authz
autoconfiguration
autodiscovery
autoneg
autosar
available
avaya
avrcp
bacapp
backend
backhaul
backoff
bacnet
batched
baudrate
bayer
bband
bblog
bcast
beamformed
beamformee
beamformer
beamforming
bgpspec
bibliographic
bibliography
bidirectional
bidirectionally
bigint
binlog
bitfield
bitmask
bitrate
bitstring
blackhole
bnode
boolflag
bootfile
bootloader
bootopt
bootp
broadcom
bsmap
bssap
bssid
bssids
bssmap
btatt
btbredr
btcommon
bthci
btmesh
btsdp
btsnoop
bugzilla
buildbot
builtin
bulleted
butype
byte
byteorder
cablelabs
cadenced
callback
callid
callsign
calorific
canceled
canceling
cancelled
canceller
canfd
canfdmessage
cannot
canonicalised
canonicalize
canonicalized
capab
capacitive
capinfos
caplen
capsa
captioning
capwap
cardbus
carrierfreq
carrierid
casio
categorizes
cblock
cccid
ccitt
ccpch
cctrch
cdma2000
cdmacallmode
cdmachanneldata
celeron
cellid
cellidentity
centiseconds
centrino
cfilters
cframe
chan1
chan2
changelog
channelisation
channelized
charset
charsets
chauvet
checkbox
checkout
checksum
chksum
chmod
choco
chocolatey
choplen
chromaticities
chromaticity
chunked
cicam
cinfo
ciphered
ciphering
ciphersuite
ciphertext
ciplus
cipso
citrix
cksum
classifiers
classmark
classmark3
claypaky
clientkey
clientout
clopts
clsfr
cmake
cmdcontrol
cmstatus
codabar
codebook
codecs
codepoint
codeset
codingrate
codute
collectd
collimation
colocated
coloring
colorise
colorization
colorize
colorized
colorizing
colormap
combi
combiner
combiners
communication
compat
compilable
compilers
compr
computable
concatenate
concatenated
concatenates
concurrent
conferenced
configitem
configurable
conformant
connectionless
connid
connp
const
contactless
contextp
contiguity
contiguously
coord
Coord3D
copycss
copyfile
corba
corrigendum
couchbase
coverity
cpdlc
cpich
cppcheck
cpubus
cpuregisters
cqich
credential
credentials
credssp
criticalextensions
criticalextensionsfuture
crnti
crypto
cryptographic
csapi
ctime
ctxinfo
ctype
cumulated
cumulatively
customizable
customization
customizing
cyphering
daintree
datagram
datagrams
dataitem
datalen
datarate
datastate
datetime
dccreq
dcerpc
dct3trace
deact
deactivated
deactivating
deactivation
deassert
deasserted
deassertion
deauth
deauthenticate
deauthenticated
deauthentication
deauthentications
debian
debug
decapsulation
decca
decentralization
dechunk
decompressing
decompressor
decrement
decremented
decrementing
decrypt
decrypted
decrypting
decryption
dedup
deduplicate
deenabled
deenablement
defragment
defragmentation
defragmented
defragmenting
dehumidification
deinterleaved
delimited
delimiters
delimiting
demodulator
demultiplexed
demultiplexer
demultiplexers
denso
deobfuscated
depassivated
deprecated
deprotection
dequeue
dequeued
dereference
dereferenced
dereferencing
dereg
deregister
deregistered
deregistering
deregistration
derivate
des40
descr
descriptors
desegment
desegmentation
desegmenting
deselect
destip
destport
deutschland
devcap
deviceid
devmode
dfilter
dfilters
dfsauth
dftest
dgotyp
dgram
dhaka
dhcpv
dialed
dialup
diffie
Digicel
digitizer
digium
diplexer
directionality
disambiguate
disambiguation
discriminant
dissection
dissector
dissectors
distinguisher
diversifier
divisor
djiuav
dlmap
dlsch
dmepi
dnskey
docsis
dodag
dot11Qos
dot1q
double
downlink
doxygen
dpauxmon
dpnss
drbid
drdynvc
droppable
dsmcc
dstport
dtwin
dumpcap
duple
dword
dwords
eapol
earcfn
earfcn
ebcdic
ecdhe
ecdsa
ecpri
editcap
eeprom
egprs
egroup
eigrp
einval
elektronik
elided
elink
ellipsoid
encap
encaps
encapsulations
encapsulator
enciphered
encodings
encrypt
encrypting
endace
endian
endianness
endif
endpoint
engineid
enodeb
enqueue
enrollee
entityid
entryid
enttec
enumerates
enumerations
enumerator
envchange
epasv
epdcch
eperm
epsem
equinf
equiv
ericsson
erldp
errinf
errno
errorcode
errored
errorportinfo
erspan
España
esperanto
etheraddr
ethercat
ethers
ethertype
etlfile
ettarr
etwdump
etype
eutra
eutran
eventlog
executables
exflags
exocet
extattr
extcap
extensibility
extensible
extern
exthdr
extlen
extrainformation
eyesdn
facch
failover
fastcom
fastip
fastmsg
fattr
featureful
fhandle
fiber
fileset
firewall
fixme
flag1
flag2
flavored
flexray
flowid
flowmod
flowset
flowspec
fmconfig
followup
foobar
format0
fortigate
fortinet
fpiur
fraghdr
framenum
framenumber
frametype
frcrpt
freebsd
frontend
fsctl
ftype
ftypes
fujitsu
functionalities
funkt
fuzzed
fvalue
g711a
g711u
gamepad
ganss
gboolean
gchar
gcrypt
gendc
gentoo
geoip
geonw
geran
getattr
getentrybyname
getgroupinfo
getnext
getter
gidaddr
gigabit
gigamon
gigpod
github
gitlab
gluster
gmail
gmprs
gnodeb
gnutls
goaway
golomb
google
gpointer
gprscdr
gprsmeasurementparams3g
gregex
greyed
groupa
groupadd
groupb
groupcast
groupmod
gssapi
guint
gzipped
handoff
hangup
harqid
hartip
hashed
hashes
hazelcast
hcidump
headend
heuristic
hfarr
hfill,
HI2Operations
hnbap
homedir
homeplug
hopcount
hostname
howto
hpfeeds
hresult
hsdpa
hsdsch
hspdsch
hssite
hsupa
htonl
htons
http2
https
huawei
huffman
hytec
icmpv
ident
identifier
idiographic
idl2deb
idl2wrs
iec60870
ieee1609dot
ieee17221
ieee80211
iface
ifconfig
ifdef
ifname
ikev2
illuminance
imeisv
immersive
implementations
implementer
implementers
implementor
inactivated
inband
incits
incremented
incrementing
indenting
indirection
infile
infiniband
infix
infolist
informationitem
informationlist
infos
inited
initialise
initialising
initialization
initializations
initialize
initialized
initializer
initializers
initializes
initializing
inline
inode
inodes
inspiron
instantiate
instdir
instrumented
interferer
interleaving
interop
interruptible
interworking
inuse
invalidation
invalidly
ioctl
ioerr
ioflag
iograph
iotecha
ipaccess
ipaddr
ipaddress
ipcomp
ipconfig
iperf
ipfix
ipphone
ipprim
ipsec
ipseckey
iptables
iptrace
ipv4addr
ipv6addr
ipxnet
ipxnets
irqmask
isakmp
isatap
iscsi
iseries
isobus
isochronous
italia
iterating
iterator
itunes
iwarp
ixveriwave
jacobson
jetds
jsonraw
k12xx
kademlia
kasme
kasumi
kbytes
kchip
keepalive
kerberos
keydes
keygen
keyid
keylen
keylog
keymap
keypress
keyring
keyset
keytab
knxip
l2cap
l2vpn
l3vpn
laggy
lanalyzer
latencies
lbmpdm
lcgid
lcids
lcsap
leasequery
libgcrypt
libpcap
libsmi
licmgr
linearity
linkaddr
linkcss
linker
linkinfo
linksys
linux
list1
literals
lithionics
lnpdqp
logcat
loghans
loglocal
logoff
logout
logray
lookups
loopback
lossy
lscap
lucent
luminaire
luminance
macaddr
macaddress
macosx
macsec
mailto
malloc
manarg
mantissa
manuf
mappable
mariadb
marvell
mathieson
matrixes
maxlen
maybefcs
mbits
mbsfn
mbytes
mcast
mcptt
mcsset
measurability
measurements
medion
megabit
megaco
mellanox
memcache
memcpy
menubar
mergecap
merkle
meshcop
messageid
metadata
meteorological
metermod
México
mgmtmsg
microapp
microbit
midamble
millimeters
milliwatt
mingw
miniport
minislot
minislots
minus1
mirrorlink
misconfiguration
misconfigured
mitel
mitsubishi
mkdir
mmdbresolve
modbus
mode01
mode7
modepage
modespecificinfo
modulo
motorola
mozilla
mpeg4
mplstp
mpsse
mptcp
mrcpv
msbuild
mscldap
msgid
msglen
msgreq
msgsend
msgtype
msisdn
MSM7627A
mtftp
mtrace
mudurl
mulaw
multiband
multicarrier
multicast
multicasted
multicore
multiframe
multiframes
multihop
multilateration
multileg
multipacket
multipart
multipath
multiplexed
multiplexer
multiplexers
multiplexing
multiplicative
multiplicator
multirat
multirate
multislot
multistate
mumbai
mycapture
mycaptures
mydns
myhost
mysql
nacks
namelen
namespace
naptr
narrowband
nbrar
ndpsm
negotiability
nessie
netboot
netfilter
netflow
nethop
netkey
netkeyindex
netlink
netlogon
netmask
netmon
netricity
netscaler
nettl
netxray
newpw
nexthop
nextseq
nfs2err
nfs4err
nghttp
ngran
ngsniffer
niagra
nitnxlate
nnsvc
noascii
noauth
nodeid
nofcs
nokia
nominals
nonblock
noncriticalextension
noncriticalextensions
nopad
noqueue
nordig
nortel
notarized
notational
notif
notifier
notset
notused
novell
nowait
nowrap
npcap
nprach
nreport
nrppa
nrtcws
nsapi
nssai
nssvc
nstime
nstrace
ntlmssp
ntohl
ntohs
ntwkconn
nullptr
nvmeof
nvram
oampdu
obfuscated
objectid
objkey
obsoleted
octal
octet
octets
octetstring
ofdma
offloadability
ofpat
ofpbac
ofpbrc
ofpet
ofpgmfc
ofpmp
ofppf
ofptfpt
ofpxmt
om2000
omniidl
onboarding
onduration
onoff
ontime
opcode
opcodes
opcua
openssh
openssl
openstreetmap
openvpn
opflags
oplock
opnum
optimisation
optimizations
optimizer
optiplex
ordinal
oscillatory
oscore
osdmap
osmocom
osmux
ospf6
outhdr
pacch
packetcable
packetization
packetized
pagings
parallelization
param
parameterization
parameterized
params
paramset
parens
parlay
parms
parser
parses
passcode
passivated
passkey
passthrough
passwd
pbcch
pcapng
pccch
pcch
pcell
pcmax
pcmaxc
pcrepattern
pdcch
pdsch
pdustatus
peeraddr
peerkey
periodicities
peristency
persistency
pfname
pgpool
pharos
phaser
phasor
phich
phonebook
physcellid
picmg
pinfo
pixmap
plaintext
plano
plixer
plugin
pluginize
plugins
pluginsdir
pmconfig
pname
polestar
popup
portcounters
portid
portinfo
portmod
portno
portnumber
portset
portstatus
posix
postfix
powercontrol
pppdump
pppoe
prach
preauth
preconfiguration
preconfigured
predef
preempting
preemption
prefname
prefs
preloaded
prepay
prepend
preshared
printf
prioritization
prioritized
privkey
procid
profidrive
profinet
promisc
promiscsniff
promiscuously
propertykey
protected
protoabbrev
protobuf
protocolie
protos
proxied
proxying
pscell
pseudowire
psname
ptime
ptvcursor
ptype
pubdir
pubkey
pucch
pusch
pwach
pwrprof
pxeclient
pytest
qam16
qam64
qmgmt
qnet6
qosinfo
qsearch
quadlet
quadrature
quadro
quantifiers
queryhit
queryset
quiescing
quintuplet
quintuplets
r3info
radcom
radeon
radiotap
radix
ralink
ranap
randomization
randomize
randomizer
randpkt
raster
rdpudp
rdtci
reachability
readme
realloc
realtek
realtime
reassembles
reassigning
reassignments
reassigns
reassociation
reattach
reattached
reauth
reauthenticate
reauthentication
reauthorize
rebalance
rebase
rebinding
rebooted
reboots
recalculate
recalculating
recalculation
recalibrate
recognizer
recompiled
recompiling
recomputed
reconf
reconfig
reconfigurable
reconfigure
reconfigured
reconfigures
reconfirm
reconfrqst
recursively
redelivered
redelivery
redir
redirector
redirects
redistributable
redistributables
reencyption
reentry
reestablishing
reestablishment
referer
referrer
regex
regexp
regionid
reimplemented
reinitialization
reinitialize
reinitialized
reinitializing
reinjected
reinjection
reinvoke
rekey
rekeying
relocatable
remapping
renumbering
reoptimization
reoptimized
reordercap
reorigination
representable
reprogrammable
reprogramming
requester
requestor
requeue
reregister
reroute
rerouted
rerouting
resampled
resampler
rescan
resegment
resend
resequencing
reservable
reserved
reserved0
reserved1
reserved2
reserved3
reserved4
reserved5
resize
resized
resolvable
resolver
resolvers
resub
resubmission
resynchronization
resynchronize
retrans
retransmission
retransmissions
retransmit
retransmits
retransmitted
retransmitter
retries
retry
retrying
retval
retyping
revalidate
revalidation
revertive
revocations
rfcomm
rfmon
rgoose
ripemd
rlcmac
rmcap
rngrsp
rnsap
roamer
routable
rowfmt
rpcap
rpmbuild
rsocket
rsrvd
rtitcp
rtpmidi
rtpmux
ruleset
rxchannel
rxlen
rxlev
rxreq
s7comm
sabme
sacch
sanicap
sanitize
sapgui
satisfiable
scalability
scaleout
scaler
scannable
scdma
scell
scoped
scrollbar
sdcch
sdjournal
sdusize
sectorization
sectorized
segmenting
segno
semiautomatic
sendto
separability
separators
seqno
seqnr
seqnum
sequenceno
sercos
serialize
serialized
servlet
sessionid
sessionkey
setattr
setcap
setuid
severities
sfiocr
sflow
sftpserver
sftserver
sgdsn
sgsap
sha256
sha384
sha512
sharkd
shomiti
siapp
sidelink
signaal
signaling
signon
simulcast
sistemas
skippable
skype
slaac
slimp
slsch
smpte
smrse
sname
snaplen
snow3g
snprintf
softkey
solera
someip
someipsd
sonet
spare
spare1
spare2
spare3
spare4
spare5
spare6
spare7
spare8
spare9
spcell
specifiers
spectrograph
speex
spline
spnego
spoofing
spooled
srbid
srcport
srtcp
srvcc
sshdump
sshkey
ssupervisor
stanag
stateful
statfs
statusbar
stderr
stdin
stdout
strbuf
strdup
streamid
stringz
stringzpad
struct
structs
subaddress
subband
subcarrier
subcarriers
subchannel
subcode
subdevice
subdissector
subdissectors
subdoc
subelem
subelement
subelements
subframes
subfunc
subhd
subheader
subheaders
subids
subidx
subindex
subkey
subm
submode
subnet
subnets
subobj
subobject
subopt
suboption
suboptions
subparam
subpdu
subpm
subprocesstest
subquery
subrects
subselect
subselection
subslot
subtlv
subtree
subtrees
superset
sverige
svhdx
switchinfo
symantec
synchronizer
synchronizing
synchronously
syncman
syniverse
synphasor
syntaxes
sysdig
sysex
sysframe
syslog
sysmac
systemd
tablemod
tabular
tclas
tcpdump
tcpflags
tcpip
tcptrace
tcpudp
tdd128
tdd384
tdd768
technica
Tektronix
Telecomunicaciones
telefonica
Telefónica
Teléfonos
telekom
telenor
teletex
telfonica
telia
teredo
tesla
text2pcap
textbox
thermister
thermistor
thunderx
timeout
timeslot
timestamp
timestamps
timezone
tipcv
toggled
toggling
toolbar
toolongfragment
toolset
tooltip
topup
toshiba
totemsrp
touchlink
touchpad
traceroute
traff
transceive
transcoder
transifex
transitioning
transitivity
transum
transversal
traveler
traversal
trcdbg
trunc
truncatable
truncate
truncates
truncating
tshark
tspec
tunid
tunneled
tunneling
tuple
tuples
tvbparse
tvbuff
twamp
twopc
txchannel
type1
type2
type3
typedef
uarfcn
uavcan
uboot
ubuntu
ucast
udpcp
udpdump
udphdr
udrei
uievent
uint16
uint32
uint8
ulmap
ulsch
unack
unacked
unadmitted
unadvise
unaligned
unallocated
unallowed
unassign
unassoc
unauthenticated
unbind
unbuffered
uncalculated
uncalibrated
uncategorized
unchannelized
unciphered
uncoloured
uncompensated
uncompress
uncompressed
uncompressing
uncompression
unconfigurable
unconfigured
unconfirm
uncontended
uncorrectable
undecidable
undecipherable
undecodable
undecoded
undecryptable
undecrypted
undelete
undeliverable
underflow
underrun
undisposed
undissected
unduplicated
unencrypted
unescaped
unescaping
unexported
unformatted
unfragmented
unframed
ungrab
unhandled
unhidden
unicast
unicode
unicom
unignore
unimplemented
uninformative
uninitialized
uninstall
uninstallation
uninstalled
uninstaller
uninterruptable
universitaet
unjoin
unjoined
unjoining
unknown1
unlink
unlinked
unmanaged
unmap
unmappable
unmark
unmarshal
unmerged
unmodulated
unmute
unmuted
unnumb
unoptimized
unordered
unparsable
unparseable
unparsed
unprocessable
unpublish
unpunctuated
unquoted
unreach
unreassembled
unreceived
unrecoverable
unrecovered
unregister
unregistration
unreportable
unresolvable
unresponded
unroutable
unsecure
unsegmented
unsequenced
unspec
unsubscribe
unsubscribed
unsynchronized
untagged
unterminated
untruncated
untrusted
untunneled
untyped
unvisited
unvoiced
updatability
updatable
upiri
uplink
upload
uploaded
uploading
uploads
urlencoded
urnti
usability
usbmon
usbms
usbpcap
userauth
userdata
userinfo
userlist
userplane
utf8mb
utilization
utils
utran
uuencoded
v1250
v1310
v1410
v1530
v1610
validator
varint
vcpkg
vcredist
vcxproj
vector3d
venusmngr
verbosity
verifier
verizon
version2
version3
version4
version5
version6
version7
versioned
versioning
vhdset
viavi
virtex
virtio
virtualization
vlans
vnode
vocoder
vodafone
voipmetrics
volerr
vxlan
wakeup
wapforum
wbxml
webcam
webkit
websocket
whoami
wideband
wifidump
wikipedia
wikis
wimax
wimaxasncp
winflexbison
winget
winpcap
winspool
wiphy
wireshark
wiretap
wisun
withfcs
withoutfcs
wksta
workarounds
wowlan
wpcap
wrepl
writable
wsbuild
wscale
wscbor
wslua
wsluarm
wsutil
X32bit
x509if
x509sat
xattr
xauth
xchannel
xcode
xetra
xferext
xmlns
xsltproc
xtreme
z3950
zbncp
zeroes
zigbee
zugtyp |
wireshark/tools/ws-coding-style.cfg | # Ref: https://gitlab.com/wireshark/wireshark/-/issues/5924
#
# FF: uncrustify config file for Wireshark (based on cheese-indent.cfg and
# linux.cfg... taken somewhere from the Net)
# http://uncrustify.sourceforge.net/
# typical usage:
#
# uncrustify -c ../../tools/ws-coding-style.cfg --replace packet-dccp.c
#
# The number of columns to indent per level.
# Usually 2, 3, 4, or 8.
indent_columns = 4 # number, FF: 8 on linux
# How to use tabs when indenting code
# 0=spaces only
# 1=indent with tabs, align with spaces
# 2=indent and align with tabs
indent_with_tabs = 0 # number, FF: spaces only, questionable... as
# everything about this topic :-)
# Spaces to indent '{' from level
indent_brace = 0 # number
# Spaces to indent 'case' from 'switch'
# Usually 0 or indent_columns.
indent_switch_case = 0 #indent_columns # number
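# With indent_switch_case = 0, 'case' labels sit flush with the 'switch'
# (illustrative sketch):
#   switch (x) {
#   case 1:
#       do_one();
#       break;
#   }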
# Add or remove space around arithmetic operator '+', '-', '/', '*', etc
sp_arith = force # ignore/add/remove/force
# Add or remove space around assignment operator '=', '+=', etc
sp_assign = force # ignore/add/remove/force
# Add or remove space around assignment '=' in enum
sp_enum_assign = force # ignore/add/remove/force
# Add or remove space around boolean operators '&&' and '||'
sp_bool = force # ignore/add/remove/force
# Add or remove space around compare operator '<', '>', '==', etc
sp_compare = force # ignore/add/remove/force
# Add or remove space inside '(' and ')'
sp_inside_paren = remove # ignore/add/remove/force
# Add or remove space between nested parens
sp_paren_paren = remove # ignore/add/remove/force
# Add or remove space before pointer star '*'
sp_before_ptr_star = force # ignore/add/remove/force
# Add or remove space between pointer stars '*'
sp_between_ptr_star = remove # ignore/add/remove/force
# Add or remove space after pointer star '*', if followed by a word.
sp_after_ptr_star = remove # ignore/add/remove/force
# Add or remove space before reference sign '&'
sp_before_byref = force # ignore/add/remove/force
# Add or remove space after reference sign '&', if followed by a word.
sp_after_byref = remove # ignore/add/remove/force
# Add or remove space between type and word
sp_after_type = force # ignore/add/remove/force
# Add or remove space before '(' of 'if', 'for', 'switch', and 'while'
sp_before_sparen = force # ignore/add/remove/force
# Add or remove space inside if-condition '(' and ')'
sp_inside_sparen = remove # ignore/add/remove/force
# Add or remove space after ')' of 'if', 'for', 'switch', and 'while'
sp_after_sparen = force # ignore/add/remove/force
# Add or remove space between ')' and '{' of 'if', 'for', 'switch', and 'while'
sp_sparen_brace = force # ignore/add/remove/force
# Add or remove space before empty statement ';' on 'if', 'for' and 'while'
sp_special_semi = remove # ignore/add/remove/force
# Add or remove space before ';'
sp_before_semi = remove # ignore/add/remove/force
# Add or remove space before ';' in non-empty 'for' statements
sp_before_semi_for = remove # ignore/add/remove/force
# Add or remove space inside '[' and ']'
sp_inside_square = remove # ignore/add/remove/force
# Add or remove space before '[' (except '[]')
sp_before_square = remove # ignore/add/remove/force
# Add or remove space before '[]'
sp_before_squares = remove # ignore/add/remove/force
# Add or remove space after ','
sp_after_comma = force # ignore/add/remove/force
# Add or remove space before ','
sp_before_comma = remove # ignore/add/remove/force
# Add or remove space after C/D cast, i.e. 'cast(int)a' vs 'cast(int) a' or '(int)a' vs '(int) a'
sp_after_cast = force # ignore/add/remove/force
# Add or remove spaces inside cast parens
sp_inside_paren_cast = remove # ignore/add/remove/force
# Add or remove space between 'sizeof' and '('
sp_sizeof_paren = force # ignore/add/remove/force
# Add or remove space inside '{' and '}'
sp_inside_braces = remove # ignore/add/remove/force
# Add or remove space inside '{}'
sp_inside_braces_empty = remove # ignore/add/remove/force
# Add or remove space inside enum '{' and '}'
sp_inside_braces_enum = remove # ignore/add/remove/force
# Add or remove space inside struct/union '{' and '}'
sp_inside_braces_struct = remove # ignore/add/remove/force
# Add or remove space between function name and '(' on function declaration
sp_func_proto_paren = remove # ignore/add/remove/force, FF was force
# Add or remove space between function name and '(' on function definition
sp_func_def_paren = remove # ignore/add/remove/force, FF was force
# Add or remove space inside empty function '()'
sp_inside_fparens = remove # ignore/add/remove/force
# Add or remove space inside function '(' and ')'
sp_inside_fparen = remove # ignore/add/remove/force
# Add or remove space between function name and '(' on function calls
sp_func_call_paren = remove # ignore/add/remove/force, FF: was 'force'
sp_func_call_user_paren = remove # ignore/add/remove/force
set func_call_user _ N_
# Add or remove space between 'return' and '('
sp_return_paren = force # ignore/add/remove/force
# Add or remove space between 'defined' and '(' in '#if defined (FOO)'
sp_defined_paren = force # ignore/add/remove/force
# Add or remove space between macro and value
sp_macro = force # ignore/add/remove/force
# Add or remove space between macro function ')' and value
sp_macro_func = force # ignore/add/remove/force
# Add or remove space around the ':' in 'b ? t : f'
sp_cond_colon = force # ignore/add/remove/force
# Add or remove space around the '?' in 'b ? t : f'
sp_cond_question = force # ignore/add/remove/force
# Add or remove space before a semicolon of an empty part of a for statement.
sp_before_semi_for_empty = force # ignore/add/remove/force
# Space between close brace and else
sp_brace_else = force # string (add/force/ignore/remove)
# Space between close parenthesis and open brace
sp_paren_brace = force # string (add/force/ignore/remove)
# Space between else and open brace
sp_else_brace = force # string (add/force/ignore/remove)
# How to align the star in variable definitions.
# 0=Part of the type
# 1=Part of the variable
# 2=Dangling
align_var_def_star_style = 2 # number (FF: see align_typedef_star_style)
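# Illustrative sketch of style 2: when variable definitions are aligned,
# the '*' dangles in the gap between type and name, e.g.
#   char  *name;
#   short *count;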
# How to align the '&' in variable definitions.
# 0=Part of the type
# 1=Part of the variable
# 2=Dangling
align_var_def_amp_style = 2 # number
# Align variable definitions in prototypes and functions
align_func_params = true # false/true
# Whether to align the colon in struct bit fields
align_var_def_colon = true # false/true
# Whether to align inline struct/enum/union variable definitions
align_var_def_inline = true # false/true
# The span for aligning function prototypes (0=don't align)
align_func_proto_span = 1 # number
# The span for aligning on '#define' bodies (0=don't align)
align_pp_define_span = 0 # number
# Controls the positioning of the '*' in typedefs. Just try it.
# 0: Align on typedef type, ignore '*'
# 1: The '*' is part of type name: typedef int *pint;
# 2: The '*' is part of the type, but dangling: typedef int *pint;
align_typedef_star_style = 2 # number
# Controls the positioning of the '&' in typedefs. Just try it.
# 0: Align on typedef type, ignore '&'
# 1: The '&' is part of type name: typedef int &pint;
# 2: The '&' is part of the type, but dangling: typedef int &pint;
align_typedef_amp_style = 2 # number
# Whether to align macros wrapped with a backslash and a newline.
# This will not work right if the macro contains a multi-line comment.
align_nl_cont = true # false/true
# The span for aligning struct/union (0=don't align)
align_var_struct_span = 1 # number
# The threshold for aligning struct/union member definitions (0=no limit)
align_var_struct_thresh = 1 # number
# The gap for aligning struct/union member definitions
align_var_struct_gap = 1 # number
# The span for aligning struct initializer values (0=don't align)
align_struct_init_span = 1 # number
# The gap for aligning variable definitions
align_var_def_gap = 1 # number
# The span for aligning on '=' in assignments (0=don't align)
align_assign_span = 0 # number
# The span for aligning on '=' in enums (0=don't align)
align_enum_equ_span = 0 # number
# The span for aligning variable definitions (0=don't align)
align_var_def_span = 0 # number
# Add or remove newline at the end of the file
nl_end_of_file = force # ignore/add/remove/force
# The number of newlines at the end of the file (only used if nl_end_of_file is 'add' or 'force')
nl_end_of_file_min = 1 # number
# Add or remove newline between '=' and '{'
nl_assign_brace = remove # ignore/add/remove/force
# Add or remove newline between 'enum' and '{'
nl_enum_brace = force # ignore/add/remove/force
# Add or remove newline between 'struct and '{'
nl_struct_brace = force # ignore/add/remove/force
# Add or remove newline between 'union' and '{'
nl_union_brace = force # ignore/add/remove/force
# Add or remove newline between 'if' and '{'
nl_if_brace = remove # ignore/add/remove/force, FF: was 'force'
# Add or remove newline between '}' and 'else'
nl_brace_else = remove # ignore/add/remove/force, FF: was 'force'
# Add or remove newline between a function call's ')' and '{', as in:
# list_for_each(item, &list) { }
nl_fcall_brace = force # ignore/add/remove/force
# Add or remove newline between 'else if' and '{'
# If set to ignore, nl_if_brace is used instead
nl_elseif_brace = remove # ignore/add/remove/force, FF: was 'force'
# Add or remove newline between 'else' and '{'
nl_else_brace = remove # ignore/add/remove/force, FF: was 'force'
# Add or remove newline between 'else' and 'if'
nl_else_if = remove # ignore/add/remove/force
# Add or remove newline between 'for' and '{'
nl_for_brace = remove # ignore/add/remove/force
# Add or remove newline between 'while' and '{'
nl_while_brace = remove # ignore/add/remove/force, FF: was 'force'
# Add or remove newline between 'do' and '{'
nl_do_brace = force # ignore/add/remove/force
# Add or remove newline between '}' and 'while' of 'do' statement
nl_brace_while = force # ignore/add/remove/force
# Add or remove newline between 'switch' and '{'
nl_switch_brace = remove # ignore/add/remove/force
# Add or remove newline between return type and function name in definition
nl_func_type_name = force # ignore/add/remove/force
# Add or remove newline between return type and function name in a prototype
nl_func_proto_type_name = remove # ignore/add/remove/force
# Add or remove newline between a function name and the opening '('
nl_func_paren = remove # ignore/add/remove/force
# Add or remove newline after '(' in a function declaration
nl_func_decl_start = remove # ignore/add/remove/force
# Add or remove newline after each ',' in a function declaration
nl_func_decl_args = ignore # ignore/add/remove/force
# Add or remove newline before the ')' in a function declaration
nl_func_decl_end = remove # ignore/add/remove/force
# Add or remove newline between function signature and '{'
nl_fdef_brace = force # ignore/add/remove/force
# The number of newlines after '}' of a multi-line function body
nl_after_func_body = 2 # number
# The number of newlines after '}' of a single line function body
nl_after_func_body_one_liner = 2 # number
# The number of newlines after a block of variable definitions
nl_func_var_def_blk = 1 # number
# The minimum number of newlines before a multi-line comment.
# Doesn't apply if after a brace open or another multi-line comment.
nl_before_block_comment = 2 # number
# The minimum number of newlines before a single-line C comment.
# Doesn't apply if after a brace open or other single-line C comments.
nl_before_c_comment = 2 # number
# The minimum number of newlines before a CPP comment.
# Doesn't apply if after a brace open or other CPP comments.
nl_before_cpp_comment = 2 # number
# Don't touch one-line braced assignments - 'foo_t f = { 1, 2 };'
nl_assign_leave_one_liners = true # false/true
# Whether to not put blanks after '#ifxx', '#elxx', or before '#endif'
nl_squeeze_ifdef = true # false/true
# Whether to remove blank lines after '{'
eat_blanks_after_open_brace = true # false/true
# Whether to remove blank lines before '}'
eat_blanks_before_close_brace = true # false/true
# Whether to put a star on subsequent comment lines
cmt_star_cont = true # false/true
# Whether to group c-comments that look like they are in a block
cmt_c_group = true # false/true
# Whether to group cpp-comments that look like they are in a block
cmt_cpp_group = false # false/true
# Whether to change cpp-comments into c-comments
cmt_cpp_to_c = false # false/true
# If pp_indent_at_level=false, specifies the number of columns to indent per
# level. Default=1.
pp_indent_count = indent_columns # number
# Add or remove indent of preprocessor directives
pp_indent = remove # ignore/add/remove/force FF: was 'force'
# Try to limit code width to N number of columns
code_width = 100 # number
# Whether to fully split long function protos/calls at commas
ls_func_split_full = false # false/true, FF: was 'true' |
|
Python | wireshark/tools/yacc.py | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2015,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expensive of readability and what might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
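# A minimal usage sketch (the grammar, the token names, and the companion
# lexer module 'calclex' are illustrative, not part of this file):
#
#     import ply.yacc as yacc
#     from calclex import tokens   # hypothetical lex module defining the tokens
#
#     def p_expression_plus(p):
#         'expression : expression PLUS term'
#         p[0] = p[1] + p[3]
#
#     parser = yacc.yacc()
#     result = parser.parse('1 + 2')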
import re
import types
import sys
import os.path
import inspect
import base64
import warnings
__version__ = '3.8'
__tabversion__ = '3.8'
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = True # Debugging mode. If set, yacc generates a
# 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = False # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
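# Most of these defaults can also be overridden per call, e.g.
# yacc.yacc(debug=False, tabmodule='myparsetab'); both keyword arguments are
# accepted by the yacc() entry point ('myparsetab' is an arbitrary name).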
# String type-checking compatibility
if sys.version_info[0] < 3:
string_types = basestring
else:
string_types = str
MAXINT = sys.maxsize
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def debug(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
info = debug
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
critical = debug
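# For example, a caller can mirror table-construction messages to stderr with
# yacc.yacc(debuglog=PlyLogger(sys.stderr)); 'debuglog' and 'errorlog' are
# the keyword arguments the yacc() entry point accepts for this (sketch).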
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception):
pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit] + ' ...'
result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return '<%s @ 0x%x>' % (type(r).__name__, id(r))
# Panic mode error recovery support. This feature is being reworked--much of the
# code here is to offer a deprecation/backwards compatible transition
_errok = None
_token = None
_restart = None
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
def p_error(p):
...
# Use parser.errok(), parser.token(), parser.restart()
...
parser = yacc.yacc()
'''
def errok():
warnings.warn(_warnmsg)
return _errok()
def restart():
warnings.warn(_warnmsg)
return _restart()
def token():
warnings.warn(_warnmsg)
return _token()
# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
global _errok, _token, _restart
_errok = parser.errok
_token = parser.token
_restart = parser.restart
r = errorfunc(token)
try:
del _errok, _token, _restart
except NameError:
pass
return r
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self):
return self.type
def __repr__(self):
return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
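# For a rule such as 'expression : expression PLUS term' (an illustrative
# grammar, not defined here), p[1] and p[3] yield the operand values,
# assigning p[0] sets the result, and p.lineno(2) returns the line number
# of the PLUS token.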
class YaccProduction:
def __init__(self, s, stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser = None
def __getitem__(self, n):
if isinstance(n, slice):
return [s.value for s in self.slice[n]]
elif n >= 0:
return self.slice[n].value
else:
return self.stack[n].value
def __setitem__(self, n, v):
self.slice[n].value = v
def __getslice__(self, i, j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self, n):
return getattr(self.slice[n], 'lineno', 0)
def set_lineno(self, n, lineno):
self.slice[n].lineno = lineno
def linespan(self, n):
startline = getattr(self.slice[n], 'lineno', 0)
endline = getattr(self.slice[n], 'endlineno', startline)
return startline, endline
def lexpos(self, n):
return getattr(self.slice[n], 'lexpos', 0)
def lexspan(self, n):
startpos = getattr(self.slice[n], 'lexpos', 0)
endpos = getattr(self.slice[n], 'endlexpos', startpos)
return startpos, endpos
def error(self):
raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self, lrtab, errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
self.set_defaulted_states()
self.errorok = True
def errok(self):
self.errorok = True
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
# Defaulted state support.
# This method identifies parser states where there is only one possible reduction action.
# For such states, the parser can choose to make a rule reduction without consuming
# the next look-ahead token. This delayed invocation of the tokenizer can be useful in
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
# See: https://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
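# For example, if actions[7] == {'PLUS': -3} (the state's only entry is a
# reduction by rule 3), then defaulted_states[7] = -3 and the parser reduces
# in state 7 without first fetching a lookahead token (state and rule
# numbers here are illustrative).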
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
rules = list(actions.values())
if len(rules) == 1 and rules[0] < 0:
self.defaulted_states[state] = rules[0]
def disable_defaulted_states(self):
self.defaulted_states = {}
def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug, int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
elif tracking:
return self.parseopt(input, lexer, debug, tracking, tokenfunc)
else:
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. Optimized versions of this function
# are automatically created by the ply/ygen.py script. This script cuts out
# sections enclosed in markers such as this:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parsedebug-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
#--! DEBUG
debug.info('PLY: PARSE DEBUG START')
#--! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
#--! DEBUG
debug.debug('')
debug.debug('State : %s', state)
#--! DEBUG
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
#--! DEBUG
debug.debug('Defaulted state %s: Reduce using %d', state, -t)
#--! DEBUG
#--! DEBUG
debug.debug('Stack : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
#--! DEBUG
debug.debug('Action : Shift and goto state %s', t)
#--! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
#--! DEBUG
if plen:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
'['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
goto[statestack[-1-plen]][pname])
else:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
goto[statestack[-1]][pname])
#--! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set, enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
#--! DEBUG
debug.info('Done : Returning %s', format_result(result))
debug.info('PLY: PARSE DEBUG END')
#--! DEBUG
return result
if t is None:
#--! DEBUG
debug.error('Error : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = errtoken.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parsedebug-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
# This code is automatically generated by the ply/ygen.py script. Make
# changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = errtoken.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
# by the ply/ygen.py script. Make changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-notrack-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = errtoken.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-notrack-end
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = []
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
self.str = '%s -> <empty>' % self.name
def __str__(self):
return self.str
def __repr__(self):
return 'Production(' + str(self) + ')'
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self, index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end).
# NOTE: this method appears to be vestigial -- it refers to a Prodnames
# name that is not defined in this scope; Grammar.build_lritems() below
# computes the same items and is what the table generator actually uses.
def lr_item(self, n):
if n > len(self.prod):
return None
p = LRItem(self, n)
# Precompute the list of productions immediately following.
try:
p.lr_after = Prodnames[p.prod[n+1]]
except (IndexError, KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
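# Example (illustrative, not from the original source): for a hypothetical
# rule 'expr : expr PLUS term',
#
#     p = Production(1, 'expr', ['expr', 'PLUS', 'term'])
#     p.len       # -> 3
#     p.usyms     # -> ['expr', 'PLUS', 'term']
#     str(p)      # -> 'expr -> expr PLUS term'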
# This class serves as a minimal stand-in for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self, str, name, len, func, file, line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return 'MiniProduction(%s)' % self.str
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# are the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next - Next LR item. For example, if we are at 'expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self, p, n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = {}
self.prod.insert(n, '.')
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
s = '%s -> <empty>' % self.name
return s
def __repr__(self):
return 'LRItem(' + str(self) + ')'
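# Example (illustrative): the dot position is materialized into the prod tuple.
# With p being the Production for 'expr : expr PLUS term':
#
#     item = LRItem(p, 1)
#     item.prod     # -> ('expr', '.', 'PLUS', 'term')
#     str(item)     # -> 'expr -> expr . PLUS term'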
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
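# Example (illustrative): with terminals = {'PLUS': [], 'NUM': []},
#
#     rightmost_terminal(['expr', 'PLUS', 'term'], terminals)   # -> 'PLUS'
#     rightmost_terminal(['expr', 'term'], terminals)           # -> None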
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
pass
class Grammar(object):
def __init__(self, terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = {} # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = {} # A dictionary of precomputed FIRST(x) symbols
self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self, index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self, term, assoc, level):
assert self.Productions == [None], 'Must call set_precedence() before add_production()'
if term in self.Precedence:
raise GrammarError('Precedence already specified for terminal %r' % term)
if assoc not in ['left', 'right', 'nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc, level)
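# Example (illustrative): mirroring a typical yacc precedence declaration,
# lowest binding level first; declaring the same terminal twice raises
# GrammarError:
#
#     g.set_precedence('PLUS', 'left', 1)
#     g.set_precedence('TIMES', 'left', 2)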
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self, prodname, syms, func=None, file='', line=0):
if prodname in self.Terminals:
raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
if prodname == 'error':
raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
if not _is_identifier.match(prodname):
raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
# Look for literal tokens
for n, s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
(file, line, s, prodname))
if c not in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
if syms[-2] != '%prec':
raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
(file, line))
precname = syms[-1]
prodprec = self.Precedence.get(precname)
if not prodprec:
raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
else:
self.UsedPrecedence.add(precname)
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms, self.Terminals)
prodprec = self.Precedence.get(precname, ('right', 0))
# See if the rule is already in the rulemap
map = '%s -> %s' % (prodname, syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
'Previous definition at %s:%d' % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if prodname not in self.Nonterminals:
self.Nonterminals[prodname] = []
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if t not in self.Nonterminals:
self.Nonterminals[t] = []
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber, prodname, syms, prodprec, func, file, line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [p]
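# Example (illustrative, hypothetical rules): literal single-character tokens
# and a trailing %prec override are both accepted (the latter requires a prior
# set_precedence('UMINUS', ...) call):
#
#     g.add_production('expr', ['expr', 'PLUS', 'term'])
#     g.add_production('expr', ["'('", 'expr', "')'"])
#     g.add_production('expr', ['MINUS', 'expr', '%prec', 'UMINUS'])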
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self, start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError('start symbol %s undefined' % start)
self.Productions[0] = Production(0, "S'", [start])
self.Nonterminals[start].append(0)
self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if s in reachable:
return
reachable.add(s)
for p in self.Prodnames.get(s, []):
for r in p.prod:
mark_reachable_from(r)
reachable = set()
mark_reachable_from(self.Productions[0].prod[0])
return [s for s in self.Nonterminals if s not in reachable]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = True
terminates['$end'] = True
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = False
# Then propagate termination until no change:
while True:
some_change = False
for (n, pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = False
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = True
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = True
some_change = True
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s, term) in terminates.items():
if not term:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
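# Example (illustrative): with only the rule
#
#     a : PLUS a
#
# every production of 'a' needs 'a' itself, so 'a' can never derive a string
# of terminals and infinite_cycles() returns ['a'].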
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p:
continue
for s in p.prod:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
result.append((s, p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of the unused terminal symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s, v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but never used (for example,
# because they are unreachable).
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s, v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname, self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self, beta):
# We are computing First(x1,x2,x3,...,xn)
result = []
for x in beta:
x_produces_empty = False
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = True
else:
if f not in result:
result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
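# Example (illustrative): if First['opt'] is ['<empty>'] and First['NUM'] is
# ['NUM'], then
#
#     self._first(('opt', 'NUM'))   # -> ['NUM']
#     self._first(('opt',))         # -> ['<empty>']
#
# The first call skips past 'opt' because it can produce empty; the second
# falls through the whole loop, so '<empty>' itself is appended.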
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while True:
some_change = False
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append(f)
some_change = True
if not some_change:
break
return self.First
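# Example (illustrative): for the grammar
#
#     expr : NUM
#     expr : LPAREN expr RPAREN
#
# the fixed point gives First['expr'] = ['NUM', 'LPAREN'].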
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self, start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = []
if not start:
start = self.Productions[1].name
self.Follow[start] = ['$end']
while True:
didadd = False
for p in self.Productions[1:]:
# Here is the production set
for i, B in enumerate(p.prod):
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = False
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if f == '<empty>':
hasempty = True
if hasempty or i == (len(p.prod)-1):
# Add elements of Follow(p.name) to Follow(B)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if not didadd:
break
return self.Follow
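# Example (illustrative): with start symbol 'expr' and the grammar
#
#     expr : LPAREN expr RPAREN
#     expr : NUM
#
# '$end' seeds Follow['expr'], and the inner 'expr' (followed by RPAREN)
# contributes 'RPAREN', giving Follow['expr'] = ['$end', 'RPAREN'].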
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while True:
if i > len(p):
lri = None
else:
lri = LRItem(p, i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError, KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri:
break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
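# Example (illustrative): after build_lritems(), the items of a production p
# can be walked through the lr_next chain; the last item's lr_next is None:
#
#     lri = p.lr_next           # e.g. 'E -> . E PLUS E'
#     while lri:
#         print(lri)
#         lri = lri.lr_next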
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self, module):
if isinstance(module, types.ModuleType):
parsetab = module
else:
exec('import %s' % module)
parsetab = sys.modules[module]
if parsetab._tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self, filename):
try:
import cPickle as pickle
except ImportError:
import pickle
if not os.path.exists(filename):
raise ImportError
in_f = open(filename, 'rb')
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self, pdict):
for p in self.lr_productions:
p.bind(pdict)
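# Example (illustrative): loading previously written tables from 'parsetab'
# (the conventional table module name) and rebinding the rule functions,
# where pdict maps rule-function names to callables:
#
#     lr = LRTable()
#     signature = lr.read_table('parsetab')
#     lr.bind_callables(pdict)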
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
N = {}
for x in X:
N[x] = 0
stack = []
F = {}
for x in X:
if N[x] == 0:
traverse(x, N, stack, F, X, R, FP)
return F
def traverse(x, N, stack, F, X, R, FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y, N, stack, F, X, R, FP)
N[x] = min(N[x], N[y])
for a in F.get(y, []):
if a not in F[x]:
F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
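# Example (illustrative): a two-element set with the single relation x R y;
# F(x) picks up F'(y) through the relation:
#
#     X  = ['x', 'y']
#     R  = lambda v: ['y'] if v == 'x' else []
#     FP = lambda v: [v.upper()]
#     digraph(X, R, FP)   # -> {'x': ['X', 'Y'], 'y': ['Y']}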
class LALRError(YaccError):
pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self, grammar, method='LALR', log=None):
if method not in ['SLR', 'LALR']:
raise LALRError('Unsupported method %s' % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
# Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = True
while didadd:
didadd = False
for j in J:
for x in j.lr_after:
if getattr(x, 'lr0_added', 0) == self._add_count:
continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = True
return J
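# Example (illustrative): with productions S' -> expr and
# expr -> expr PLUS term | term, lr0_closure([S' -> . expr]) also pulls in
# expr -> . expr PLUS term and expr -> . term (and, transitively, the
# productions for term).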
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self, I, x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I), x))
if g:
return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x)
if not s:
s = {}
self.lr_goto_cache[x] = s
gs = []
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n))
if not s1:
s1 = {}
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end')
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I), x)] = g
return g
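# Example (illustrative): for a symbol x that actually appears after the dot
# in I, the cache makes repeated calls return the identical object, which is
# what allows the id()-based comparisons elsewhere:
#
#     g1 = self.lr0_goto(I, 'expr')
#     g2 = self.lr0_goto(I, 'expr')
#     assert g1 is g2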
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbol
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = {}
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I, x)
if not g or id(g) in self.lr0_cidhash:
continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennello (1982).
#
# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a set containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = set()
num_nullable = 0
while True:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable.add(p.name)
continue
for t in p.prod:
if t not in nullable:
break
else:
nullable.add(p.name)
if len(nullable) == num_nullable:
break
num_nullable = len(nullable)
return nullable
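# Example (illustrative): with the rules
#
#     opt  : <empty>
#     pair : opt opt
#
# the first pass adds 'opt' (a zero-length production) and a later pass adds
# 'pair', since every symbol on its right-hand side is already nullable.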
# -----------------------------------------------------------------------------
# find_nonterminal_transitions(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
trans = []
for stateno, state in enumerate(C):
for p in state:
if p.lr_index < p.len - 1:
t = (stateno, p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans:
trans.append(t)
return trans
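# Example (illustrative): state 0 always contains the augmented item
# S' -> . <start>, so (0, <start symbol>) is always among the returned
# transitions.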
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
dr_set = {}
state, N = trans
terms = []
g = self.lr0_goto(C[state], N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms:
terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state], N)
j = self.lr0_cidhash.get(id(g), -1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j, a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self, C, trans, nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state, N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N:
continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j, t) in dtrans:
# Yes. Okay, there is some chance that this is an includes relation;
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals:
break # No, forget it
if p.prod[li] not in nullable:
break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j, t))
g = self.lr0_goto(C[j], t) # Go to next set
j = self.lr0_cidhash.get(id(g), -1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name:
continue
if r.len != p.len:
continue
i = 0
# This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]:
break
i = i + 1
else:
lookb.append((j, r))
for i in includes:
if i not in includedict:
includedict[i] = []
includedict[i].append((state, N))
lookdict[(state, N)] = lookb
return lookdict, includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
FP = lambda x: self.dr_relation(C, x, nullable)
R = lambda x: self.reads_relation(C, x, nullable)
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x, [])
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
for trans, lb in lookbacks.items():
# Loop over productions in lookback
for state, p in lb:
if state not in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans, [])
for a in f:
if a not in p.lookaheads[state]:
p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C, trans, nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C, trans, nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans, readsets, included)
# Add all of the lookaheads
self.add_lookaheads(lookd, followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = {} # Action production array (temporary)
log.info('Parsing method: %s', self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [] # List of actions
st_action = {}
st_actionp = {}
st_goto = {}
log.info('')
log.info('state %d', st)
log.info('')
for p in I:
log.info(' (%d) %s', p.number, p)
log.info('')
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action['$end'] = 0
st_actionp['$end'] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
sprec, slevel = Productions[st_actionp[a].number].prec
rprec, rlevel = Precedence.get(a, ('right', 0))
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp, rejectp = pp, oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp, rejectp = oldp, pp
self.rr_conflicts.append((st, chosenp, rejectp))
log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
a, st_actionp[a].number, st_actionp[a])
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I, a)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
# We are in a shift state
actlist.append((a, p, 'shift and go to state %d' % j))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError('Shift/shift conflict in state %d' % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
rprec, rlevel = Productions[st_actionp[a].number].prec
sprec, slevel = Precedence.get(a, ('right', 0))
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = {}
for a, p, m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(' %-15s %s', a, m)
_actprint[(a, m)] = 1
log.info('')
# Print the actions that were not used. (debugging)
not_used = 0
for a, p, m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if not (a, m) in _actprint:
log.debug(' ! %-15s [ %s ]', a, m)
not_used = 1
_actprint[(a, m)] = 1
if not_used:
log.debug('')
# Construct the goto table for this state
nkeys = {}
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I, n)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
st_goto[n] = j
log.info(' %-30s shift and go to state %d', n, j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, tabmodule, outputdir='', signature=''):
if isinstance(tabmodule, types.ModuleType):
raise IOError("Won't overwrite existing tabmodule")
basemodulename = tabmodule.split('.')[-1]
filename = os.path.join(outputdir, basemodulename) + '.py'
try:
f = open(filename, 'w')
f.write('''
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = {}
for s, nd in self.lr_action.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_action_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
''')
else:
f.write('\n_lr_action = { ')
for k, v in self.lr_action.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
if smaller:
# Factor out names to try and make smaller
items = {}
for s, nd in self.lr_goto.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_goto_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
''')
else:
f.write('\n_lr_goto = { ')
for k, v in self.lr_goto.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
# Write production table
f.write('_lr_productions = [\n')
for p in self.lr_productions:
if p.func:
f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
p.func, os.path.basename(p.file), p.line))
else:
f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
f.write(']\n')
f.close()
except IOError as e:
raise
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=''):
try:
import cPickle as pickle
except ImportError:
import pickle
with open(filename, 'wb') as outf:
pickle.dump(__tabversion__, outf, pickle_protocol)
pickle.dump(self.lr_method, outf, pickle_protocol)
pickle.dump(signature, outf, pickle_protocol)
pickle.dump(self.lr_action, outf, pickle_protocol)
pickle.dump(self.lr_goto, outf, pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
else:
outp.append((str(p), p.name, p.len, None, None, None))
pickle.dump(outp, outf, pickle_protocol)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
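# Illustrative note: yacc() below calls get_caller_module_dict(2) when no
# explicit 'module' argument is given, so the caller's globals/locals are
# where the 'tokens' list, 'precedence' table and p_* rule functions are
# looked up.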
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p:
continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
grammar.append((file, dline, prodname, syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
return grammar
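# Illustrative sketch (the rule text below is an assumed example):
# given a docstring such as
#
#     expression : expression PLUS term
#                | term
#
# parse_grammar() returns one tuple per alternative, e.g.
#     [(file, line+1, 'expression', ['expression', 'PLUS', 'term']),
#      (file, line+2, 'expression', ['term'])]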
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self, pdict, log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.modules = set()
self.grammar = []
self.error = False
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_modules()
return self.error
# Compute a signature over the grammar
def signature(self):
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
sig = md5()
if self.start:
sig.update(self.start.encode('latin-1'))
if self.prec:
sig.update(''.join([''.join(p) for p in self.prec]).encode('latin-1'))
if self.tokens:
sig.update(' '.join(self.tokens).encode('latin-1'))
for f in self.pfuncs:
if f[3]:
sig.update(f[3].encode('latin-1'))
except (TypeError, ValueError):
pass
digest = base64.b16encode(sig.digest())
if sys.version_info[0] >= 3:
digest = digest.decode('latin-1')
return digest
# -----------------------------------------------------------------------------
# validate_modules()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_modules(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for module in self.modules:
lines, linen = inspect.getsourcelines(module)
counthash = {}
for linen, line in enumerate(lines):
linen += 1
m = fre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
filename, linen, name, prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start, string_types):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func, types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = True
return
eline = self.error_func.__code__.co_firstlineno
efile = self.error_func.__code__.co_filename
module = inspect.getmodule(self.error_func)
self.modules.add(module)
argcount = self.error_func.__code__.co_argcount - ismethod
if argcount != 1:
self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
self.error = True
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get('tokens')
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = True
return
terminals = set()
for n in self.tokens:
if n in terminals:
self.log.warning('Token %r multiply defined', n)
terminals.add(n)
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get('precedence')
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec, (list, tuple)):
self.log.error('precedence must be a list or tuple')
self.error = True
return
for level, p in enumerate(self.prec):
if not isinstance(p, (list, tuple)):
self.log.error('Bad precedence table')
self.error = True
return
if len(p) < 2:
self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
self.error = True
return
assoc = p[0]
if not isinstance(assoc, string_types):
self.log.error('precedence associativity must be a string')
self.error = True
return
for term in p[1:]:
if not isinstance(term, string_types):
self.log.error('precedence items must be strings')
self.error = True
return
preclist.append((term, assoc, level+1))
self.preclist = preclist
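# Illustrative sketch: a typical precedence table such as
#     precedence = (('left', 'PLUS', 'MINUS'),
#                   ('left', 'TIMES', 'DIVIDE'))
# is flattened by validate_precedence() into
#     [('PLUS', 'left', 1), ('MINUS', 'left', 1),
#      ('TIMES', 'left', 2), ('DIVIDE', 'left', 2)]
# i.e. (term, assoc, level), with later rows binding more tightly.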
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if not name.startswith('p_') or name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
line = item.__code__.co_firstlineno
module = inspect.getmodule(item)
p_functions.append((line, module, name, item.__doc__))
# Sort all of the actions by line number; make sure to stringify
# modules to make them sortable, since `line` may not uniquely sort all
# p functions
p_functions.sort(key=lambda p_function: (
p_function[0],
str(p_function[1]),
p_function[2],
p_function[3]))
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error('no rules of the form p_rulename are defined')
self.error = True
return
for line, module, name, doc in self.pfuncs:
file = inspect.getsourcefile(module)
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func.__code__.co_argcount > reqargs:
self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
self.error = True
elif func.__code__.co_argcount < reqargs:
self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
self.error = True
elif not func.__doc__:
self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
file, line, func.__name__)
else:
try:
parsed_g = parse_grammar(doc, file, line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError as e:
self.log.error(str(e))
self.error = True
# Looks like a valid grammar rule
# Mark the file in which defined.
self.modules.add(module)
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n, v in self.pdict.items():
if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
continue
if n.startswith('t_'):
continue
if n.startswith('p_') and n != 'p_error':
self.log.warning('%r not defined as a function', n)
if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
(isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
if v.__doc__:
try:
doc = v.__doc__.split(' ')
if doc[1] == ':':
self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
v.__code__.co_filename, v.__code__.co_firstlineno, n)
except IndexError:
pass
self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
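#
# Illustrative usage sketch (the token names and the hypothetical 'calclex'
# lexer module below are assumptions, not part of this file; it is assumed
# that calclex builds a lexer on import):
#
#     import ply.yacc as yacc
#     from calclex import tokens    # hypothetical lexer module exporting 'tokens'
#
#     def p_expression_plus(p):
#         'expression : expression PLUS NUMBER'
#         p[0] = p[1] + p[3]
#
#     def p_expression_number(p):
#         'expression : NUMBER'
#         p[0] = p[1]
#
#     def p_error(p):
#         print('Syntax error at %r' % (p,))
#
#     parser = yacc.yacc()            # builds the tables from the p_* rules
#     result = parser.parse('1 + 2')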
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
outputdir=None, debuglog=None, errorlog=None, picklefile=None):
if tabmodule is None:
tabmodule = tab_module
# Reference to the parsing method of the last built parser
global parse
# If pickling is enabled, table files are not created
if picklefile:
write_tables = 0
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
pdict = dict(_items)
# If no __file__ attribute is available, try to obtain it from the __module__ instead
if '__file__' not in pdict:
pdict['__file__'] = sys.modules[pdict['__module__']].__file__
else:
pdict = get_caller_module_dict(2)
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If tabmodule specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(tabmodule, types.ModuleType):
srcfile = tabmodule.__file__
else:
if '.' not in tabmodule:
srcfile = pdict['__file__']
else:
parts = tabmodule.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
# Determine if the module providing the parser is part of a package or not.
# If so, fix the tabmodule setting so that tables load correctly
pkg = pdict.get('__package__')
if pkg and isinstance(tabmodule, str):
if '.' not in tabmodule:
tabmodule = pkg + '.' + tabmodule
# Set start symbol if it's specified directly using an argument
if start is not None:
pdict['start'] = start
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict, log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError('Unable to build parser')
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
except Exception as e:
errorlog.warning('There was a problem loading the table file: %r', e)
except VersionError as e:
errorlog.warning(str(e))
except ImportError:
pass
if debuglog is None:
if debug:
try:
debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
except IOError as e:
errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
debuglog = NullLogger()
else:
debuglog = NullLogger()
debuglog.info('Created by PLY version %s (https://www.dabeaz.com/ply/)', __version__)
errors = False
# Validate the parser information
if pinfo.validate_all():
raise YaccError('Unable to build parser')
if not pinfo.error_func:
errorlog.warning('no p_error() function is defined')
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term, assoc, level)
except GrammarError as e:
errorlog.warning('%s', e)
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname, syms, funcname, file, line)
except GrammarError as e:
errorlog.error('%s', e)
errors = True
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError as e:
errorlog.error(str(e))
errors = True
if errors:
raise YaccError('Unable to build parser')
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
errors = True
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info('')
debuglog.info('Unused terminals:')
debuglog.info('')
for term in unused_terminals:
errorlog.warning('Token %r defined, but not used', term)
debuglog.info(' %s', term)
# Print out all productions to the debug log
if debug:
debuglog.info('')
debuglog.info('Grammar')
debuglog.info('')
for n, p in enumerate(grammar.Productions):
debuglog.info('Rule %-5d %s', n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning('There is 1 unused token')
if len(unused_terminals) > 1:
errorlog.warning('There are %d unused tokens', len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning('There is 1 unused rule')
if len(unused_rules) > 1:
errorlog.warning('There are %d unused rules', len(unused_rules))
if debug:
debuglog.info('')
debuglog.info('Terminals, with rules where they appear')
debuglog.info('')
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
debuglog.info('')
debuglog.info('Nonterminals, with rules where they appear')
debuglog.info('')
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info('')
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning('Symbol %r is unreachable', u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error('Infinite recursion detected for symbol %r', inf)
errors = True
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
errors = True
if errors:
raise YaccError('Unable to build parser')
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug('Generating %s tables', method)
lr = LRGeneratedTable(grammar, method, debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning('1 shift/reduce conflict')
elif num_sr > 1:
errorlog.warning('%d shift/reduce conflicts', num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning('1 reduce/reduce conflict')
elif num_rr > 1:
errorlog.warning('%d reduce/reduce conflicts', num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning('')
debuglog.warning('Conflicts:')
debuglog.warning('')
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
already_reported = set()
for state, rule, rejected in lr.rr_conflicts:
if (state, id(rule), id(rejected)) in already_reported:
continue
debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
debuglog.warning('rejected rule (%s) in state %d', rejected, state)
errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
errorlog.warning('rejected rule (%s) in state %d', rejected, state)
already_reported.add((state, id(rule), id(rejected)))
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning('Rule (%s) is never reduced', rejected)
errorlog.warning('Rule (%s) is never reduced', rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
try:
lr.write_table(tabmodule, outputdir, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
# Write a pickled version of the tables
if picklefile:
try:
lr.pickle_table(picklefile, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser |
C | wireshark/tools/asterix/packet-asterix-template.c | /*
Notice:
This file is auto generated, do not edit!
See tools/asterix/README.md for details.
Data source:
---{gitrev}---
*/
/* packet-asterix.c
* Routines for ASTERIX decoding
* By Marko Hrastovec <[email protected]>
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <[email protected]>
* Copyright 1998 Gerald Combs
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
/*
* ASTERIX (All-purpose structured EUROCONTROL surveillance
* information exchange) is a protocol related to air traffic control.
*
* The specifications can be downloaded from
* http://www.eurocontrol.int/services/asterix
*/
#include <config.h>
#include <wsutil/bits_ctz.h>
#include <epan/packet.h>
#include <epan/prefs.h>
#include <epan/proto_data.h>
void proto_register_asterix(void);
void proto_reg_handoff_asterix(void);
#define PROTO_TAG_ASTERIX "ASTERIX"
#define ASTERIX_PORT 8600
#define MAX_DISSECT_STR 1024
#define MAX_BUFFER 256
static int proto_asterix = -1;
static int hf_asterix_category = -1;
static int hf_asterix_length = -1;
static int hf_asterix_message = -1;
static int hf_asterix_fspec = -1;
static int hf_re_field_len = -1;
static int hf_spare = -1;
static int hf_counter = -1;
static int hf_XXX_FX = -1;
static int ett_asterix = -1;
static int ett_asterix_category = -1;
static int ett_asterix_length = -1;
static int ett_asterix_message = -1;
static int ett_asterix_subtree = -1;
static dissector_handle_t asterix_handle;
/* The following defines tell us how to decode the length of
* fields and how to construct their display structure */
#define FIXED 1
#define REPETITIVE 2
#define FX 3
/*#define FX_1 4*/
/*#define RE 5*/
#define COMPOUND 6
/*#define SP 7*/
#define FX_UAP 8 /* The FX_UAP field type is a hack. Currently it *
* is only used in: *
* - I001_020 *
* - asterix_get_active_uap() */
#define EXP 9 /* Explicit (RE or SP) */
/* The following defines tell us how to
* decode and display individual fields. */
#define FIELD_PART_INT 0
#define FIELD_PART_UINT 1
#define FIELD_PART_FLOAT 2
#define FIELD_PART_UFLOAT 3
#define FIELD_PART_SQUAWK 4
#define FIELD_PART_CALLSIGN 5
#define FIELD_PART_ASCII 6
#define FIELD_PART_FX 7
#define FIELD_PART_HEX 8
#define FIELD_PART_IAS_IM 9
#define FIELD_PART_IAS_ASPD 10
typedef struct FieldPart_s FieldPart;
struct FieldPart_s {
uint16_t bit_length; /* length of field in bits */
double scaling_factor; /* scaling factor of the field (for instance: 1/128) */
uint8_t type; /* Pre-defined type for proper presentation */
int *hf; /* Pointer to hf representing this kind of data */
const char *format_string; /* format string for showing float values */
};
DIAG_OFF_PEDANTIC
typedef struct AsterixField_s AsterixField;
struct AsterixField_s {
uint8_t type; /* type of field */
unsigned length; /* fixed length */
unsigned repetition_counter_size; /* size of repetition counter, length of one item is in length */
unsigned header_length; /* the size is in first header_length bytes of the field */
int *hf; /* pointer to Wireshark hf_register_info */
const FieldPart **part; /* See the declaration and description of FieldPart above. */
const AsterixField *field[]; /* subfields */
};
DIAG_ON_PEDANTIC
static void dissect_asterix_packet (tvbuff_t *, packet_info *pinfo, proto_tree *);
static void dissect_asterix_data_block (tvbuff_t *tvb, packet_info *pinfo, unsigned, proto_tree *, uint8_t, int);
static int dissect_asterix_fields (tvbuff_t *, packet_info *pinfo, unsigned, proto_tree *, uint8_t, const AsterixField *[]);
static void asterix_build_subtree (tvbuff_t *, packet_info *pinfo, unsigned, proto_tree *, const AsterixField *);
static void twos_complement (int64_t *, int);
static uint8_t asterix_bit (uint8_t, uint8_t);
static unsigned asterix_fspec_len (tvbuff_t *, unsigned);
static uint8_t asterix_field_exists (tvbuff_t *, unsigned, int);
static uint8_t asterix_get_active_uap (tvbuff_t *, unsigned, uint8_t);
static int asterix_field_length (tvbuff_t *, unsigned, const AsterixField *);
static int asterix_field_offset (tvbuff_t *, unsigned, const AsterixField *[], int);
static int asterix_message_length (tvbuff_t *, unsigned, uint8_t, uint8_t);
static const char AISCode[] = { ' ', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', ' ', ' ', ' ', ' ', ' ',
' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ' ', ' ', ' ', ' ', ' ', ' ' };
static const value_string valstr_XXX_FX[] = {
{ 0, "End of data item" },
{ 1, "Extension into next extent" },
{ 0, NULL }
};
static const FieldPart IXXX_FX = { 1, 1.0, FIELD_PART_FX, &hf_XXX_FX, NULL };
static const FieldPart IXXX_1bit_spare = { 1, 1.0, FIELD_PART_UINT, NULL, NULL };
static const FieldPart IXXX_2bit_spare = { 2, 1.0, FIELD_PART_UINT, NULL, NULL };
static const FieldPart IXXX_3bit_spare = { 3, 1.0, FIELD_PART_UINT, NULL, NULL };
static const FieldPart IXXX_4bit_spare = { 4, 1.0, FIELD_PART_UINT, NULL, NULL };
static const FieldPart IXXX_5bit_spare = { 5, 1.0, FIELD_PART_UINT, NULL, NULL };
static const FieldPart IXXX_6bit_spare = { 6, 1.0, FIELD_PART_UINT, NULL, NULL };
static const FieldPart IXXX_7bit_spare = { 7, 1.0, FIELD_PART_UINT, NULL, NULL };
/* Spare Item */
DIAG_OFF_PEDANTIC
static const AsterixField IX_SPARE = { FIXED, 0, 0, 0, &hf_spare, NULL, { NULL } };
/* insert1 */
---{insert1}---
/* insert1 */
/* settings which category version to use for each ASTERIX category */
static int global_categories_version[] = {
0, /* 000 */
0, /* 001 */
0, /* 002 */
0, /* 003 */
0, /* 004 */
0, /* 005 */
0, /* 006 */
0, /* 007 */
0, /* 008 */
0, /* 009 */
0, /* 010 */
0, /* 011 */
0, /* 012 */
0, /* 013 */
0, /* 014 */
0, /* 015 */
0, /* 016 */
0, /* 017 */
0, /* 018 */
0, /* 019 */
0, /* 020 */
0, /* 021 */
0, /* 022 */
0, /* 023 */
0, /* 024 */
0, /* 025 */
0, /* 026 */
0, /* 027 */
0, /* 028 */
0, /* 029 */
0, /* 030 */
0, /* 031 */
0, /* 032 */
0, /* 033 */
0, /* 034 */
0, /* 035 */
0, /* 036 */
0, /* 037 */
0, /* 038 */
0, /* 039 */
0, /* 040 */
0, /* 041 */
0, /* 042 */
0, /* 043 */
0, /* 044 */
0, /* 045 */
0, /* 046 */
0, /* 047 */
0, /* 048 */
0, /* 049 */
0, /* 050 */
0, /* 051 */
0, /* 052 */
0, /* 053 */
0, /* 054 */
0, /* 055 */
0, /* 056 */
0, /* 057 */
0, /* 058 */
0, /* 059 */
0, /* 060 */
0, /* 061 */
0, /* 062 */
0, /* 063 */
0, /* 064 */
0, /* 065 */
0, /* 066 */
0, /* 067 */
0, /* 068 */
0, /* 069 */
0, /* 070 */
0, /* 071 */
0, /* 072 */
0, /* 073 */
0, /* 074 */
0, /* 075 */
0, /* 076 */
0, /* 077 */
0, /* 078 */
0, /* 079 */
0, /* 080 */
0, /* 081 */
0, /* 082 */
0, /* 083 */
0, /* 084 */
0, /* 085 */
0, /* 086 */
0, /* 087 */
0, /* 088 */
0, /* 089 */
0, /* 090 */
0, /* 091 */
0, /* 092 */
0, /* 093 */
0, /* 094 */
0, /* 095 */
0, /* 096 */
0, /* 097 */
0, /* 098 */
0, /* 099 */
0, /* 100 */
0, /* 101 */
0, /* 102 */
0, /* 103 */
0, /* 104 */
0, /* 105 */
0, /* 106 */
0, /* 107 */
0, /* 108 */
0, /* 109 */
0, /* 110 */
0, /* 111 */
0, /* 112 */
0, /* 113 */
0, /* 114 */
0, /* 115 */
0, /* 116 */
0, /* 117 */
0, /* 118 */
0, /* 119 */
0, /* 120 */
0, /* 121 */
0, /* 122 */
0, /* 123 */
0, /* 124 */
0, /* 125 */
0, /* 126 */
0, /* 127 */
0, /* 128 */
0, /* 129 */
0, /* 130 */
0, /* 131 */
0, /* 132 */
0, /* 133 */
0, /* 134 */
0, /* 135 */
0, /* 136 */
0, /* 137 */
0, /* 138 */
0, /* 139 */
0, /* 140 */
0, /* 141 */
0, /* 142 */
0, /* 143 */
0, /* 144 */
0, /* 145 */
0, /* 146 */
0, /* 147 */
0, /* 148 */
0, /* 149 */
0, /* 150 */
0, /* 151 */
0, /* 152 */
0, /* 153 */
0, /* 154 */
0, /* 155 */
0, /* 156 */
0, /* 157 */
0, /* 158 */
0, /* 159 */
0, /* 160 */
0, /* 161 */
0, /* 162 */
0, /* 163 */
0, /* 164 */
0, /* 165 */
0, /* 166 */
0, /* 167 */
0, /* 168 */
0, /* 169 */
0, /* 170 */
0, /* 171 */
0, /* 172 */
0, /* 173 */
0, /* 174 */
0, /* 175 */
0, /* 176 */
0, /* 177 */
0, /* 178 */
0, /* 179 */
0, /* 180 */
0, /* 181 */
0, /* 182 */
0, /* 183 */
0, /* 184 */
0, /* 185 */
0, /* 186 */
0, /* 187 */
0, /* 188 */
0, /* 189 */
0, /* 190 */
0, /* 191 */
0, /* 192 */
0, /* 193 */
0, /* 194 */
0, /* 195 */
0, /* 196 */
0, /* 197 */
0, /* 198 */
0, /* 199 */
0, /* 200 */
0, /* 201 */
0, /* 202 */
0, /* 203 */
0, /* 204 */
0, /* 205 */
0, /* 206 */
0, /* 207 */
0, /* 208 */
0, /* 209 */
0, /* 210 */
0, /* 211 */
0, /* 212 */
0, /* 213 */
0, /* 214 */
0, /* 215 */
0, /* 216 */
0, /* 217 */
0, /* 218 */
0, /* 219 */
0, /* 220 */
0, /* 221 */
0, /* 222 */
0, /* 223 */
0, /* 224 */
0, /* 225 */
0, /* 226 */
0, /* 227 */
0, /* 228 */
0, /* 229 */
0, /* 230 */
0, /* 231 */
0, /* 232 */
0, /* 233 */
0, /* 234 */
0, /* 235 */
0, /* 236 */
0, /* 237 */
0, /* 238 */
0, /* 239 */
0, /* 240 */
0, /* 241 */
0, /* 242 */
0, /* 243 */
0, /* 244 */
0, /* 245 */
0, /* 246 */
0, /* 247 */
0, /* 248 */
0, /* 249 */
0, /* 250 */
0, /* 251 */
0, /* 252 */
0, /* 253 */
0, /* 254 */
0 /* 255 */
};
static int dissect_asterix (tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U_)
{
col_set_str (pinfo->cinfo, COL_PROTOCOL, "ASTERIX");
col_clear (pinfo->cinfo, COL_INFO);
if (tree) { /* we are being asked for details */
dissect_asterix_packet (tvb, pinfo, tree);
}
return tvb_captured_length(tvb);
}
static void dissect_asterix_packet (tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
{
unsigned i;
uint8_t category;
uint16_t length;
proto_item *asterix_packet_item;
proto_tree *asterix_packet_tree;
for (i = 0; i < tvb_reported_length (tvb); i += length + 3) {
/* all ASTERIX messages have the same structure:
*
* header:
*
* 1 byte category even though a category is referenced as I019,
* this is just stored as decimal 19 (i.e. 0x13)
* 2 bytes length the total length of this ASTERIX message, the
* length includes the size of the header.
*
* Note that there was a structural change at
* one point that changed whether multiple
* records can occur after the header or not.
* (Each category specifies this explicitly; all
* of the currently supported categories can have
* multiple records, so this implementation just
* assumes that is always the case.)
*
* record (multiple records can exist):
*
* n bytes FSPEC the field specifier is a bit mask where the
* lowest bit of each byte is called the FX bit.
* When the FX bit is set this indicates that
* the FSPEC extends into the next byte.
* Any other bit indicates the presence of the
* field that owns that bit (as per the User
* Application Profile (UAP)).
* X bytes Field Y X is as per the specification for field Y.
* etc.
*
* The User Application Profile (UAP) is simply a mapping from the
* FSPEC to fields. Each category has its own UAP.
*/
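/* A hypothetical example data block (made up for illustration, not
* taken from a capture):
*
*     0x13 0x00 0x06 0x80 0xAB 0xCD
*
* 0x13 is category 19, 0x0006 is the total length including the
* 3 byte header, and the single record starts with the one byte
* FSPEC 0x80: only the first UAP field is present and the FX bit
* (lsb) is clear, so the FSPEC does not extend; a hypothetical
* 2 byte field body 0xAB 0xCD follows.
*/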
category = tvb_get_guint8 (tvb, i);
length = (tvb_get_guint8 (tvb, i + 1) << 8) + tvb_get_guint8 (tvb, i + 2) - 3; /* -3 for category and length */
asterix_packet_item = proto_tree_add_item (tree, proto_asterix, tvb, i, length + 3, ENC_NA);
proto_item_append_text (asterix_packet_item, ", Category %03d", category);
asterix_packet_tree = proto_item_add_subtree (asterix_packet_item, ett_asterix);
proto_tree_add_item (asterix_packet_tree, hf_asterix_category, tvb, i, 1, ENC_BIG_ENDIAN);
proto_tree_add_item (asterix_packet_tree, hf_asterix_length, tvb, i + 1, 2, ENC_BIG_ENDIAN);
dissect_asterix_data_block (tvb, pinfo, i + 3, asterix_packet_tree, category, length);
}
}
static void dissect_asterix_data_block (tvbuff_t *tvb, packet_info *pinfo, unsigned offset, proto_tree *tree, uint8_t category, int length)
{
uint8_t active_uap;
int fspec_len, inner_offset, size, counter;
proto_item *asterix_message_item = NULL;
proto_tree *asterix_message_tree = NULL;
for (counter = 1, inner_offset = 0; inner_offset < length; counter++) {
/* This loop handles parsing of each ASTERIX record */
active_uap = asterix_get_active_uap (tvb, offset + inner_offset, category);
size = asterix_message_length (tvb, offset + inner_offset, category, active_uap);
if (size > 0) {
asterix_message_item = proto_tree_add_item (tree, hf_asterix_message, tvb, offset + inner_offset, size, ENC_NA);
proto_item_append_text (asterix_message_item, ", #%02d, length: %d", counter, size);
asterix_message_tree = proto_item_add_subtree (asterix_message_item, ett_asterix_message);
fspec_len = asterix_fspec_len (tvb, offset + inner_offset);
/*show_fspec (tvb, asterix_message_tree, offset + inner_offset, fspec_len);*/
proto_tree_add_item (asterix_message_tree, hf_asterix_fspec, tvb, offset + inner_offset, fspec_len, ENC_NA);
size = dissect_asterix_fields (tvb, pinfo, offset + inner_offset, asterix_message_tree, category, categories[category][global_categories_version[category]][active_uap]);
inner_offset += size + fspec_len;
}
else {
inner_offset = length;
}
}
}
static int dissect_asterix_fields (tvbuff_t *tvb, packet_info *pinfo, unsigned offset, proto_tree *tree, uint8_t category, const AsterixField *current_uap[])
{
unsigned i, j, size, start, len, inner_offset, fspec_len;
uint64_t counter;
proto_item *asterix_field_item = NULL;
proto_tree *asterix_field_tree = NULL;
proto_item *asterix_field_item2 = NULL;
proto_tree *asterix_field_tree2 = NULL;
if (current_uap == NULL)
return 0;
for (i = 0, size = 0; current_uap[i] != NULL; i++) {
start = asterix_field_offset (tvb, offset, current_uap, i);
if (start > 0) {
len = asterix_field_length (tvb, offset + start, current_uap[i]);
size += len;
switch(current_uap[i]->type) {
case COMPOUND:
asterix_field_item = proto_tree_add_item (tree, *current_uap[i]->hf, tvb, offset + start, len, ENC_NA);
asterix_field_tree = proto_item_add_subtree (asterix_field_item, ett_asterix_subtree);
fspec_len = asterix_fspec_len (tvb, offset + start);
proto_tree_add_item (asterix_field_tree, hf_asterix_fspec, tvb, offset + start, fspec_len, ENC_NA);
dissect_asterix_fields (tvb, pinfo, offset + start, asterix_field_tree, category, (const AsterixField **)current_uap[i]->field);
break;
case REPETITIVE:
asterix_field_item = proto_tree_add_item (tree, *current_uap[i]->hf, tvb, offset + start, len, ENC_NA);
asterix_field_tree = proto_item_add_subtree (asterix_field_item, ett_asterix_subtree);
for (j = 0, counter = 0; j < current_uap[i]->repetition_counter_size; j++) {
counter = (counter << 8) + tvb_get_guint8 (tvb, offset + start + j);
}
proto_tree_add_item (asterix_field_tree, hf_counter, tvb, offset + start, current_uap[i]->repetition_counter_size, ENC_BIG_ENDIAN);
for (j = 0, inner_offset = 0; j < counter; j++, inner_offset += current_uap[i]->length) {
asterix_field_item2 = proto_tree_add_item (asterix_field_tree, *current_uap[i]->hf, tvb, offset + start + current_uap[i]->repetition_counter_size + inner_offset, current_uap[i]->length, ENC_NA);
asterix_field_tree2 = proto_item_add_subtree (asterix_field_item2, ett_asterix_subtree);
asterix_build_subtree (tvb, pinfo, offset + start + current_uap[i]->repetition_counter_size + inner_offset, asterix_field_tree2, current_uap[i]);
}
break;
/* currently not generated from asterix-spec*/
/*case EXP:
asterix_field_item = proto_tree_add_item (tree, *current_uap[i]->hf, tvb, offset + start, len, ENC_NA);
asterix_field_tree = proto_item_add_subtree (asterix_field_item, ett_asterix_subtree);
proto_tree_add_item (asterix_field_tree, hf_re_field_len, tvb, offset + start, 1, ENC_BIG_ENDIAN);
start++;
fspec_len = asterix_fspec_len (tvb, offset + start);
proto_tree_add_item (asterix_field_tree, hf_asterix_fspec, tvb, offset + start, fspec_len, ENC_NA);
dissect_asterix_fields (tvb, pinfo, offset + start, asterix_field_tree, category, (const AsterixField **)current_uap[i]->field);
break;*/
default: /* FIXED, FX, FX_1, FX_UAP */
asterix_field_item = proto_tree_add_item (tree, *current_uap[i]->hf, tvb, offset + start, len, ENC_NA);
asterix_field_tree = proto_item_add_subtree (asterix_field_item, ett_asterix_subtree);
asterix_build_subtree (tvb, pinfo, offset + start, asterix_field_tree, current_uap[i]);
break;
}
}
}
return size;
}
static void asterix_build_subtree (tvbuff_t *tvb, packet_info *pinfo, unsigned offset, proto_tree *parent, const AsterixField *field)
{
header_field_info* hfi;
int bytes_in_type, byte_offset_of_mask;
int i, inner_offset, offset_in_tvb, length_in_tvb;
uint8_t go_on;
int64_t value;
char *str_buffer = NULL;
double scaling_factor = 1.0;
uint8_t *air_speed_im_bit;
if (field->part != NULL) {
for (i = 0, inner_offset = 0, go_on = 1; go_on && field->part[i] != NULL; i++) {
value = tvb_get_bits64 (tvb, offset * 8 + inner_offset, field->part[i]->bit_length, ENC_BIG_ENDIAN);
if (field->part[i]->hf != NULL) {
offset_in_tvb = offset + inner_offset / 8;
length_in_tvb = (inner_offset % 8 + field->part[i]->bit_length + 7) / 8;
switch (field->part[i]->type) {
case FIELD_PART_FX:
if (!value) go_on = 0;
/* Fall through */
case FIELD_PART_INT:
case FIELD_PART_UINT:
case FIELD_PART_HEX:
case FIELD_PART_ASCII:
case FIELD_PART_SQUAWK:
hfi = proto_registrar_get_nth (*field->part[i]->hf);
if (hfi->bitmask)
{
// for a small bit field to decode correctly with
// a mask that belongs to a large(r) one we need to
// re-adjust offset_in_tvb and length_in_tvb to
// correctly align with the given hf mask.
//
// E.g. the following would not decode correctly:
// { &hf_020_050_V, ... FT_UINT16, ... 0x8000, ...
// instead one would have to use
// { &hf_020_050_V, ... FT_UINT8, ... 0x80, ...
//
bytes_in_type = ftype_wire_size(hfi->type);
if (bytes_in_type > 1)
{
byte_offset_of_mask = bytes_in_type - (ws_ilog2 (hfi->bitmask) + 8)/8;
if (byte_offset_of_mask >= 0)
{
offset_in_tvb -= byte_offset_of_mask;
length_in_tvb = bytes_in_type;
}
}
}
proto_tree_add_item (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, ENC_BIG_ENDIAN);
break;
case FIELD_PART_FLOAT:
twos_complement (&value, field->part[i]->bit_length);
/* Fall through */
case FIELD_PART_UFLOAT:
scaling_factor = field->part[i]->scaling_factor;
if (field->part[i]->format_string != NULL)
proto_tree_add_double_format_value (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, value * scaling_factor, field->part[i]->format_string, value * scaling_factor);
else
proto_tree_add_double (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, value * scaling_factor);
break;
case FIELD_PART_CALLSIGN:
str_buffer = wmem_strdup_printf(
pinfo->pool,
"%c%c%c%c%c%c%c%c",
AISCode[(value >> 42) & 63],
AISCode[(value >> 36) & 63],
AISCode[(value >> 30) & 63],
AISCode[(value >> 24) & 63],
AISCode[(value >> 18) & 63],
AISCode[(value >> 12) & 63],
AISCode[(value >> 6) & 63],
AISCode[value & 63]);
proto_tree_add_string (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, str_buffer);
break;
case FIELD_PART_IAS_IM:
/* special processing for I021/150 and I062/380#4 because Air Speed depends on IM subfield */
air_speed_im_bit = wmem_new (pinfo->pool, uint8_t);
*air_speed_im_bit = (tvb_get_guint8 (tvb, offset_in_tvb) & 0x80) >> 7;
/* Save IM info for the packet. key = 21150. */
p_add_proto_data (pinfo->pool, pinfo, proto_asterix, 21150, air_speed_im_bit);
proto_tree_add_item (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, ENC_BIG_ENDIAN);
break;
case FIELD_PART_IAS_ASPD:
/* special processing for I021/150 and I062/380#4 because Air Speed depends on IM subfield */
air_speed_im_bit = (uint8_t *)p_get_proto_data (pinfo->pool, pinfo, proto_asterix, 21150);
if (!air_speed_im_bit || *air_speed_im_bit == 0)
scaling_factor = 1.0/16384.0;
else
scaling_factor = 0.001;
proto_tree_add_double (parent, *field->part[i]->hf, tvb, offset_in_tvb, length_in_tvb, value * scaling_factor);
break;
}
}
inner_offset += field->part[i]->bit_length;
}
} /* if not null */
}
static uint8_t asterix_bit (uint8_t b, uint8_t bitNo)
{
return bitNo < 8 && (b & (0x80 >> bitNo)) > 0;
}
/* Function sign-extends an int64_t to a proper two's complement value.
* Only the lowest bit_len bits are set in the int64_t; all more
* significant bits need to be filled in to obtain a proper two's
* complement representation:
* if the number is negative, all other bits must be set to 1;
* if the number is positive, all other bits must remain 0. */
static void twos_complement (int64_t *v, int bit_len)
{
if (*v & (G_GUINT64_CONSTANT(1) << (bit_len - 1))) {
*v |= (G_GUINT64_CONSTANT(0xffffffffffffffff) << bit_len);
}
}
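/* Worked example (illustrative): with bit_len == 3 the raw value
* 0b101 (5) has its sign bit set, so ones are OR-ed in above bit 2
* and *v becomes 0xfffffffffffffffd, i.e. -3. */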
static unsigned asterix_fspec_len (tvbuff_t *tvb, unsigned offset)
{
unsigned i;
unsigned max_length = tvb_reported_length (tvb) - offset;
for (i = 0; (tvb_get_guint8 (tvb, offset + i) & 1) && i < max_length; i++);
return i + 1;
}
static uint8_t asterix_field_exists (tvbuff_t *tvb, unsigned offset, int bitIndex)
{
uint8_t bitNo, i;
bitNo = bitIndex + bitIndex / 7;
for (i = 0; i < bitNo / 8; i++) {
if (!(tvb_get_guint8 (tvb, offset + i) & 1)) return 0;
}
return asterix_bit (tvb_get_guint8 (tvb, offset + i), bitNo % 8);
}
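/* Illustrative note: bitIndex counts UAP fields only, while the FSPEC
* itself carries an FX bit after every 7 field bits. The expression
* bitNo = bitIndex + bitIndex / 7 converts between the two, so e.g.
* bitIndex 7 (the 8th UAP field) maps to raw bit 8, the first bit of
* the second FSPEC byte, reachable only when the FX bit of the first
* FSPEC byte is set, which the loop in asterix_field_exists() verifies. */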
static int asterix_field_length (tvbuff_t *tvb, unsigned offset, const AsterixField *field)
{
unsigned size;
uint64_t count;
uint8_t i;
size = 0;
switch(field->type) {
case FIXED:
size = field->length;
break;
case REPETITIVE:
for (i = 0, count = 0; i < field->repetition_counter_size && i < sizeof (count); i++)
count = (count << 8) + tvb_get_guint8 (tvb, offset + i);
size = (unsigned)(field->repetition_counter_size + count * field->length);
break;
case FX_UAP:
case FX:
for (size = field->length + field->header_length; tvb_get_guint8 (tvb, offset + size - 1) & 1; size += field->length);
break;
case EXP:
for (i = 0, size = 0; i < field->header_length; i++) {
size = (size << 8) + tvb_get_guint8 (tvb, offset + i);
}
break;
case COMPOUND:
/* FSPEC */
for (size = 0; tvb_get_guint8 (tvb, offset + size) & 1; size++);
size++;
for (i = 0; field->field[i] != NULL; i++) {
if (asterix_field_exists (tvb, offset, i))
size += asterix_field_length (tvb, offset + size, field->field[i]);
}
break;
}
return size;
}
/* This works for category 001. For other categories it may require changes. */
static uint8_t asterix_get_active_uap (tvbuff_t *tvb, unsigned offset, uint8_t category)
{
int i, inner_offset;
AsterixField **current_uap;
if ((category == 1) && (categories[category] != NULL)) { /* if category is supported */
if (categories[category][global_categories_version[category]][1] != NULL) { /* if another UAP exists */
current_uap = (AsterixField **)categories[category][global_categories_version[category]][0];
if (current_uap != NULL) {
inner_offset = asterix_fspec_len (tvb, offset);
for (i = 0; current_uap[i] != NULL; i++) {
if (asterix_field_exists (tvb, offset, i)) {
if (current_uap[i]->type == FX_UAP) {
return tvb_get_guint8 (tvb, offset + inner_offset) >> 7;
}
inner_offset += asterix_field_length (tvb, offset + inner_offset, current_uap[i]);
}
}
}
}
}
return 0;
}
static int asterix_field_offset (tvbuff_t *tvb, unsigned offset, const AsterixField *current_uap[], int field_index)
{
int i, inner_offset;
inner_offset = 0;
if (asterix_field_exists (tvb, offset, field_index)) {
inner_offset = asterix_fspec_len (tvb, offset);
for (i = 0; i < field_index; i++) {
if (asterix_field_exists (tvb, offset, i))
inner_offset += asterix_field_length (tvb, offset + inner_offset, current_uap[i]);
}
}
return inner_offset;
}
static int asterix_message_length (tvbuff_t *tvb, unsigned offset, uint8_t category, uint8_t active_uap)
{
int i, size;
AsterixField **current_uap;
if (categories[category] != NULL) { /* if category is supported */
current_uap = (AsterixField **)categories[category][global_categories_version[category]][active_uap];
if (current_uap != NULL) {
size = asterix_fspec_len (tvb, offset);
for (i = 0; current_uap[i] != NULL; i++) {
if (asterix_field_exists (tvb, offset, i)) {
size += asterix_field_length (tvb, offset + size, current_uap[i]);
}
}
return size;
}
}
return 0;
}
void proto_register_asterix (void)
{
static hf_register_info hf[] = {
{ &hf_asterix_category, { "Category", "asterix.category", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
{ &hf_asterix_length, { "Length", "asterix.length", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } },
{ &hf_asterix_message, { "Asterix message", "asterix.message", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
{ &hf_asterix_fspec, { "FSPEC", "asterix.fspec", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
{ &hf_re_field_len, { "RE LEN", "asterix.re_field_len", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } },
{ &hf_spare, { "Spare", "asterix.spare", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } },
{ &hf_counter, { "Counter", "asterix.counter", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } },
{ &hf_XXX_FX, { "FX", "asterix.FX", FT_UINT8, BASE_DEC, VALS (valstr_XXX_FX), 0x01, "Extension into next extent", HFILL } },
/* insert2 */
---{insert2}---
/* insert2 */
};
/* Setup protocol subtree array */
static int *ett[] = {
&ett_asterix,
&ett_asterix_category,
&ett_asterix_length,
&ett_asterix_message,
&ett_asterix_subtree
};
module_t *asterix_prefs_module;
proto_asterix = proto_register_protocol (
"ASTERIX packet", /* name */
"ASTERIX", /* short name */
"asterix" /* abbrev */
);
proto_register_field_array (proto_asterix, hf, array_length (hf));
proto_register_subtree_array (ett, array_length (ett));
asterix_handle = register_dissector ("asterix", dissect_asterix, proto_asterix);
asterix_prefs_module = prefs_register_protocol (proto_asterix, NULL);
/* insert3 */
---{insert3}---
/* insert3 */
}
void proto_reg_handoff_asterix (void)
{
dissector_add_uint_with_preference("udp.port", ASTERIX_PORT, asterix_handle);
}
/*
* Editor modelines - https://www.wireshark.org/tools/modelines.html
*
* Local variables:
* c-basic-offset: 4
* tab-width: 8
* indent-tabs-mode: nil
* End:
*
* vi: set shiftwidth=4 tabstop=8 expandtab:
* :indentSize=4:tabSize=8:noTabs=true:
*/ |
Markdown | wireshark/tools/asterix/README.md | # Asterix parser generator
*Asterix* is a set of standards, where each standard is defined
as a so-called *asterix category*.
In addition, each *asterix category* is potentially released
in a number of editions. There is no guarantee of backward
compatibility between the editions.
The structured version of asterix specifications is maintained
in a separate project:
<https://zoranbosnjak.github.io/asterix-specs/specs.html>
The purpose of this directory is to convert from structured
specifications (json format) to the `epan/dissectors/packet-asterix.c` file,
which is the actual asterix parser for this project.
It is important **NOT** to edit the `epan/dissectors/packet-asterix.c` file
manually, since this file is automatically generated.
## Manual update procedure
To sync with the upstream asterix specifications, run:
```bash
# show current upstream git revision (for reference)
export ASTERIX_SPECS_REV=$(./tools/asterix/update-specs.py --reference)
echo $ASTERIX_SPECS_REV
# update asterix decoder
./tools/asterix/update-specs.py > epan/dissectors/packet-asterix.c
git add epan/dissectors/packet-asterix.c
# inspect change, rebuild project, test...
# commit change, with reference to upstream version
git commit -m "asterix: Sync with asterix-specs #$ASTERIX_SPECS_REV"
```
## Automatic update procedure
To integrate asterix updates into a periodic (GitLab CI) job, use the `--update` option.
For example:
```
...
# Asterix categories.
- ./tools/asterix/update-specs.py --update || echo "asterix failed." >> commit-message.txt
- COMMIT_FILES+=("epan/dissectors/packet-asterix.c")
...
``` |
Python | wireshark/tools/asterix/update-specs.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# By Zoran Bošnjak <[email protected]>
#
# Use asterix specifications in JSON format,
# to generate C/C++ structures, suitable for wireshark.
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
import argparse
import urllib.request
import json
from copy import copy
from itertools import chain, repeat
from functools import reduce
import os
import sys
import re
# Path to default upstream repository
upstream_repo = 'https://zoranbosnjak.github.io/asterix-specs'
dissector_file = 'epan/dissectors/packet-asterix.c'
class Offset(object):
"""Keep track of number of added bits.
It's like integer, except when offsets are added together,
a 'modulo 8' is applied, such that offset is always between [0,7].
"""
def __init__(self):
self.current = 0
def __add__(self, other):
self.current = (self.current + other) % 8
return self
@property
def get(self):
return self.current
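# Illustrative sketch (not executed by this script):
#
#     off = Offset()
#     off += 13     # additions wrap modulo 8 ...
#     off.get       # ... so this now returns 5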
class Context(object):
"""Support class to be used as a context manager.
The 'tell' method is used to output (print) some data.
All output is first collected to a buffer, then rendered
using a template file.
"""
def __init__(self):
self.buffer = {}
self.offset = Offset()
self.inside_extended = None
self.inside_repetitive = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
pass
def tell(self, channel, s):
"""Append string 's' to an output channel."""
lines = self.buffer.get(channel, [])
lines.append(s)
self.buffer[channel] = lines
def reset_offset(self):
self.offset = Offset()
def get_number(value):
"""Get Natural/Real/Rational number as an object."""
class Integer(object):
def __init__(self, val):
self.val = val
def __str__(self):
return '{}'.format(self.val)
def __float__(self):
return float(self.val)
class Ratio(object):
def __init__(self, a, b):
self.a = a
self.b = b
def __str__(self):
return '{}/{}'.format(self.a, self.b)
def __float__(self):
return float(self.a) / float(self.b)
class Real(object):
def __init__(self, val):
self.val = val
def __str__(self):
return '{0:f}'.format(self.val).rstrip('0')
def __float__(self):
return float(self.val)
t = value['type']
val = value['value']
if t == 'Integer':
return Integer(int(val))
if t == 'Ratio':
x, y = val['numerator'], val['denominator']
return Ratio(x, y)
if t == 'Real':
return Real(float(val))
raise Exception('unexpected value type {}'.format(t))
def replace_string(s, mapping):
"""Helper function to replace each entry from the mapping."""
for (key,val) in mapping.items():
s = s.replace(key, val)
return s
def safe_string(s):
"""String replacement table."""
return replace_string(s, {
# from C reference manual
chr(92): r"\\", # Backslash character.
'?': r"\?", # Question mark character.
"'": r"\'", # Single quotation mark.
'"': r'\"', # Double quotation mark.
"\a": "", # Audible alert.
"\b": "", # Backspace character.
"\e": "", # <ESC> character. (This is a GNU extension.)
"\f": "", # Form feed.
"\n": "", # Newline character.
"\r": "", # Carriage return.
"\t": " ", # Horizontal tab.
"\v": "", # Vertical tab.
})
def get_scaling(content):
"""Get scaling factor from the content."""
k = content.get('scaling')
if k is None:
return None
k = get_number(k)
fract = content['fractionalBits']
if fract > 0:
scale = format(float(k) / (pow(2, fract)), '.29f')
scale = scale.rstrip('0')
else:
scale = format(float(k))
return scale
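# Illustrative sketch: a content with scaling factor 1 and
# 'fractionalBits' == 7 yields the string '0.0078125' (i.e. 1/128).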
def get_fieldpart(content):
"""Get FIELD_PART* from the content."""
t = content['type']
if t == 'Raw': return 'FIELD_PART_HEX'
elif t == 'Table': return 'FIELD_PART_UINT'
elif t == 'String':
var = content['variation']
if var == 'StringAscii': return 'FIELD_PART_ASCII'
elif var == 'StringICAO': return 'FIELD_PART_CALLSIGN'
elif var == 'StringOctal': return 'FIELD_PART_SQUAWK'
else:
raise Exception('unexpected string variation: {}'.format(var))
elif t == 'Integer':
if content['signed']:
return 'FIELD_PART_INT'
else:
return 'FIELD_PART_UINT'
elif t == 'Quantity':
if content['signed']:
return 'FIELD_PART_FLOAT'
else:
return 'FIELD_PART_UFLOAT'
elif t == 'Bds':
return 'FIELD_PART_HEX'
else:
raise Exception('unexpected content type: {}'.format(t))
def download_url(path):
"""Download url and return content as a string."""
with urllib.request.urlopen(upstream_repo + path) as url:
return url.read()
def read_file(path):
"""Read file content, return string."""
with open(path) as f:
return f.read()
def load_jsons(paths):
"""Load json files from either URL or from local disk."""
# load from url
if paths == []:
manifest = download_url('/manifest.json').decode()
listing = []
for spec in json.loads(manifest):
cat = spec['category']
for edition in spec['cats']:
listing.append('/specs/cat{}/cats/cat{}/definition.json'.format(cat, edition))
for edition in spec['refs']:
listing.append('/specs/cat{}/refs/ref{}/definition.json'.format(cat, edition))
return [download_url(i).decode() for i in listing]
# load from disk
else:
listing = []
for path in paths:
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for i in files:
(a,b) = os.path.splitext(i)
if (a,b) != ('definition', '.json'):
continue
listing.append(os.path.join(root, i))
elif os.path.isfile(path):
listing.append(path)
else:
raise Exception('unexpected path type: {}'.format(path))
return [read_file(f) for f in listing]
def load_gitrev(paths):
"""Read git revision reference."""
# load from url
if paths == []:
gitrev = download_url('/gitrev.txt').decode().strip()
return [upstream_repo, 'git revision: {}'.format(gitrev)]
# load from disk
else:
return ['(local disk)']
def get_ft(ref, n, content, offset):
"""Get FT... from the content."""
a = offset.get
# gross bit size (next multiple of 8)
(m, b) = divmod(a+n, 8)
m = m if b == 0 else m + 1
m *= 8
mask = '0x00'
if a != 0 or b != 0:
bits = chain(repeat(0, a), repeat(1, n), repeat(0, m-n-a))
mask = 0
for (a,b) in zip(bits, reversed(range(m))):
mask += a*pow(2,b)
mask = hex(mask)
# prefix mask with zeros '0x000...', to adjust mask size
assert mask[0:2] == '0x'
mask = mask[2:]
required_mask_size = (m//8)*2
add_some = required_mask_size - len(mask)
mask = '0x' + '0'*add_some + mask
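# Worked example (illustrative): a 3-bit element starting at bit
# offset a == 6 spans two bytes, so m == 16 and the computed mask is
# 0b0000001110000000, rendered as '0x0380'.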
t = content['type']
if t == 'Raw':
if n > 64: # very long items
assert (n % 8) == 0, "very long items require byte alignment"
return 'FT_NONE, BASE_NONE, NULL, 0x00'
if (n % 8): # not byte aligned
base = 'DEC'
else: # byte aligned
if n >= 32: # long items
base = 'HEX'
else: # short items
base = 'HEX_DEC'
return 'FT_UINT{}, BASE_{}, NULL, {}'.format(m, base, mask)
elif t == 'Table':
return 'FT_UINT{}, BASE_DEC, VALS (valstr_{}), {}'.format(m, ref, mask)
elif t == 'String':
var = content['variation']
if var == 'StringAscii':
return 'FT_STRING, BASE_NONE, NULL, {}'.format(mask)
elif var == 'StringICAO':
return 'FT_STRING, BASE_NONE, NULL, {}'.format(mask)
elif var == 'StringOctal':
return 'FT_UINT{}, BASE_OCT, NULL, {}'.format(m, mask)
else:
raise Exception('unexpected string variation: {}'.format(var))
elif t == 'Integer':
signed = content['signed']
if signed:
return 'FT_INT{}, BASE_DEC, NULL, {}'.format(m, mask)
else:
return 'FT_UINT{}, BASE_DEC, NULL, {}'.format(m, mask)
elif t == 'Quantity':
return 'FT_DOUBLE, BASE_NONE, NULL, 0x00'
elif t == 'Bds':
return 'FT_UINT{}, BASE_DEC, NULL, {}'.format(m, mask)
else:
raise Exception('unexpected content type: {}'.format(t))
def reference(cat, edition, path):
"""Create reference string."""
name = '_'.join(path)
if edition is None:
return('{:03d}_{}'.format(cat, name))
return('{:03d}_V{}_{}_{}'.format(cat, edition['major'], edition['minor'], name))
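# Illustrative sketch: reference(62, {'major': 1, 'minor': 18}, ['380', '4'])
# returns '062_V1_18_380_4'; with edition None it would return '062_380_4'.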
def get_content(rule):
t = rule['type']
# Most cases are 'ContextFree', use as specified.
if t == 'ContextFree':
return rule['content']
# Handle 'Dependent' contents as 'Raw'.
elif t == 'Dependent':
return {'type': "Raw"}
else:
raise Exception('unexpected type: {}'.format(t))
def get_bit_size(item):
"""Return bit size of a (spare) item."""
if item['spare']:
return item['length']
else:
return item['variation']['size']
def get_description(item, content=None):
"""Return item description."""
name = item['name'] if not is_generated(item) else None
title = item.get('title')
if content is not None and content.get('unit'):
unit = '[{}]'.format(safe_string(content['unit']))
else:
unit = None
parts = [x for x in [name, title, unit] if x]
if not parts:
return ''
return ', '.join(parts)
def generate_group(item, variation=None):
"""Generate group-item from element-item."""
level2 = copy(item)
level2['name'] = 'VALUE'
level2['is_generated'] = True
if variation is None:
level1 = copy(item)
level1['variation'] = {
'type': 'Group',
'items': [level2],
}
else:
level2['variation'] = variation['variation']
level1 = {
'type': "Group",
'items': [level2],
}
return level1
def is_generated(item):
return item.get('is_generated') is not None
def ungroup(item):
"""Convert group of items of known size to element"""
n = sum([get_bit_size(i) for i in item['variation']['items']])
result = copy(item)
result['variation'] = {
'rule': {
'content': {'type': 'Raw'},
'type': 'ContextFree',
},
'size': n,
'type': 'Element',
}
return result
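# Example (illustrative): a Group holding a 3-bit and a 5-bit item collapses
# into a single 8-bit 'Raw' Element of the same total size.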
def part1(ctx, get_ref, catalogue):
"""Generate components in order
- static int hf_...
- FieldPart
- FieldPart[]
- AsterixField
"""
tell = lambda s: ctx.tell('insert1', s)
tell_pr = lambda s: ctx.tell('insert2', s)
ctx.reset_offset()
ctx.inside_extended = None
def handle_item(path, item):
"""Handle 'spare' or regular 'item'.
This function is used recursively, depending on the item structure.
"""
def handle_variation(path, variation):
"""Handle 'Element, Group...' variations.
This function is used recursively, depending on the item structure."""
t = variation['type']
ref = get_ref(path)
def part_of(item):
if item['spare']:
return '&IXXX_{}bit_spare'.format(item['length'])
return '&I{}_{}'.format(ref, item['name'])
if t == 'Element':
tell('static int hf_{} = -1;'.format(ref))
n = variation['size']
content = get_content(variation['rule'])
scaling = get_scaling(content)
scaling = scaling if scaling is not None else 1.0
fp = get_fieldpart(content)
if content['type'] == 'Table':
tell('static const value_string valstr_{}[] = {}'.format(ref, '{'))
for (a,b) in content['values']:
tell(' {} {}, "{}" {},'.format('{', a, safe_string(b), '}'))
tell(' {} 0, NULL {}'.format('{', '}'))
tell('};')
tell('static const FieldPart I{} = {} {}, {}, {}, &hf_{}, NULL {};'.format(ref, '{', n, scaling, fp, ref, '}'))
description = get_description(item, content)
ft = get_ft(ref, n, content, ctx.offset)
tell_pr(' {} &hf_{}, {} "{}", "asterix.{}", {}, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, ft, '}', '}'))
ctx.offset += n
if ctx.inside_extended is not None:
n, rest = ctx.inside_extended
if ctx.offset.get + 1 > n:
raise Exception("unexpected offset")
# FX bit
if ctx.offset.get + 1 == n:
ctx.offset += 1
m = next(rest)
ctx.inside_extended = (m, rest)
elif t == 'Group':
ctx.reset_offset()
description = get_description(item)
tell_pr(' {} &hf_{}, {} "{}", "asterix.{}", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, '}', '}'))
tell('static int hf_{} = -1;'.format(ref))
for i in variation['items']:
handle_item(path, i)
# FieldPart[]
tell('static const FieldPart *I{}_PARTS[] = {}'.format(ref,'{'))
for i in variation['items']:
tell(' {},'.format(part_of(i)))
tell(' NULL')
tell('};')
# AsterixField
bit_size = sum([get_bit_size(i) for i in variation['items']])
byte_size = bit_size // 8
parts = 'I{}_PARTS'.format(ref)
comp = '{ NULL }'
if not ctx.inside_repetitive:
tell('static const AsterixField I{} = {} FIXED, {}, 0, 0, &hf_{}, {}, {} {};'.format
(ref, '{', byte_size, ref, parts, comp, '}'))
elif t == 'Extended':
n1 = variation['first']
n2 = variation['extents']
ctx.reset_offset()
ctx.inside_extended = (n1, chain(repeat(n1,1), repeat(n2)))
description = get_description(item)
tell_pr(' {} &hf_{}, {} "{}", "asterix.{}", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, '}', '}'))
tell('static int hf_{} = -1;'.format(ref))
items = []
for i in variation['items']:
if i.get('variation') is not None:
if i['variation']['type'] == 'Group':
i = ungroup(i)
items.append(i)
for i in items:
handle_item(path, i)
tell('static const FieldPart *I{}_PARTS[] = {}'.format(ref,'{'))
chunks = chain(repeat(n1,1), repeat(n2))
# iterate over items, reinsert FX bits
while True:
bit_size = next(chunks)
assert (bit_size % 8) == 0, "bit alignment error"
byte_size = bit_size // 8
bits_from = bit_size
while True:
i = items[0]
items = items[1:]
n = get_bit_size(i)
tell(' {},'.format(part_of(i)))
bits_from -= n
if bits_from <= 1:
break
tell(' &IXXX_FX,')
if not items:
break
tell(' NULL')
tell('};')
# AsterixField
n1 = variation['first'] // 8
n2 = variation['extents'] // 8
parts = 'I{}_PARTS'.format(ref)
comp = '{ NULL }'
tell('static const AsterixField I{} = {} FX, {}, 0, {}, &hf_{}, {}, {} {};'.format
(ref, '{', n2, n1 - 1, ref, parts, comp, '}'))
ctx.inside_extended = None
elif t == 'Repetitive':
ctx.reset_offset()
ctx.inside_repetitive = True
# Group is required below this item.
if variation['variation']['type'] == 'Element':
subvar = generate_group(item, variation)
else:
subvar = variation['variation']
handle_variation(path, subvar)
# AsterixField
bit_size = sum([get_bit_size(i) for i in subvar['items']])
byte_size = bit_size // 8
rep = variation['rep']['size'] // 8
parts = 'I{}_PARTS'.format(ref)
comp = '{ NULL }'
tell('static const AsterixField I{} = {} REPETITIVE, {}, {}, 0, &hf_{}, {}, {} {};'.format
(ref, '{', byte_size, rep, ref, parts, comp, '}'))
ctx.inside_repetitive = False
elif t == 'Explicit':
ctx.reset_offset()
tell('static int hf_{} = -1;'.format(ref))
description = get_description(item)
tell_pr(' {} &hf_{}, {} "{}", "asterix.{}", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, '}', '}'))
tell('static const AsterixField I{} = {} EXP, 0, 0, 1, &hf_{}, NULL, {} NULL {} {};'.format(ref, '{', ref, '{', '}', '}'))
elif t == 'Compound':
ctx.reset_offset()
tell('static int hf_{} = -1;'.format(ref))
description = get_description(item)
tell_pr(' {} &hf_{}, {} "{}", "asterix.{}", FT_NONE, BASE_NONE, NULL, 0x00, NULL, HFILL {} {},'.format('{', ref, '{', description, ref, '}', '}'))
comp = '{'
for i in variation['items']:
if i is None:
comp += ' &IX_SPARE,'
continue
# Group is required below this item.
if i['variation']['type'] == 'Element':
subitem = generate_group(i)
else:
subitem = i
comp += ' &I{}_{},'.format(ref, subitem['name'])
handle_item(path, subitem)
comp += ' NULL }'
# AsterixField
tell('static const AsterixField I{} = {} COMPOUND, 0, 0, 0, &hf_{}, NULL, {} {};'.format
(ref, '{', ref, comp, '}'))
else:
raise Exception('unexpected variation type: {}'.format(t))
if item['spare']:
ctx.offset += item['length']
return
# Group is required on the first level.
if path == [] and item['variation']['type'] == 'Element':
variation = generate_group(item)['variation']
else:
variation = item['variation']
handle_variation(path + [item['name']], variation)
for item in catalogue:
# adjust 'repetitive fx' item
if item['variation']['type'] == 'Repetitive' and item['variation']['rep']['type'] == 'Fx':
var = item['variation']['variation'].copy()
if var['type'] != 'Element':
raise Exception("Expecting 'Element'")
n = var['size']
item = item.copy()
item['variation'] = {
'type': 'Extended',
'first': n+1,
'extents': n+1,
'fx': 'Regular',
'items': [{
'definition': None,
'description': None,
'name': 'Subitem',
'remark': None,
'spare': False,
'title': 'Subitem',
'variation': var,
}]
}
handle_item([], item)
tell('')
def part2(ctx, ref, uap):
"""Generate UAPs"""
tell = lambda s: ctx.tell('insert1', s)
tell('DIAG_OFF_PEDANTIC')
ut = uap['type']
if ut == 'uap':
variations = [{'name': 'uap', 'items': uap['items']}]
elif ut == 'uaps':
variations = uap['variations']
else:
raise Exception('unexpected uap type {}'.format(ut))
for var in variations:
tell('static const AsterixField *I{}_{}[] = {}'.format(ref, var['name'], '{'))
for i in var['items']:
if i is None:
tell(' &IX_SPARE,')
else:
tell(' &I{}_{},'.format(ref, i))
tell(' NULL')
tell('};')
tell('static const AsterixField **I{}[] = {}'.format(ref, '{'))
for var in variations:
tell(' I{}_{},'.format(ref, var['name']))
tell(' NULL')
tell('};')
tell('DIAG_ON_PEDANTIC')
tell('')
def part3(ctx, specs):
"""Generate
- static const AsterixField ***...
- static const enum_val_t ..._versions[]...
"""
tell = lambda s: ctx.tell('insert1', s)
def fmt_edition(cat, edition):
return 'I{:03d}_V{}_{}'.format(cat, edition['major'], edition['minor'])
cats = set([spec['number'] for spec in specs])
for cat in sorted(cats):
lst = [spec for spec in specs if spec['number'] == cat]
editions = sorted([val['edition'] for val in lst], key = lambda x: (x['major'], x['minor']), reverse=True)
editions_fmt = [fmt_edition(cat, edition) for edition in editions]
editions_str = ', '.join(['I{:03d}'.format(cat)] + editions_fmt)
tell('DIAG_OFF_PEDANTIC')
tell('static const AsterixField ***I{:03d}all[] = {} {} {};'.format(cat, '{', editions_str, '}'))
tell('DIAG_ON_PEDANTIC')
tell('')
tell('static const enum_val_t I{:03d}_versions[] = {}'.format(cat, '{'))
edition = editions[0]
a = edition['major']
b = edition['minor']
tell(' {} "I{:03d}", "Version {}.{} (latest)", 0 {},'.format('{', cat, a, b, '}'))
for ix, edition in enumerate(editions, start=1):
a = edition['major']
b = edition['minor']
tell(' {} "I{:03d}_v{}_{}", "Version {}.{}", {} {},'.format('{', cat, a, b, a, b, ix, '}'))
tell(' { NULL, NULL, 0 }')
tell('};')
tell('')
def part4(ctx, cats):
"""Generate
- static const AsterixField ****categories[]...
- prefs_register_enum_preference ...
"""
tell = lambda s: ctx.tell('insert1', s)
tell_pr = lambda s: ctx.tell('insert3', s)
tell('static const AsterixField ****categories[] = {')
for i in range(0, 256):
val = 'I{:03d}all'.format(i) if i in cats else 'NULL'
tell(' {}, /* {:03d} */'.format(val, i))
tell(' NULL')
tell('};')
for cat in sorted(cats):
tell_pr(' prefs_register_enum_preference (asterix_prefs_module, "i{:03d}_version", "I{:03d} version", "Select the CAT{:03d} version", &global_categories_version[{}], I{:03d}_versions, false);'.format(cat, cat, cat, cat, cat))
class Output(object):
"""Output context manager. Write either to stdout or to a dissector
file directly, depending on 'update' argument"""
def __init__(self, update):
self.update = update
self.f = None
def __enter__(self):
if self.update:
self.f = open(dissector_file, 'w')
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if self.f is not None:
self.f.close()
def dump(self, line):
if self.f is None:
print(line)
else:
self.f.write(line+'\n')
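# Example (illustrative usage): inside 'with Output(update=False) as out:',
# each out.dump(line) call prints to stdout; with update=True the lines are
# written to the dissector file instead.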
def is_valid(spec):
"""Check spec"""
def check_item(item):
if item['spare']:
return True
return check_variation(item['variation'])
def check_variation(variation):
t = variation['type']
if t == 'Element':
return True
elif t == 'Group':
return all([check_item(i) for i in variation['items']])
elif t == 'Extended':
n1 = variation['first']
n2 = variation['extents']
fx = variation['fx']
if fx != 'Regular':
return False # 'irregular extended item'
return all([check_item(i) for i in variation['items']])
elif t == 'Repetitive':
return check_variation(variation['variation'])
elif t == 'Explicit':
return True
elif t == 'Compound':
items = [i for i in variation['items'] if i is not None]
return all([check_item(i) for i in items])
else:
raise Exception('unexpected variation type {}'.format(t))
return all([check_item(i) for i in spec['catalogue']])
def main():
parser = argparse.ArgumentParser(description='Process asterix specs files.')
parser.add_argument('paths', metavar='PATH', nargs='*',
help='json spec file(s), use upstream repository if no input is given')
parser.add_argument('--reference', action='store_true',
help='print upstream reference and exit')
parser.add_argument("--update", action="store_true",
help="Update %s as needed instead of writing to stdout" % dissector_file)
args = parser.parse_args()
if args.reference:
gitrev_short = download_url('/gitrev.txt').decode().strip()[0:10]
print(gitrev_short)
sys.exit(0)
# read and json-decode input files
jsons = load_jsons(args.paths)
jsons = [json.loads(i) for i in jsons]
jsons = sorted(jsons, key = lambda x: (x['number'], x['edition']['major'], x['edition']['minor']))
jsons = [spec for spec in jsons if spec['type'] == 'Basic']
jsons = [spec for spec in jsons if is_valid(spec)]
cats = list(set([x['number'] for x in jsons]))
latest_editions = {cat: sorted(
filter(lambda x: x['number'] == cat, jsons),
key = lambda x: (x['edition']['major'], x['edition']['minor']), reverse=True)[0]['edition']
for cat in cats}
# regular expression for template rendering
ins = re.compile(r'---\{([A-Za-z0-9_]*)\}---')
gitrev = load_gitrev(args.paths)
with Context() as ctx:
for i in gitrev:
ctx.tell('gitrev', i)
# generate parts into the context buffer
for spec in jsons:
is_latest = spec['edition'] == latest_editions[spec['number']]
ctx.tell('insert1', '/* Category {:03d}, edition {}.{} */'.format(spec['number'], spec['edition']['major'], spec['edition']['minor']))
# handle part1
get_ref = lambda path: reference(spec['number'], spec['edition'], path)
part1(ctx, get_ref, spec['catalogue'])
if is_latest:
ctx.tell('insert1', '/* Category {:03d}, edition {}.{} (latest) */'.format(spec['number'], spec['edition']['major'], spec['edition']['minor']))
get_ref = lambda path: reference(spec['number'], None, path)
part1(ctx, get_ref, spec['catalogue'])
# handle part2
cat = spec['number']
edition = spec['edition']
ref = '{:03d}_V{}_{}'.format(cat, edition['major'], edition['minor'])
part2(ctx, ref, spec['uap'])
if is_latest:
ref = '{:03d}'.format(cat)
part2(ctx, ref, spec['uap'])
part3(ctx, jsons)
part4(ctx, set([spec['number'] for spec in jsons]))
# use context buffer to render template
script_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(script_path, 'packet-asterix-template.c')) as f:
template_lines = f.readlines()
# All input is collected and rendered.
# It's safe to update the dissector.
# copy each line of the template to required output,
# if the 'insertion' is found in the template,
# replace it with the buffer content
with Output(args.update) as out:
for line in template_lines:
line = line.rstrip()
insertion = ins.match(line)
if insertion is None:
out.dump(line)
else:
segment = insertion.group(1)
for i in ctx.buffer[segment]:
out.dump(i)
if __name__ == '__main__':
main() |
Shell Script | wireshark/tools/cppcheck/cppcheck.sh | #!/bin/bash
#
# cppcheck.sh
# Script to run CppCheck Static Analyzer.
# http://cppcheck.sourceforge.net/
#
# Usage: tools/cppcheck/cppcheck.sh [options] [file]
# Where options can be:
# -a disable suppression list (see $CPPCHECK_DIR/suppressions)
# -c colorize html output
# -h html output (default is gcc)
# -x xml output (default is gcc)
# -j n threads (default: 4)
# -l n check files from the last [n] commits
# -o check modified files
# -v quiet mode
# If the file argument is omitted, all files in the current directory are checked.
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 2012 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
CPPCHECK=$(type -p cppcheck)
CPPCHECK_DIR=$(dirname "$0")
if [ -z "$CPPCHECK" ] ; then
echo "cppcheck not found"
exit 1
fi
THREADS=4
LAST_COMMITS=0
TARGET=""
QUIET="--quiet"
SUPPRESSIONS="--suppressions-list=$CPPCHECK_DIR/suppressions"
INCLUDES="--includes-file=$CPPCHECK_DIR/includes"
MODE="gcc"
COLORIZE_HTML_MODE="no"
OPEN_FILES="no"
XML_ARG=""
colorize_worker()
{
# always uses stdin/stdout
[ "$COLORIZE_HTML_MODE" = "yes" ] && \
sed -e '/<td>warning<\/td>/s/^<tr>/<tr bgcolor="#ff3">/' \
-e '/<td>error<\/td>/s/^<tr>/<tr bgcolor="#faa">/' \
|| sed ''
}
# switcher
colorize()
{
[ -z "$1" ] && colorize_worker || colorize_worker <<< "$1"
}
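# Example (illustrative): both 'some_cmd | colorize' and 'colorize "$html"'
# work; the worker filters stdin when no argument is supplied.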
exit_cleanup() {
if [ "$MODE" = "html" ]; then
echo "</table></body></html>"
fi
if [ -n "$1" ] ; then
exit "$1"
fi
}
while getopts "achxj:l:ov" OPTCHAR ; do
case $OPTCHAR in
a) SUPPRESSIONS=" " ;;
c) COLORIZE_HTML_MODE="yes" ;;
h) MODE="html" ;;
x) MODE="xml" ;;
j) THREADS="$OPTARG" ;;
l) LAST_COMMITS="$OPTARG" ;;
o) OPEN_FILES="yes" ;;
v) QUIET=" " ;;
*) printf "Unknown option %s" "$OPTCHAR"
esac
done
shift $(( OPTIND - 1 ))
if [ "$MODE" = "gcc" ]; then
TEMPLATE="gcc"
elif [ "$MODE" = "html" ]; then
echo "<html><body><table border=1>"
echo "<tr><th>File</th><th>Line</th><th>Severity</th>"
echo "<th>Message</th><th>ID</th></tr>"
TEMPLATE="<tr><td>{file}</td><td>{line}</td><td>{severity}</td><td>{message}</td><td>{id}</td></tr>"
fi
# Ensure that the COLORIZE_HTML_MODE option is used only with HTML-mode and not with GCC-mode.
[ "$MODE" = "html" ] && [ "$COLORIZE_HTML_MODE" = "yes" ] || COLORIZE_HTML_MODE="no"
if [ "$LAST_COMMITS" -gt 0 ] ; then
TARGET=$( git diff --name-only --diff-filter=d HEAD~"$LAST_COMMITS".. | grep -E '\.(c|cpp)$' )
if [ -z "${TARGET//[[:space:]]/}" ] ; then
>&2 echo "No C or C++ files found in the last $LAST_COMMITS commit(s)."
exit_cleanup 0
fi
fi
if [ "$OPEN_FILES" = "yes" ] ; then
TARGET=$(git diff --name-only | grep -E '\.(c|cpp)$' )
TARGET="$TARGET $(git diff --staged --name-only | grep -E '\.(c|cpp)$' )"
if [ -z "${TARGET//[[:space:]]/}" ] ; then
>&2 echo "No C or C++ files are currently opened (modified or added for next commit)."
exit_cleanup 0
fi
fi
if [ $# -gt 0 ]; then
TARGET="$TARGET $*"
fi
if [ -z "$TARGET" ] ; then
TARGET=.
fi
if [ "$MODE" = "xml" ]; then
XML_ARG="--xml"
fi
# Use a little-documented feature of the shell to pass SIGINTs only to the
# child process (cppcheck in this case). That way the final 'echo' still
# runs and we aren't left with broken HTML.
trap : INT
if [ "$QUIET" = " " ]; then
echo "Examining:"
echo $TARGET
echo
fi
# shellcheck disable=SC2086
$CPPCHECK --force --enable=style $QUIET \
$SUPPRESSIONS $INCLUDES \
-i doc/ \
-i epan/dissectors/asn1/ \
--std=c11 --template=$TEMPLATE \
-j $THREADS $TARGET $XML_ARG 2>&1 | colorize
exit_cleanup
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
# |
Python | wireshark/tools/json2pcap/json2pcap.py | #!/usr/bin/env python3
#
# Copyright 2020, Martin Kacer <kacer.martin[AT]gmail.com> and contributors
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
import sys
import ijson
import operator
import copy
import binascii
import array
import argparse
import string
import random
import math
import hashlib
import re
from collections import OrderedDict
from scapy import all as scapy
# Field anonymization class
class AnonymizedField:
'''
The Anonymization field object specifying anonymization
:field arg: field name
:type arg: anonymization type [0 masking 0xff, 1 anonymization shake_256]
:start arg: If specified, the anonymization starts at given byte number
:end arg: If specified, the anonymization ends at given byte number
'''
def __init__(self, field, type):
self.field = field
self.type = type
self.start = None
self.end = None
match = re.search(r'(\S+)\[(-?\d+)?:(-?\d+)?\]', field)
if match:
self.field = match.group(1)
self.start = match.group(2)
if self.start is not None:
self.start = int(self.start)
self.end = match.group(3)
if self.end is not None:
self.end = int(self.end)
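# Example (illustrative): AnonymizedField('ip.src_raw[2:6]', 0) parses to
# field 'ip.src_raw' with start 2 and end 6, while 'ip.dst_raw[:-2]' leaves
# start unset and sets end to -2.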
# Returns the new field value after anonymization
def anonymize_field_shake256(self, field, type, salt):
shake = hashlib.shake_256(str(field + ':' + salt).encode('utf-8'))
# String type, output should be ASCII
if type in [26, 27, 28]:
length = math.ceil(len(field)/4)
shake_hash = shake.hexdigest(length)
ret_string = array.array('B', str.encode(shake_hash))
ret_string = ''.join('{:02x}'.format(x) for x in ret_string)
# Other types, output could be HEX
else:
length = math.ceil(len(field)/2)
shake_hash = shake.hexdigest(length)
ret_string = shake_hash
# Correct the string length
if (len(ret_string) < len(field)):
ret_string = ret_string.ljust(len(field))
if (len(ret_string) > len(field)):
ret_string = ret_string[:len(field)]
return ret_string
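# Example (illustrative): an 8-hex-digit input produces an 8-hex-digit
# digest, so the anonymized value keeps the original field length.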
def anonymize_field(self, _h, _t, salt):
s = 0
e = None
if self.start:
s = self.start
if self.end:
e = self.end
if e < 0:
e = len(_h) + e
else:
e = len(_h)
h = _h[s:e]
if self.type == 0:
h = 'f' * len(h)
elif self.type == 1:
h = self.anonymize_field_shake256(h, _t, salt)
h_mask = '0' * len(_h[0:s]) + 'f' * len(h) + '0' * len(_h[e:])
h = _h[0:s] + h + _h[e:]
return [h, h_mask]
def make_unique(key, dct):
counter = 0
unique_key = key
while unique_key in dct:
counter += 1
unique_key = '{}_{}'.format(key, counter)
return unique_key
def parse_object_pairs(pairs):
dct = OrderedDict()
for key, value in pairs:
if key in dct:
key = make_unique(key, dct)
dct[key] = value
return dct
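# Example (illustrative): pairs [('a', 1), ('a', 2)] decode to
# OrderedDict([('a', 1), ('a_1', 2)]), so duplicated JSON keys are preserved.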
#
# ********* PY TEMPLATES *********
#
def read_py_function(name):
s = ''
record = False
indent = 0
file = open(__file__)
for line in file:
ind = len(line) - len(line.lstrip())
if line.find("def " + name) != -1:
record = True
indent = ind
elif record and indent == ind and len(line) > 1:
record = False
if record:
s = s + line
file.close()
return s
py_header = """#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File generated by json2pcap.py
# json2pcap.py created by Martin Kacer, 2020
import os
import binascii
import array
import sys
import subprocess
from collections import OrderedDict
from scapy import all as scapy
# *****************************************************
# * PACKET PAYLOAD GENERATED FROM INPUT PCAP *
# * Modify this function to edit the packet *
# *****************************************************
def main():
d = OrderedDict()
"""
py_footer = """ generate_pcap(d)
# *****************************************************
# * FUNCTIONS from TEMPLATE *
# * Do not edit these functions if not required *
# *****************************************************
"""
py_footer = py_footer + read_py_function("to_bytes")
py_footer = py_footer + read_py_function("lsb")
py_footer = py_footer + read_py_function("multiply_strings")
py_footer = py_footer + read_py_function("rewrite_frame")
py_footer = py_footer + read_py_function("assemble_frame")
py_footer = py_footer + read_py_function("generate_pcap")
py_footer = py_footer + """
if __name__ == '__main__':
main()
"""
#
# ***** End of PY TEMPLATES ******
#
#
# ********** FUNCTIONS ***********
#
def raw_flat_collector(dict):
if hasattr(dict, 'items'):
for k, v in dict.items():
if k.endswith("_raw"):
yield k, v
else:
for val in raw_flat_collector(v):
yield val
# d - input dictionary, parsed from json
# r - result dictionary
# frame_name - parent protocol name
# frame_position - parent protocol position
def py_generator(d, r, frame_name='frame_raw', frame_position=0):
if d is None:
return
if hasattr(d, 'items'):
for k, v in d.items():
# no recursion
if k.endswith("_raw") or "_raw_" in k:
if isinstance(v[1], (list, tuple)) or isinstance(v[2], (list, tuple)):
#i = 1;
for _v in v:
h = _v[0]
p = _v[1]
l = _v[2] * 2
b = _v[3]
t = _v[4]
if (len(h) != l):
l = len(h)
p = p - frame_position
# Add into result dictionary
key = str(k).replace('.', '_')
key = make_unique(key, r)
fn = frame_name.replace('.', '_')
if (fn == key):
fn = None
value = [fn, h, p, l, b, t]
r[key] = value
else:
h = v[0]
p = v[1]
l = v[2] * 2
b = v[3]
t = v[4]
if (len(h) != l):
l = len(h)
p = p - frame_position
# Add into result dictionary
key = str(k).replace('.', '_')
key = make_unique(key, r)
fn = frame_name.replace('.', '_')
if (fn == key):
fn = None
value = [fn , h, p, l, b, t]
r[key] = value
# recursion
else:
if isinstance(v, dict):
fn = frame_name
fp = frame_position
# if there is also preceding raw protocol frame use it
# remove tree suffix
key = k
if (key.endswith("_tree") or ("_tree_" in key)):
key = key.replace('_tree', '')
raw_key = key + "_raw"
if (raw_key in d):
# f = d[raw_key][0]
fn = raw_key
fp = d[raw_key][1]
py_generator(v, r, fn, fp)
elif isinstance(v, (list, tuple)):
fn = frame_name
fp = frame_position
# if there is also preceding raw protocol frame use it
# remove tree suffix
key = k
if (key.endswith("_tree") or ("_tree_" in key)):
key = key.replace('_tree', '')
raw_key = key + "_raw"
if (raw_key in d):
fn = raw_key
fp = d[raw_key][1]
for _v in v:
py_generator(_v, r, frame_name, frame_position)
# Emulate int.to_bytes() from Python 3.2+
def to_bytes(n, length, endianess='big'):
h = '%x' % n
s = bytearray.fromhex(('0' * (len(h) % 2) + h).zfill(length * 2))
return s if endianess == 'big' else s[::-1]
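# Example (illustrative): to_bytes(255, 2) == bytearray(b'\x00\xff') and
# to_bytes(255, 2, 'little') == bytearray(b'\xff\x00').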
# Returns the index, counting from 0, of the least significant set bit in x
def lsb(x):
return (x & -x).bit_length() - 1
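# Example (illustrative): lsb(0b0110) == 1 and lsb(8) == 3.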
# Replace parts of original_string by new_string; bytes whose mask byte is 'ff' keep the original value
def multiply_strings(original_string, new_string, mask):
ret_string = new_string
if mask is None:
return ret_string
for i in range(0, min(len(original_string), len(new_string), len(mask)), 2):
if mask[i:i + 2] == 'ff':
#print("ff")
ret_string = ret_string[:i] + original_string[i:i + 2] + ret_string[i + 2:]
return ret_string
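# Example (illustrative): multiply_strings('aabb', 'ccdd', '00ff') returns
# 'ccbb': the second byte keeps its original value because its mask is 'ff'.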
# Rewrite frame
# h - hex bytes
# p - position
# l - length
# b - bitmask
# t - type
# frame_amask - optional, anonymization mask (00 - not anonymized byte, ff - anonymized byte)
def rewrite_frame(frame_raw, h, p, l, b, t, frame_amask=None):
if p < 0 or l < 0 or h is None:
return frame_raw
# no bitmask
if(b == 0):
if (len(h) != l):
l = len(h)
frame_raw_new = frame_raw[:p] + h + frame_raw[p + l:]
return multiply_strings(frame_raw, frame_raw_new, frame_amask)
# bitmask
else:
# get hex string from frame which will be replaced
_h = frame_raw[p:p + l]
# add 0 padding to have correct length
if (len(_h) % 2 == 1):
_h = '0' + _h
if (len(h) % 2 == 1):
h = '0' + h
# Only replace bits defined by mask
# new_hex = (old_hex & !mask) | (new_hex & mask)
_H = bytearray.fromhex(_h)
_H = array.array('B', _H)
M = to_bytes(b, len(_H))
M = array.array('B', M)
# shift mask aligned to position
for i in range(len(M)):
if (i + p / 2) < len(M):
M[i] = M[i + int(p / 2)]
else:
M[i] = 0x00
H = bytearray.fromhex(h)
H = array.array('B', H)
# for i in range(len(_H)):
# print "{0:08b}".format(_H[i]),
# print
# for i in range(len(M)):
# print "{0:08b}".format(M[i]),
# print
j = 0
for i in range(len(_H)):
if (M[i] != 0):
v = H[j] << lsb(M[i])
# print "Debug: {0:08b}".format(v),
_H[i] = (_H[i] & ~M[i]) | (v & M[i])
# print "Debug: " + str(_H[i]),
j = j + 1
# for i in range(len(_H)):
# print "{0:08b}".format(_H[i]),
# print
masked_h = binascii.hexlify(_H)
masked_h = masked_h.decode('ascii')
frame_raw_new = frame_raw[:p] + str(masked_h) + frame_raw[p + l:]
return multiply_strings(frame_raw, frame_raw_new, frame_amask)
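# Example (illustrative): rewrite_frame('aabbcc', 'ff', 2, 2, 0, 26) replaces
# the middle byte and returns 'aaffcc'; positions and lengths count hex digits.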
def assemble_frame(d, frame_time):
input = d['frame_raw'][1]
isFlat = False
linux_cooked_header = False
while not isFlat:
isFlat = True
_d = d.copy()
for key, val in _d.items():
h = str(val[1]) # hex
p = val[2] * 2 # position
l = val[3] * 2 # length
b = val[4] # bitmask
t = val[5] # type
if (key == "sll_raw"):
linux_cooked_header = True
# only if the node is not parent
isParent = False
for k, v in d.items():
if (v[0] == key):
isParent = True
isFlat = False
break
if not isParent and val[0] is not None:
d[val[0]][1] = rewrite_frame(d[val[0]][1], h, p, l, b, t)
del d[key]
output = d['frame_raw'][1]
# for Linux cooked header replace dest MAC and remove two bytes to reconstruct normal frame
if (linux_cooked_header):
output = "000000000000" + output[6*2:] # replce dest MAC
output = output[:12*2] + "" + output[14*2:] # remove two bytes before Protocol
return output
def generate_pcap(d):
# 1. Assemble frame
input = d['frame_raw'][1]
output = assemble_frame(d, None)
print(input)
print(output)
# 2. Testing: compare input and output for not modified json
if (input != output):
print("Modified frames: ")
s1 = input
s2 = output
print(s1)
print(s2)
if (len(s1) == len(s2)):
d = [i for i in range(len(s1)) if s1[i] != s2[i]]
print(d)
# 3. Generate pcap
outfile = sys.argv[0] + ".pcap"
pcap_out = scapy.PcapWriter(outfile, append=False, sync=False)
new_packet = scapy.Packet(bytearray.fromhex(output))
pcap_out.write(new_packet)
print("Generated " + outfile)
#
# ************ MAIN **************
#
VERSION = "1.1"
parser = argparse.ArgumentParser(description="""
json2pcap {version}
Utility to generate pcap from json format.
Packet modification:
In input json it is possible to modify the raw values of decoded fields.
The output pcap will include the modified values. The algorithm of
generating the output pcap is to get all raw hex fields from input json and
then assembling them by layering from longest (less decoded fields) to
shortest (more decoded fields). This means that if the modified raw field is
a shorter field (more decoded field), it takes precedence over a modification
in a longer field (less decoded field). If the json includes duplicated raw
fields with same position and length, the behavior is not deterministic.
For manual packet editing it is always possible to remove any not required
raw fields from json; only the frame_raw field is mandatory for reconstruction.
Packet modification with -p switch:
A python script is generated instead of a pcap. When executed, this python
script will generate the pcap of the 1st packet from the input json. The
generated code includes the decoded fields and the function to assemble the
packet. This makes it possible to modify the script and programmatically edit
or encode the packet variables. The assembling algorithm is different, because
the decoded packet fields are relative and point to their parent node with
their position (compared to the input json, which has absolute positions).
Pcap masking and anonymization with -m and -a switch:
The script allows masking or anonymizing of the selected json raw fields. If
the fields are selected and located on lower protocol layers, they are not
overwritten by upper fields which are not marked by these switches.
The pcap masking and anonymization can be performed in the following way:
tshark -r orig.pcap -T json -x | \
python json2pcap.py -m "ip.src_raw" -a "ip.dst_raw" -o anonymized.pcap
In this example the ip.src_raw field is masked with ffffffff by byte values
and ip.dst_raw is hashed by randomly generated salt.
Additionally the following syntax is valid to anonymize portion of field
tshark -r orig.pcap -T json -x | \
python json2pcap.py -m "ip.src_raw[2:]" -a "ip.dst_raw[:-2]" -o anonymized.pcap
Where the src_ip first byte is preserved and dst_ip last byte is preserved.
And the same can be achieved by
tshark -r orig.pcap -T json -x | \
python json2pcap.py -m "ip.src_raw[2:8]" -a "ip.dst_raw[0:6]" -o anonymized.pcap
Masking and anonymization limitations are mainly the following:
- In case tshark performs reassembly across multiple frames, the
backward pcap reconstruction is not properly performed and can result in
malformed frames.
- The new values in the fields could violate the field format, as the
json2pcap is not performing correct protocol encoding with respect to
allowed values of the target field and field encoding.
""".format(version=VERSION), formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
parser.add_argument('-i', '--infile', nargs='?', help='json generated by tshark -T json -x\nor by tshark -T jsonraw (not preserving frame timestamps).\nIf no input file is specified the script reads from stdin.')
parser.add_argument('-o', '--outfile', required=True, help='output pcap filename')
parser.add_argument('-p', '--python', help='generate python payload instead of pcap (only 1st packet)', default=False, action='store_true')
parser.add_argument('-m', '--mask', help='mask the specific raw field (e.g. -m "ip.src_raw" -m "ip.dst_raw[2:6]")', action='append', metavar='MASKED_FIELD')
parser.add_argument('-a', '--anonymize', help='anonymize the specific raw field (e.g. -a "ip.src_raw[2:]" -a "ip.dst_raw[:-2]")', action='append', metavar='ANONYMIZED_FIELD')
parser.add_argument('-s', '--salt', help='salt used for anonymization. If no value is provided it is randomized.', default=None)
parser.add_argument('-v', '--verbose', help='verbose output', default=False, action='store_true')
args = parser.parse_args()
# read JSON
infile = args.infile
outfile = args.outfile
# Read from input file
if infile:
data_file = open(infile)
# Read from pipe
else:
data_file = sys.stdin
# Parse anonymization fields
anonymize = {}
if args.mask:
for m in args.mask:
if '_raw' not in m:
print("Error: The specified fields by -m switch should be raw fields. " + m + " does not have _raw suffix")
sys.exit()
af = AnonymizedField(m, 0)
anonymize[af.field] = af
if args.anonymize:
for a in args.anonymize:
if '_raw' not in a:
print("Error: The specified fields by -a switch should be raw fields. " + a + " does not have _raw suffix")
sys.exit()
af = AnonymizedField(a, 1)
anonymize[af.field] = af
input_frame_raw = ''
frame_raw = ''
frame_time = None
salt = args.salt
if salt is None:
# generate random salt if no salt was provided
salt = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
# Generate pcap
if args.python is False:
pcap_out = scapy.PcapWriter(outfile, append=False, sync=False)
# Iterate over packets in JSON
for packet in ijson.items(data_file, "item", buf_size=200000):
_list = []
linux_cooked_header = False
# get flat raw fields into _list
for raw in raw_flat_collector(packet['_source']['layers']):
if len(raw) >= 2:
if (raw[0] == "frame_raw"):
frame_raw = raw[1][0]
frame_amask = "0"*len(frame_raw) # initialize anonymization mask
input_frame_raw = copy.copy(frame_raw)
frame_time = None
if 'frame.time_epoch' in packet['_source']['layers']['frame']:
frame_time = packet['_source']['layers']['frame']['frame.time_epoch']
else:
# add into value list into raw[5] the field name
if isinstance(raw[1], list):
raw[1].append(raw[0])
_list.append(raw[1])
if (raw[0] == "sll_raw"):
linux_cooked_header = True
# sort _list
sorted_list = sorted(_list, key=operator.itemgetter(1), reverse=False)
sorted_list = sorted(sorted_list, key=operator.itemgetter(2), reverse=True)
# print("Debug: " + str(sorted_list))
# rewrite frame
for raw in sorted_list:
if len(raw) >= 6:
h = str(raw[0]) # hex
p = raw[1] * 2 # position
l = raw[2] * 2 # length
b = raw[3] # bitmask
t = raw[4] # type
# raw[5] # field_name (added by script)
h_mask = h # hex for anonymization mask
# anonymize fields
if (raw[5] in anonymize):
[h, h_mask] = anonymize[raw[5]].anonymize_field(h, t, salt)
if (isinstance(p, (list, tuple)) or isinstance(l, (list, tuple))):
for r in raw:
_h = str(r[0]) # hex
_p = r[1] * 2 # position
_l = r[2] * 2 # length
_b = r[3] # bitmask
_t = r[4] # type
# raw[5] # field_name (added by script)
_h_mask = _h # hex for anonymization mask
# anonymize fields
if (raw[5] in anonymize):
[_h, _h_mask] = anonymize[raw[5]].anonymize_field(_h, _t, salt)
# print("Debug: " + str(raw))
frame_raw = rewrite_frame(frame_raw, _h, _p, _l, _b, _t, frame_amask)
# update anonymization mask
if (raw[5] in anonymize):
frame_amask = rewrite_frame(frame_amask, _h_mask, _p, _l, _b, _t)
else:
# print("Debug: " + str(raw))
frame_raw = rewrite_frame(frame_raw, h, p, l, b, t, frame_amask)
# update anonymization mask
if (raw[5] in anonymize):
frame_amask = rewrite_frame(frame_amask, h_mask, p, l, b, t)
# for Linux cooked header replace dest MAC and remove two bytes to reconstruct normal frame using text2pcap
if (linux_cooked_header):
frame_raw = "000000000000" + frame_raw[6 * 2:] # replce dest MAC
frame_raw = frame_raw[:12 * 2] + "" + frame_raw[14 * 2:] # remove two bytes before Protocol
# Testing: remove comment to compare input and output for not modified json
if (args.verbose and input_frame_raw != frame_raw):
print("Modified frames: ")
s1 = input_frame_raw
s2 = frame_raw
print(s1)
print(s2)
if (len(s1) == len(s2)):
d = [i for i in range(len(s1)) if s1[i] != s2[i]]
print(d)
new_packet = scapy.Packet(bytearray.fromhex(frame_raw))
if frame_time:
new_packet.time = float(frame_time)
pcap_out.write(new_packet)
# Generate python payload only for first packet
else:
py_outfile = outfile + '.py'
f = open(py_outfile, 'w')
#for packet in json:
for packet in ijson.items(data_file, "item", buf_size=200000):
f.write(py_header)
r = OrderedDict({})
#print "packet = " + str(packet['_source']['layers'])
py_generator(packet['_source']['layers'], r)
for key, value in r.items():
f.write(" d['" + key + "'] =",)
f.write(" " + str(value) + "\n")
f.write(py_footer)
# Currently only first packet is used from pcap
f.close()
print("Generated " + py_outfile)
break |
Shell Script | wireshark/tools/lemon/apply-patches.sh | #!/bin/sh -e
# Patch lemon.c and lempar.c to silence static analyzer warnings.
# See also tools/lemon/README
# Strip trailing whitespace
sed -e 's/ \+$//' -i lemon.c lempar.c
# Other patches
if [ -d "patches" ]; then
for i in patches/*.patch; do
echo "Applying $i"
patch --silent -p1 -i "$i"
done
fi
echo DONE |
Text | wireshark/tools/lemon/CMakeLists.txt | # CMakeLists.txt
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
add_executable(lemon lemon.c)
if(DEFINED LEMON_C_COMPILER)
set(CMAKE_C_COMPILER "${LEMON_C_COMPILER}")
set(CMAKE_C_FLAGS "")
endif()
# To keep lemon.c as close to upstream as possible disable all warnings
if(CMAKE_C_COMPILER_ID MATCHES "MSVC")
target_compile_options(lemon PRIVATE /w)
else()
target_compile_options(lemon PRIVATE -w)
endif()
if(CMAKE_C_COMPILER_ID MATCHES "Clang")
# Disable static analysis for lemon source code. These issues don't
# affect Wireshark at runtime.
target_compile_options(lemon PRIVATE -Xclang -analyzer-disable-all-checks)
endif()
if(DEFINED NO_SANITIZE_CFLAGS)
target_compile_options(lemon PRIVATE ${NO_SANITIZE_CFLAGS})
endif()
if(DEFINED NO_SANITIZE_LDFLAGS)
target_link_options(lemon PRIVATE ${NO_SANITIZE_LDFLAGS})
endif()
#
# Editor modelines - https://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 8
# tab-width: 8
# indent-tabs-mode: t
# End:
#
# vi: set shiftwidth=8 tabstop=8 noexpandtab:
# :indentSize=8:tabSize=8:noTabs=false:
# |
C | wireshark/tools/lemon/lemon.c | /*
** This file contains all sources (including headers) to the LEMON
** LALR(1) parser generator. The sources have been combined into a
** single file to make it easy to include LEMON in the source tree
** and Makefile of another program.
**
** The author of this program disclaims copyright.
*/
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
#include <assert.h>
#define ISSPACE(X) isspace((unsigned char)(X))
#define ISDIGIT(X) isdigit((unsigned char)(X))
#define ISALNUM(X) isalnum((unsigned char)(X))
#define ISALPHA(X) isalpha((unsigned char)(X))
#define ISUPPER(X) isupper((unsigned char)(X))
#define ISLOWER(X) islower((unsigned char)(X))
#ifndef __WIN32__
# if defined(_WIN32) || defined(WIN32)
# define __WIN32__
# endif
#endif
#ifdef __WIN32__
#ifdef __cplusplus
extern "C" {
#endif
extern int access(const char *path, int mode);
#ifdef __cplusplus
}
#endif
#else
#include <unistd.h>
#endif
/* #define PRIVATE static */
#define PRIVATE
#ifdef TEST
#define MAXRHS 5 /* Set low to exercise exception code */
#else
#define MAXRHS 1000
#endif
extern void memory_error();
static int showPrecedenceConflict = 0;
static char *msort(char*,char**,int(*)(const char*,const char*));
/*
** Compilers are getting increasingly pedantic about type conversions
** as C evolves ever closer to Ada.... To work around the latest problems
** we have to define the following variant of strlen().
*/
#define lemonStrlen(X) ((int)strlen(X))
/*
** Compilers are starting to complain about the use of sprintf() and strcpy(),
** saying they are unsafe. So we define our own versions of those routines too.
**
** There are three routines here: lemon_sprintf(), lemon_vsprintf(), and
** lemon_addtext(). The first two are replacements for sprintf() and vsprintf().
** The third is a helper routine for vsnprintf() that adds text to the end of a
** buffer, making sure the buffer is always zero-terminated.
**
** The string formatter is a minimal subset of stdlib sprintf() supporting only
** a few simple conversions:
**
** %d
** %s
** %.*s
**
*/
static void lemon_addtext(
char *zBuf, /* The buffer to which text is added */
int *pnUsed, /* Slots of the buffer used so far */
const char *zIn, /* Text to add */
int nIn, /* Bytes of text to add. -1 to use strlen() */
int iWidth /* Field width. Negative to left justify */
){
if( nIn<0 ) for(nIn=0; zIn[nIn]; nIn++){}
while( iWidth>nIn ){ zBuf[(*pnUsed)++] = ' '; iWidth--; }
if( nIn==0 ) return;
memcpy(&zBuf[*pnUsed], zIn, nIn);
*pnUsed += nIn;
while( (-iWidth)>nIn ){ zBuf[(*pnUsed)++] = ' '; iWidth++; }
zBuf[*pnUsed] = 0;
}
static int lemon_vsprintf(char *str, const char *zFormat, va_list ap){
int i, j, k, c;
int nUsed = 0;
const char *z;
char zTemp[50];
str[0] = 0;
for(i=j=0; (c = zFormat[i])!=0; i++){
if( c=='%' ){
int iWidth = 0;
lemon_addtext(str, &nUsed, &zFormat[j], i-j, 0);
c = zFormat[++i];
if( ISDIGIT(c) || (c=='-' && ISDIGIT(zFormat[i+1])) ){
if( c=='-' ) i++;
while( ISDIGIT(zFormat[i]) ) iWidth = iWidth*10 + zFormat[i++] - '0';
if( c=='-' ) iWidth = -iWidth;
c = zFormat[i];
}
if( c=='d' ){
int v = va_arg(ap, int);
if( v<0 ){
lemon_addtext(str, &nUsed, "-", 1, iWidth);
v = -v;
}else if( v==0 ){
lemon_addtext(str, &nUsed, "0", 1, iWidth);
}
k = 0;
while( v>0 ){
k++;
zTemp[sizeof(zTemp)-k] = (v%10) + '0';
v /= 10;
}
lemon_addtext(str, &nUsed, &zTemp[sizeof(zTemp)-k], k, iWidth);
}else if( c=='s' ){
z = va_arg(ap, const char*);
lemon_addtext(str, &nUsed, z, -1, iWidth);
}else if( c=='.' && memcmp(&zFormat[i], ".*s", 3)==0 ){
i += 2;
k = va_arg(ap, int);
z = va_arg(ap, const char*);
lemon_addtext(str, &nUsed, z, k, iWidth);
}else if( c=='%' ){
lemon_addtext(str, &nUsed, "%", 1, 0);
}else{
fprintf(stderr, "illegal format\n");
exit(1);
}
j = i+1;
}
}
lemon_addtext(str, &nUsed, &zFormat[j], i-j, 0);
return nUsed;
}
static int lemon_sprintf(char *str, const char *format, ...){
va_list ap;
int rc;
va_start(ap, format);
rc = lemon_vsprintf(str, format, ap);
va_end(ap);
return rc;
}
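/* Example (illustrative): lemon_sprintf(buf, "%s: %d", zName, n) behaves like
** sprintf() for the small conversion subset documented above. */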
static void lemon_strcpy(char *dest, const char *src){
while( (*(dest++) = *(src++))!=0 ){}
}
static void lemon_strcat(char *dest, const char *src){
while( *dest ) dest++;
lemon_strcpy(dest, src);
}
/* a few forward declarations... */
struct rule;
struct lemon;
struct action;
static struct action *Action_new(void);
static struct action *Action_sort(struct action *);
/********** From the file "build.h" ************************************/
void FindRulePrecedences(struct lemon*);
void FindFirstSets(struct lemon*);
void FindStates(struct lemon*);
void FindLinks(struct lemon*);
void FindFollowSets(struct lemon*);
void FindActions(struct lemon*);
/********* From the file "configlist.h" *********************************/
void Configlist_init(void);
struct config *Configlist_add(struct rule *, int);
struct config *Configlist_addbasis(struct rule *, int);
void Configlist_closure(struct lemon *);
void Configlist_sort(void);
void Configlist_sortbasis(void);
struct config *Configlist_return(void);
struct config *Configlist_basis(void);
void Configlist_eat(struct config *);
void Configlist_reset(void);
/********* From the file "error.h" ***************************************/
void ErrorMsg(const char *, int,const char *, ...);
/****** From the file "option.h" ******************************************/
enum option_type { OPT_FLAG=1, OPT_INT, OPT_DBL, OPT_STR,
OPT_FFLAG, OPT_FINT, OPT_FDBL, OPT_FSTR};
struct s_options {
enum option_type type;
const char *label;
char *arg;
const char *message;
};
int OptInit(char**,struct s_options*,FILE*);
int OptNArgs(void);
char *OptArg(int);
void OptErr(int);
void OptPrint(void);
/******** From the file "parse.h" *****************************************/
void Parse(struct lemon *lemp);
/********* From the file "plink.h" ***************************************/
struct plink *Plink_new(void);
void Plink_add(struct plink **, struct config *);
void Plink_copy(struct plink **, struct plink *);
void Plink_delete(struct plink *);
/********** From the file "report.h" *************************************/
void Reprint(struct lemon *);
void ReportOutput(struct lemon *);
void ReportTable(struct lemon *, int, int);
void ReportHeader(struct lemon *);
void CompressTables(struct lemon *);
void ResortStates(struct lemon *);
/********** From the file "set.h" ****************************************/
void SetSize(int); /* All sets will be of size N */
char *SetNew(void); /* A new set for element 0..N */
void SetFree(char*); /* Deallocate a set */
int SetAdd(char*,int); /* Add element to a set */
int SetUnion(char *,char *); /* A <- A U B, thru element N */
#define SetFind(X,Y) (X[Y]) /* True if Y is in set X */
/********** From the file "struct.h" *************************************/
/*
** Principal data structures for the LEMON parser generator.
*/
typedef enum {LEMON_FALSE=0, LEMON_TRUE} Boolean;
/* Symbols (terminals and nonterminals) of the grammar are stored
** in the following: */
enum symbol_type {
TERMINAL,
NONTERMINAL,
MULTITERMINAL
};
enum e_assoc {
LEFT,
RIGHT,
NONE,
UNK
};
struct symbol {
const char *name; /* Name of the symbol */
int index; /* Index number for this symbol */
enum symbol_type type; /* Symbols are all either TERMINALS or NTs */
struct rule *rule; /* Linked list of rules of this (if an NT) */
struct symbol *fallback; /* fallback token in case this token doesn't parse */
int prec; /* Precedence if defined (-1 otherwise) */
enum e_assoc assoc; /* Associativity if precedence is defined */
char *firstset; /* First-set for all rules of this symbol */
Boolean lambda; /* True if NT and can generate an empty string */
int useCnt; /* Number of times used */
char *destructor; /* Code which executes whenever this symbol is
** popped from the stack during error processing */
int destLineno; /* Line number for start of destructor. Set to
** -1 for duplicate destructors. */
char *datatype; /* The data type of information held by this
** object. Only used if type==NONTERMINAL */
int dtnum; /* The data type number. In the parser, the value
** stack is a union. The .yy%d element of this
** union is the correct data type for this object */
int bContent; /* True if this symbol ever carries content - if
** it is ever more than just syntax */
/* The following fields are used by MULTITERMINALs only */
int nsubsym; /* Number of constituent symbols in the MULTI */
struct symbol **subsym; /* Array of constituent symbols */
};
/* Each production rule in the grammar is stored in the following
** structure. */
struct rule {
struct symbol *lhs; /* Left-hand side of the rule */
const char *lhsalias; /* Alias for the LHS (NULL if none) */
int lhsStart; /* True if left-hand side is the start symbol */
int ruleline; /* Line number for the rule */
int nrhs; /* Number of RHS symbols */
struct symbol **rhs; /* The RHS symbols */
const char **rhsalias; /* An alias for each RHS symbol (NULL if none) */
int line; /* Line number at which code begins */
const char *code; /* The code executed when this rule is reduced */
const char *codePrefix; /* Setup code before code[] above */
const char *codeSuffix; /* Breakdown code after code[] above */
struct symbol *precsym; /* Precedence symbol for this rule */
int index; /* An index number for this rule */
int iRule; /* Rule number as used in the generated tables */
Boolean noCode; /* True if this rule has no associated C code */
Boolean codeEmitted; /* True if the code has been emitted already */
Boolean canReduce; /* True if this rule is ever reduced */
Boolean doesReduce; /* Reduce actions occur after optimization */
Boolean neverReduce; /* Reduce is theoretically possible, but prevented
** by actions or other outside implementation */
struct rule *nextlhs; /* Next rule with the same LHS */
struct rule *next; /* Next rule in the global list */
};
/* A configuration is a production rule of the grammar together with
** a mark (dot) showing how much of that rule has been processed so far.
** Configurations also contain a follow-set which is a list of terminal
** symbols which are allowed to immediately follow the end of the rule.
** Every configuration is recorded as an instance of the following: */
enum cfgstatus {
COMPLETE,
INCOMPLETE
};
struct config {
struct rule *rp; /* The rule upon which the configuration is based */
int dot; /* The parse point */
char *fws; /* Follow-set for this configuration only */
struct plink *fplp; /* Follow-set forward propagation links */
struct plink *bplp; /* Follow-set backwards propagation links */
struct state *stp; /* Pointer to state which contains this */
enum cfgstatus status; /* used during followset and shift computations */
struct config *next; /* Next configuration in the state */
struct config *bp; /* The next basis configuration */
};
enum e_action {
SHIFT,
ACCEPT,
REDUCE,
ERROR,
SSCONFLICT, /* A shift/shift conflict */
SRCONFLICT, /* Was a reduce, but part of a conflict */
RRCONFLICT, /* Was a reduce, but part of a conflict */
SH_RESOLVED, /* Was a shift. Precedence resolved conflict */
RD_RESOLVED, /* Was reduce. Precedence resolved conflict */
NOT_USED, /* Deleted by compression */
SHIFTREDUCE /* Shift first, then reduce */
};
/* Every shift or reduce operation is stored as one of the following */
struct action {
struct symbol *sp; /* The look-ahead symbol */
enum e_action type;
union {
struct state *stp; /* The new state, if a shift */
struct rule *rp; /* The rule, if a reduce */
} x;
struct symbol *spOpt; /* SHIFTREDUCE optimization to this symbol */
struct action *next; /* Next action for this state */
struct action *collide; /* Next action with the same hash */
};
/* Each state of the generated parser's finite state machine
** is encoded as an instance of the following structure. */
struct state {
struct config *bp; /* The basis configurations for this state */
struct config *cfp; /* All configurations in this set */
int statenum; /* Sequential number for this state */
struct action *ap; /* List of actions for this state */
int nTknAct, nNtAct; /* Number of actions on terminals and nonterminals */
int iTknOfst, iNtOfst; /* yy_action[] offset for terminals and nonterms */
int iDfltReduce; /* Default action is to REDUCE by this rule */
struct rule *pDfltReduce;/* The default REDUCE rule. */
int autoReduce; /* True if this is an auto-reduce state */
};
#define NO_OFFSET (-2147483647)
/* A followset propagation link indicates that the contents of one
** configuration followset should be propagated to another whenever
** the first changes. */
struct plink {
struct config *cfp; /* The configuration to which linked */
struct plink *next; /* The next propagate link */
};
/* The state vector for the entire parser generator is recorded as
** follows. (LEMON uses no global variables and makes little use of
** static variables. Fields in the following structure can be thought
** of as being global variables in the program.) */
struct lemon {
struct state **sorted; /* Table of states sorted by state number */
struct rule *rule; /* List of all rules */
struct rule *startRule; /* First rule */
int nstate; /* Number of states */
int nxstate; /* nstate with tail degenerate states removed */
int nrule; /* Number of rules */
int nruleWithAction; /* Number of rules with actions */
int nsymbol; /* Number of terminal and nonterminal symbols */
int nterminal; /* Number of terminal symbols */
int minShiftReduce; /* Minimum shift-reduce action value */
int errAction; /* Error action value */
int accAction; /* Accept action value */
int noAction; /* No-op action value */
int minReduce; /* Minimum reduce action */
int maxAction; /* Maximum action value of any kind */
struct symbol **symbols; /* Sorted array of pointers to symbols */
int errorcnt; /* Number of errors */
struct symbol *errsym; /* The error symbol */
struct symbol *wildcard; /* Token that matches anything */
char *name; /* Name of the generated parser */
char *arg; /* Declaration of the 3rd argument to parser */
char *ctx; /* Declaration of 2nd argument to constructor */
char *tokentype; /* Type of terminal symbols in the parser stack */
char *vartype; /* The default type of non-terminal symbols */
char *start; /* Name of the start symbol for the grammar */
char *stacksize; /* Size of the parser stack */
char *include; /* Code to put at the start of the C file */
char *error; /* Code to execute when an error is seen */
char *overflow; /* Code to execute on a stack overflow */
char *failure; /* Code to execute on parser failure */
char *accept; /* Code to execute when the parser accepts */
char *extracode; /* Code appended to the generated file */
char *tokendest; /* Code to execute to destroy token data */
char *vardest; /* Code for the default non-terminal destructor */
char *filename; /* Name of the input file */
char *outname; /* Name of the current output file */
char *tokenprefix; /* A prefix added to token names in the .h file */
int nconflict; /* Number of parsing conflicts */
int nactiontab; /* Number of entries in the yy_action[] table */
int nlookaheadtab; /* Number of entries in yy_lookahead[] */
int tablesize; /* Total table size of all tables in bytes */
int basisflag; /* Print only basis configurations */
int printPreprocessed; /* Show preprocessor output on stdout */
int has_fallback; /* True if any %fallback is seen in the grammar */
int nolinenosflag; /* True if #line statements should not be printed */
char *argv0; /* Name of the program */
};
#define MemoryCheck(X) if((X)==0){ \
extern void memory_error(); \
memory_error(); \
}
/**************** From the file "table.h" *********************************/
/*
** All code in this file has been automatically generated
** from a specification in the file
** "table.q"
** by the associative array code building program "aagen".
** Do not edit this file! Instead, edit the specification
** file, then rerun aagen.
*/
/*
** Code for processing tables in the LEMON parser generator.
*/
/* Routines for handling strings */
const char *Strsafe(const char *);
void Strsafe_init(void);
int Strsafe_insert(const char *);
const char *Strsafe_find(const char *);
/* Routines for handling symbols of the grammar */
struct symbol *Symbol_new(const char *);
int Symbolcmpp(const void *, const void *);
void Symbol_init(void);
int Symbol_insert(struct symbol *, const char *);
struct symbol *Symbol_find(const char *);
struct symbol *Symbol_Nth(int);
int Symbol_count(void);
struct symbol **Symbol_arrayof(void);
/* Routines to manage the state table */
int Configcmp(const char *, const char *);
struct state *State_new(void);
void State_init(void);
int State_insert(struct state *, struct config *);
struct state *State_find(struct config *);
struct state **State_arrayof(void);
/* Routines used for efficiency in Configlist_add */
void Configtable_init(void);
int Configtable_insert(struct config *);
struct config *Configtable_find(struct config *);
void Configtable_clear(int(*)(struct config *));
/****************** From the file "action.c" *******************************/
/*
** Routines processing parser actions in the LEMON parser generator.
*/
/* Allocate a new parser action */
static struct action *Action_new(void){
static struct action *actionfreelist = 0;
struct action *newaction;
if( actionfreelist==0 ){
int i;
int amt = 100;
actionfreelist = (struct action *)calloc(amt, sizeof(struct action));
if( actionfreelist==0 ){
fprintf(stderr,"Unable to allocate memory for a new parser action.");
exit(1);
}
for(i=0; i<amt-1; i++) actionfreelist[i].next = &actionfreelist[i+1];
actionfreelist[amt-1].next = 0;
}
newaction = actionfreelist;
actionfreelist = actionfreelist->next;
return newaction;
}
/* Compare two actions for sorting purposes. Return negative, zero, or
** positive if the first action is less than, equal to, or greater than
** the second.
*/
static int actioncmp(
struct action *ap1,
struct action *ap2
){
int rc;
rc = ap1->sp->index - ap2->sp->index;
if( rc==0 ){
rc = (int)ap1->type - (int)ap2->type;
}
if( rc==0 && (ap1->type==REDUCE || ap1->type==SHIFTREDUCE) ){
rc = ap1->x.rp->index - ap2->x.rp->index;
}
if( rc==0 ){
rc = (int) (ap2 - ap1);
}
return rc;
}
/* Sort parser actions */
static struct action *Action_sort(
struct action *ap
){
ap = (struct action *)msort((char *)ap,(char **)&ap->next,
(int(*)(const char*,const char*))actioncmp);
return ap;
}
void Action_add(
struct action **app,
enum e_action type,
struct symbol *sp,
char *arg
){
struct action *newaction;
newaction = Action_new();
newaction->next = *app;
*app = newaction;
newaction->type = type;
newaction->sp = sp;
newaction->spOpt = 0;
if( type==SHIFT ){
newaction->x.stp = (struct state *)arg;
}else{
newaction->x.rp = (struct rule *)arg;
}
}
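/* Example call sites (from later in this file): buildshifts() records
** shift actions,
**
**     Action_add(&stp->ap, SHIFT, sp, (char*)newstp);
**
** and FindActions() records reduce actions,
**
**     Action_add(&stp->ap, REDUCE, lemp->symbols[j], (char*)cfp->rp);
**
** The fourth argument is a state pointer for SHIFT and a rule pointer
** otherwise, matching the union assignment above.
*/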
/********************** New code to implement the "acttab" module ***********/
/*
** This module implements routines used to construct the yy_action[] table.
*/
/*
** The state of the yy_action table under construction is an instance of
** the following structure.
**
** The yy_action table maps the pair (state_number, lookahead) into an
** action_number. The table is an array of integer pairs. The state_number
** determines an initial offset into the yy_action array. The lookahead
** value is then added to this initial offset to get an index X into the
** yy_action array. If aAction[X].lookahead equals the value of the
** lookahead input, then the value of the action_number output is
** aAction[X].action. If the lookaheads do not match then the
** default action for the state_number is returned.
**
** All actions associated with a single state_number are first entered
** into aLookahead[] using multiple calls to acttab_action(). Then the
** actions for that single state_number are placed into the aAction[]
** array with a single call to acttab_insert(). The acttab_insert() call
** also resets the aLookahead[] array in preparation for the next
** state number.
*/
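/* Worked example (illustrative numbers only): suppose the offset
** recorded for state 7 is 12 and the lookahead token has index 3.
** Then X = 12 + 3 = 15 and the generated parser does, in effect,
**
**     if( yy_lookahead[15]==3 ) action = yy_action[15];
**     else                      action = default action for state 7;
**
** acttab_insert() below picks the per-state offsets so that the
** entries of different states interleave in aAction[] without
** colliding.
*/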
struct lookahead_action {
int lookahead; /* Value of the lookahead token */
int action; /* Action to take on the given lookahead */
};
typedef struct acttab acttab;
struct acttab {
int nAction; /* Number of used slots in aAction[] */
int nActionAlloc; /* Slots allocated for aAction[] */
struct lookahead_action
*aAction, /* The yy_action[] table under construction */
*aLookahead; /* A single new transaction set */
int mnLookahead; /* Minimum aLookahead[].lookahead */
int mnAction; /* Action associated with mnLookahead */
int mxLookahead; /* Maximum aLookahead[].lookahead */
int nLookahead; /* Used slots in aLookahead[] */
int nLookaheadAlloc; /* Slots allocated in aLookahead[] */
int nterminal; /* Number of terminal symbols */
int nsymbol; /* total number of symbols */
};
/* Return the number of entries in the yy_action table */
#define acttab_lookahead_size(X) ((X)->nAction)
/* The value for the N-th entry in yy_action */
#define acttab_yyaction(X,N) ((X)->aAction[N].action)
/* The value for the N-th entry in yy_lookahead */
#define acttab_yylookahead(X,N) ((X)->aAction[N].lookahead)
/* Free all memory associated with the given acttab */
void acttab_free(acttab *p){
free( p->aAction );
free( p->aLookahead );
free( p );
}
/* Allocate a new acttab structure */
acttab *acttab_alloc(int nsymbol, int nterminal){
acttab *p = (acttab *) calloc( 1, sizeof(*p) );
if( p==0 ){
fprintf(stderr,"Unable to allocate memory for a new acttab.");
exit(1);
}
p->nsymbol = nsymbol;
p->nterminal = nterminal;
return p;
}
/* Add a new action to the current transaction set.
**
** This routine is called once for each lookahead for a particular
** state.
*/
void acttab_action(acttab *p, int lookahead, int action){
if( p->nLookahead>=p->nLookaheadAlloc ){
p->nLookaheadAlloc += 25;
p->aLookahead = (struct lookahead_action *) realloc( p->aLookahead,
sizeof(p->aLookahead[0])*p->nLookaheadAlloc );
if( p->aLookahead==0 ){
fprintf(stderr,"malloc failed\n");
exit(1);
}
}
if( p->nLookahead==0 ){
p->mxLookahead = lookahead;
p->mnLookahead = lookahead;
p->mnAction = action;
}else{
if( p->mxLookahead<lookahead ) p->mxLookahead = lookahead;
if( p->mnLookahead>lookahead ){
p->mnLookahead = lookahead;
p->mnAction = action;
}
}
p->aLookahead[p->nLookahead].lookahead = lookahead;
p->aLookahead[p->nLookahead].action = action;
p->nLookahead++;
}
/*
** Add the transaction set built up with prior calls to acttab_action()
** into the current action table. Then reset the transaction set back
** to an empty set in preparation for a new round of acttab_action() calls.
**
** Return the offset into the action table of the new transaction.
**
** If the makeItSafe parameter is true, then the offset is chosen so that
** it is impossible to overread the yy_lookahead[] table regardless of
** the lookahead token. This is done for the terminal symbols, as they
** come from external inputs and can contain syntax errors. When makeItSafe
** is false, there is more flexibility in selecting offsets, resulting in
** a smaller table. For non-terminal symbols, which are never syntax errors,
** makeItSafe can be false.
*/
int acttab_insert(acttab *p, int makeItSafe){
int i, j, k, n, end;
assert( p->nLookahead>0 );
/* Make sure we have enough space to hold the expanded action table
** in the worst case. The worst case occurs if the transaction set
** must be appended to the current action table
*/
n = p->nsymbol + 1;
if( p->nAction + n >= p->nActionAlloc ){
int oldAlloc = p->nActionAlloc;
p->nActionAlloc = p->nAction + n + p->nActionAlloc + 20;
p->aAction = (struct lookahead_action *) realloc( p->aAction,
sizeof(p->aAction[0])*p->nActionAlloc);
if( p->aAction==0 ){
fprintf(stderr,"malloc failed\n");
exit(1);
}
for(i=oldAlloc; i<p->nActionAlloc; i++){
p->aAction[i].lookahead = -1;
p->aAction[i].action = -1;
}
}
/* Scan the existing action table looking for an offset that is a
** duplicate of the current transaction set. Fall out of the loop
** if and when the duplicate is found.
**
** i is the index in p->aAction[] where p->mnLookahead is inserted.
*/
end = makeItSafe ? p->mnLookahead : 0;
for(i=p->nAction-1; i>=end; i--){
if( p->aAction[i].lookahead==p->mnLookahead ){
/* All lookaheads and actions in the aLookahead[] transaction
** must match against the candidate aAction[i] entry. */
if( p->aAction[i].action!=p->mnAction ) continue;
for(j=0; j<p->nLookahead; j++){
k = p->aLookahead[j].lookahead - p->mnLookahead + i;
if( k<0 || k>=p->nAction ) break;
if( p->aLookahead[j].lookahead!=p->aAction[k].lookahead ) break;
if( p->aLookahead[j].action!=p->aAction[k].action ) break;
}
if( j<p->nLookahead ) continue;
/* No possible lookahead value that is not in the aLookahead[]
** transaction is allowed to match aAction[i] */
n = 0;
for(j=0; j<p->nAction; j++){
if( p->aAction[j].lookahead<0 ) continue;
if( p->aAction[j].lookahead==j+p->mnLookahead-i ) n++;
}
if( n==p->nLookahead ){
break; /* An exact match is found at offset i */
}
}
}
/* If no existing offsets exactly match the current transaction, find
** an empty offset in the aAction[] table in which we can add the
** aLookahead[] transaction.
*/
if( i<end ){
/* Look for holes in the aAction[] table that fit the current
** aLookahead[] transaction. Leave i set to the offset of the hole.
** If no holes are found, i is left at p->nAction, which means the
** transaction will be appended. */
i = makeItSafe ? p->mnLookahead : 0;
for(; i<p->nActionAlloc - p->mxLookahead; i++){
if( p->aAction[i].lookahead<0 ){
for(j=0; j<p->nLookahead; j++){
k = p->aLookahead[j].lookahead - p->mnLookahead + i;
if( k<0 ) break;
if( p->aAction[k].lookahead>=0 ) break;
}
if( j<p->nLookahead ) continue;
for(j=0; j<p->nAction; j++){
if( p->aAction[j].lookahead==j+p->mnLookahead-i ) break;
}
if( j==p->nAction ){
break; /* Fits in empty slots */
}
}
}
}
/* Insert transaction set at index i. */
#if 0
printf("Acttab:");
for(j=0; j<p->nLookahead; j++){
printf(" %d", p->aLookahead[j].lookahead);
}
printf(" inserted at %d\n", i);
#endif
for(j=0; j<p->nLookahead; j++){
k = p->aLookahead[j].lookahead - p->mnLookahead + i;
p->aAction[k] = p->aLookahead[j];
if( k>=p->nAction ) p->nAction = k+1;
}
if( makeItSafe && i+p->nterminal>=p->nAction ) p->nAction = i+p->nterminal+1;
p->nLookahead = 0;
/* Return the offset that is added to the lookahead in order to get the
** index into yy_action of the action */
return i - p->mnLookahead;
}
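/* Sketch of the intended calling pattern (the real call sites are in
** the table-generation code, e.g. ReportTable(); "compute_action" is a
** stand-in for the actual action-number computation, not a function in
** this file):
**
**     acttab *pAct = acttab_alloc(nsymbol, nterminal);
**     for(each state stp){
**       for(each action ap of stp){
**         acttab_action(pAct, ap->sp->index, compute_action(ap));
**       }
**       ofst = acttab_insert(pAct, isTerminalTable);
**     }
*/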
/*
** Return the size of the action table without the trailing syntax error
** entries.
*/
int acttab_action_size(acttab *p){
int n = p->nAction;
while( n>0 && p->aAction[n-1].lookahead<0 ){ n--; }
return n;
}
/********************** From the file "build.c" *****************************/
/*
** Routines to construct the finite state machine for the LEMON
** parser generator.
*/
/* Find a precedence symbol for every rule in the grammar.
**
** Those rules which have a precedence symbol coded in the input
** grammar using the "[symbol]" construct will already have the
** rp->precsym field filled. Other rules take as their precedence
** symbol the first RHS symbol with a defined precedence. If there
** are no RHS symbols with a defined precedence, the precedence
** symbol field is left blank.
*/
void FindRulePrecedences(struct lemon *xp)
{
struct rule *rp;
for(rp=xp->rule; rp; rp=rp->next){
if( rp->precsym==0 ){
int i, j;
for(i=0; i<rp->nrhs && rp->precsym==0; i++){
struct symbol *sp = rp->rhs[i];
if( sp->type==MULTITERMINAL ){
for(j=0; j<sp->nsubsym; j++){
if( sp->subsym[j]->prec>=0 ){
rp->precsym = sp->subsym[j];
break;
}
}
}else if( sp->prec>=0 ){
rp->precsym = rp->rhs[i];
}
}
}
}
return;
}
/* Find all nonterminals which will generate the empty string.
** Then go back and compute the first sets of every nonterminal.
** The first set is the set of all terminal symbols which can begin
** a string generated by that nonterminal.
*/
void FindFirstSets(struct lemon *lemp)
{
int i, j;
struct rule *rp;
int progress;
for(i=0; i<lemp->nsymbol; i++){
lemp->symbols[i]->lambda = LEMON_FALSE;
}
for(i=lemp->nterminal; i<lemp->nsymbol; i++){
lemp->symbols[i]->firstset = SetNew();
}
/* First compute all lambdas */
do{
progress = 0;
for(rp=lemp->rule; rp; rp=rp->next){
if( rp->lhs->lambda ) continue;
for(i=0; i<rp->nrhs; i++){
struct symbol *sp = rp->rhs[i];
assert( sp->type==NONTERMINAL || sp->lambda==LEMON_FALSE );
if( sp->lambda==LEMON_FALSE ) break;
}
if( i==rp->nrhs ){
rp->lhs->lambda = LEMON_TRUE;
progress = 1;
}
}
}while( progress );
/* Now compute all first sets */
do{
struct symbol *s1, *s2;
progress = 0;
for(rp=lemp->rule; rp; rp=rp->next){
s1 = rp->lhs;
for(i=0; i<rp->nrhs; i++){
s2 = rp->rhs[i];
if( s2->type==TERMINAL ){
progress += SetAdd(s1->firstset,s2->index);
break;
}else if( s2->type==MULTITERMINAL ){
for(j=0; j<s2->nsubsym; j++){
progress += SetAdd(s1->firstset,s2->subsym[j]->index);
}
break;
}else if( s1==s2 ){
if( s1->lambda==LEMON_FALSE ) break;
}else{
progress += SetUnion(s1->firstset,s2->firstset);
if( s2->lambda==LEMON_FALSE ) break;
}
}
}
}while( progress );
return;
}
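/* Example (illustrative grammar): for
**
**     expr ::= expr PLUS term.
**     expr ::= term.
**     term ::= NUMBER.
**
** no nonterminal derives the empty string, so both lambdas stay
** LEMON_FALSE, and the fixed-point loop above yields
** FIRST(term) = FIRST(expr) = { NUMBER }.
*/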
/* Compute all LR(0) states for the grammar. Links
** are added between some states so that the LR(1) follow sets
** can be computed later.
*/
PRIVATE struct state *getstate(struct lemon *); /* forward reference */
void FindStates(struct lemon *lemp)
{
struct symbol *sp;
struct rule *rp;
Configlist_init();
/* Find the start symbol */
if( lemp->start ){
sp = Symbol_find(lemp->start);
if( sp==0 ){
ErrorMsg(lemp->filename,0,
"The specified start symbol \"%s\" is not "
"a nonterminal of the grammar. \"%s\" will be used as the start "
"symbol instead.",lemp->start,lemp->startRule->lhs->name);
lemp->errorcnt++;
sp = lemp->startRule->lhs;
}
}else if( lemp->startRule ){
sp = lemp->startRule->lhs;
}else{
ErrorMsg(lemp->filename,0,"Internal error - no start rule\n");
exit(1);
}
/* Make sure the start symbol doesn't occur on the right-hand side of
** any rule. Report an error if it does. (YACC would generate a new
** start symbol in this case.) */
for(rp=lemp->rule; rp; rp=rp->next){
int i;
for(i=0; i<rp->nrhs; i++){
if( rp->rhs[i]==sp ){ /* FIX ME: Deal with multiterminals */
ErrorMsg(lemp->filename,0,
"The start symbol \"%s\" occurs on the "
"right-hand side of a rule. This will result in a parser which "
"does not work properly.",sp->name);
lemp->errorcnt++;
}
}
}
/* The basis configuration set for the first state
** is all rules which have the start symbol as their
** left-hand side */
for(rp=sp->rule; rp; rp=rp->nextlhs){
struct config *newcfp;
rp->lhsStart = 1;
newcfp = Configlist_addbasis(rp,0);
SetAdd(newcfp->fws,0);
}
/* Compute the first state. All other states will be
** computed automatically during the computation of the first one.
** The returned pointer to the first state is not used. */
(void)getstate(lemp);
return;
}
/* Return a pointer to a state which is described by the configuration
** list which has been built from calls to Configlist_add.
*/
PRIVATE void buildshifts(struct lemon *, struct state *); /* Forward ref */
PRIVATE struct state *getstate(struct lemon *lemp)
{
struct config *cfp, *bp;
struct state *stp;
/* Extract the sorted basis of the new state. The basis was constructed
** by prior calls to "Configlist_addbasis()". */
Configlist_sortbasis();
bp = Configlist_basis();
/* Get a state with the same basis */
stp = State_find(bp);
if( stp ){
/* A state with the same basis already exists! Copy all the follow-set
** propagation links from the state under construction into the
** preexisting state, then return a pointer to the preexisting state */
struct config *x, *y;
for(x=bp, y=stp->bp; x && y; x=x->bp, y=y->bp){
Plink_copy(&y->bplp,x->bplp);
Plink_delete(x->fplp);
x->fplp = x->bplp = 0;
}
cfp = Configlist_return();
Configlist_eat(cfp);
}else{
/* This really is a new state. Construct all the details */
Configlist_closure(lemp); /* Compute the configuration closure */
Configlist_sort(); /* Sort the configuration closure */
cfp = Configlist_return(); /* Get a pointer to the config list */
stp = State_new(); /* A new state structure */
MemoryCheck(stp);
stp->bp = bp; /* Remember the configuration basis */
stp->cfp = cfp; /* Remember the configuration closure */
stp->statenum = lemp->nstate++; /* Every state gets a sequence number */
stp->ap = 0; /* No actions, yet. */
State_insert(stp,stp->bp); /* Add to the state table */
buildshifts(lemp,stp); /* Recursively compute successor states */
}
return stp;
}
/*
** Return true if two symbols are the same.
*/
int same_symbol(struct symbol *a, struct symbol *b)
{
int i;
if( a==b ) return 1;
if( a->type!=MULTITERMINAL ) return 0;
if( b->type!=MULTITERMINAL ) return 0;
if( a->nsubsym!=b->nsubsym ) return 0;
for(i=0; i<a->nsubsym; i++){
if( a->subsym[i]!=b->subsym[i] ) return 0;
}
return 1;
}
/* Construct all successor states to the given state. A "successor"
** state is any state which can be reached by a shift action.
*/
PRIVATE void buildshifts(struct lemon *lemp, struct state *stp)
{
struct config *cfp; /* For looping thru the config closure of "stp" */
struct config *bcfp; /* For the inner loop on config closure of "stp" */
struct config *newcfg; /* A new configuration added to the basis */
struct symbol *sp; /* Symbol following the dot in configuration "cfp" */
struct symbol *bsp; /* Symbol following the dot in configuration "bcfp" */
struct state *newstp; /* A pointer to a successor state */
/* Each configuration becomes complete after it contributes to a successor
** state. Initially, all configurations are incomplete */
for(cfp=stp->cfp; cfp; cfp=cfp->next) cfp->status = INCOMPLETE;
/* Loop through all configurations of the state "stp" */
for(cfp=stp->cfp; cfp; cfp=cfp->next){
if( cfp->status==COMPLETE ) continue; /* Already used by inner loop */
if( cfp->dot>=cfp->rp->nrhs ) continue; /* Can't shift this config */
Configlist_reset(); /* Reset the new config set */
sp = cfp->rp->rhs[cfp->dot]; /* Symbol after the dot */
/* For every configuration in the state "stp" which has the symbol "sp"
** following its dot, add the same configuration to the basis set under
** construction but with the dot shifted one symbol to the right. */
for(bcfp=cfp; bcfp; bcfp=bcfp->next){
if( bcfp->status==COMPLETE ) continue; /* Already used */
if( bcfp->dot>=bcfp->rp->nrhs ) continue; /* Can't shift this one */
bsp = bcfp->rp->rhs[bcfp->dot]; /* Get symbol after dot */
if( !same_symbol(bsp,sp) ) continue; /* Must be same as for "cfp" */
bcfp->status = COMPLETE; /* Mark this config as used */
newcfg = Configlist_addbasis(bcfp->rp,bcfp->dot+1);
Plink_add(&newcfg->bplp,bcfp);
}
/* Get a pointer to the state described by the basis configuration set
** constructed in the preceding loop */
newstp = getstate(lemp);
/* The state "newstp" is reached from the state "stp" by a shift action
** on the symbol "sp" */
if( sp->type==MULTITERMINAL ){
int i;
for(i=0; i<sp->nsubsym; i++){
Action_add(&stp->ap,SHIFT,sp->subsym[i],(char*)newstp);
}
}else{
Action_add(&stp->ap,SHIFT,sp,(char *)newstp);
}
}
}
/*
** Construct the propagation links
*/
void FindLinks(struct lemon *lemp)
{
int i;
struct config *cfp, *other;
struct state *stp;
struct plink *plp;
/* Housekeeping detail:
** Add to every propagate link a pointer back to the state to
** which the link is attached. */
for(i=0; i<lemp->nstate; i++){
stp = lemp->sorted[i];
for(cfp=stp?stp->cfp:0; cfp; cfp=cfp->next){
cfp->stp = stp;
}
}
/* Convert all backlinks into forward links. Only the forward
** links are used in the follow-set computation. */
for(i=0; i<lemp->nstate; i++){
stp = lemp->sorted[i];
for(cfp=stp?stp->cfp:0; cfp; cfp=cfp->next){
for(plp=cfp->bplp; plp; plp=plp->next){
other = plp->cfp;
Plink_add(&other->fplp,cfp);
}
}
}
}
/* Compute all followsets.
**
** A followset is the set of all symbols which can come immediately
** after a configuration.
*/
void FindFollowSets(struct lemon *lemp)
{
int i;
struct config *cfp;
struct plink *plp;
int progress;
int change;
for(i=0; i<lemp->nstate; i++){
assert( lemp->sorted[i]!=0 );
for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){
cfp->status = INCOMPLETE;
}
}
do{
progress = 0;
for(i=0; i<lemp->nstate; i++){
assert( lemp->sorted[i]!=0 );
for(cfp=lemp->sorted[i]->cfp; cfp; cfp=cfp->next){
if( cfp->status==COMPLETE ) continue;
for(plp=cfp->fplp; plp; plp=plp->next){
change = SetUnion(plp->cfp->fws,cfp->fws);
if( change ){
plp->cfp->status = INCOMPLETE;
progress = 1;
}
}
cfp->status = COMPLETE;
}
}
}while( progress );
}
static int resolve_conflict(struct action *,struct action *);
/* Compute the reduce actions, and resolve conflicts.
*/
void FindActions(struct lemon *lemp)
{
int i,j;
struct config *cfp;
struct state *stp;
struct symbol *sp;
struct rule *rp;
/* Add all of the reduce actions
** A reduce action is added for each element of the followset of
** a configuration which has its dot at the extreme right.
*/
for(i=0; i<lemp->nstate; i++){ /* Loop over all states */
stp = lemp->sorted[i];
for(cfp=stp->cfp; cfp; cfp=cfp->next){ /* Loop over all configurations */
if( cfp->rp->nrhs==cfp->dot ){ /* Is dot at extreme right? */
for(j=0; j<lemp->nterminal; j++){
if( SetFind(cfp->fws,j) ){
/* Add a reduce action to the state "stp" which will reduce by the
** rule "cfp->rp" if the lookahead symbol is "lemp->symbols[j]" */
Action_add(&stp->ap,REDUCE,lemp->symbols[j],(char *)cfp->rp);
}
}
}
}
}
/* Add the accepting token */
if( lemp->start ){
sp = Symbol_find(lemp->start);
if( sp==0 ){
if( lemp->startRule==0 ){
fprintf(stderr, "internal error on source line %d: no start rule\n",
__LINE__);
exit(1);
}
sp = lemp->startRule->lhs;
}
}else{
sp = lemp->startRule->lhs;
}
/* Add to the first state (which is always the starting state of the
** finite state machine) an action to ACCEPT if the lookahead is the
** start nonterminal. */
Action_add(&lemp->sorted[0]->ap,ACCEPT,sp,0);
/* Resolve conflicts */
for(i=0; i<lemp->nstate; i++){
struct action *ap, *nap;
stp = lemp->sorted[i];
/* assert( stp->ap ); */
stp->ap = Action_sort(stp->ap);
for(ap=stp->ap; ap && ap->next; ap=ap->next){
for(nap=ap->next; nap && nap->sp==ap->sp; nap=nap->next){
/* The two actions "ap" and "nap" have the same lookahead.
** Figure out which one should be used */
lemp->nconflict += resolve_conflict(ap,nap);
}
}
}
/* Report an error for each rule that can never be reduced. */
for(rp=lemp->rule; rp; rp=rp->next) rp->canReduce = LEMON_FALSE;
for(i=0; i<lemp->nstate; i++){
struct action *ap;
for(ap=lemp->sorted[i]->ap; ap; ap=ap->next){
if( ap->type==REDUCE ) ap->x.rp->canReduce = LEMON_TRUE;
}
}
for(rp=lemp->rule; rp; rp=rp->next){
if( rp->canReduce ) continue;
ErrorMsg(lemp->filename,rp->ruleline,"This rule can not be reduced.\n");
lemp->errorcnt++;
}
}
/* Resolve a conflict between the two given actions. If the
** conflict can't be resolved, return non-zero.
**
** NO LONGER TRUE:
** To resolve a conflict, first look to see if either action
** is on an error rule. In that case, take the action which
** is not associated with the error rule. If neither or both
** actions are associated with an error rule, then try to
** use precedence to resolve the conflict.
**
** If either action is a SHIFT, then it must be apx. This
** function won't work if apx->type==REDUCE and apy->type==SHIFT.
*/
static int resolve_conflict(
struct action *apx,
struct action *apy
){
struct symbol *spx, *spy;
int errcnt = 0;
assert( apx->sp==apy->sp ); /* Otherwise there would be no conflict */
if( apx->type==SHIFT && apy->type==SHIFT ){
apy->type = SSCONFLICT;
errcnt++;
}
if( apx->type==SHIFT && apy->type==REDUCE ){
spx = apx->sp;
spy = apy->x.rp->precsym;
if( spy==0 || spx->prec<0 || spy->prec<0 ){
/* Not enough precedence information. */
apy->type = SRCONFLICT;
errcnt++;
}else if( spx->prec>spy->prec ){ /* higher precedence wins */
apy->type = RD_RESOLVED;
}else if( spx->prec<spy->prec ){
apx->type = SH_RESOLVED;
}else if( spx->prec==spy->prec && spx->assoc==RIGHT ){ /* Use operator */
apy->type = RD_RESOLVED; /* associativity */
}else if( spx->prec==spy->prec && spx->assoc==LEFT ){ /* to break tie */
apx->type = SH_RESOLVED;
}else{
assert( spx->prec==spy->prec && spx->assoc==NONE );
apx->type = ERROR;
}
}else if( apx->type==REDUCE && apy->type==REDUCE ){
spx = apx->x.rp->precsym;
spy = apy->x.rp->precsym;
if( spx==0 || spy==0 || spx->prec<0 ||
spy->prec<0 || spx->prec==spy->prec ){
apy->type = RRCONFLICT;
errcnt++;
}else if( spx->prec>spy->prec ){
apy->type = RD_RESOLVED;
}else if( spx->prec<spy->prec ){
apx->type = RD_RESOLVED;
}
}else{
assert(
apx->type==SH_RESOLVED ||
apx->type==RD_RESOLVED ||
apx->type==SSCONFLICT ||
apx->type==SRCONFLICT ||
apx->type==RRCONFLICT ||
apy->type==SH_RESOLVED ||
apy->type==RD_RESOLVED ||
apy->type==SSCONFLICT ||
apy->type==SRCONFLICT ||
apy->type==RRCONFLICT
);
/* The REDUCE/SHIFT case cannot happen because SHIFTs come before
** REDUCEs on the list. If we reach this point it must be because
** the parser conflict had already been resolved. */
}
return errcnt;
}
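/* Worked example: with "%left PLUS." declared before "%left TIMES."
** (so TIMES has the higher precedence), the shift/reduce conflict on
** lookahead TIMES after "expr PLUS expr" in
**
**     expr ::= expr PLUS expr.
**     expr ::= expr TIMES expr.
**
** reaches resolve_conflict() above with spx==TIMES and spy==PLUS, so
** spx->prec>spy->prec and the REDUCE is suppressed
** (apy->type = RD_RESOLVED): TIMES binds tighter, as expected.
*/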
/********************* From the file "configlist.c" *************************/
/*
** Routines for processing a configuration list and building a state
** in the LEMON parser generator.
*/
static struct config *freelist = 0; /* List of free configurations */
static struct config *current = 0; /* Top of list of configurations */
static struct config **currentend = 0; /* Last on list of configs */
static struct config *basis = 0; /* Top of list of basis configs */
static struct config **basisend = 0; /* End of list of basis configs */
/* Return a pointer to a new configuration */
PRIVATE struct config *newconfig(void){
return (struct config*)calloc(1, sizeof(struct config));
}
/* The configuration "old" is no longer used */
PRIVATE void deleteconfig(struct config *old)
{
old->next = freelist;
freelist = old;
}
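/* NB: deleteconfig() above parks retired configurations on the
** freelist, but in this version newconfig() always calloc()s fresh
** memory and never drains the list, so parked configurations are
** retained rather than reused. */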
/* Initialize the configuration list builder */
void Configlist_init(void){
current = 0;
currentend = &current;
basis = 0;
basisend = &basis;
Configtable_init();
return;
}
/* Reset the configuration list builder */
void Configlist_reset(void){
current = 0;
currentend = &current;
basis = 0;
basisend = &basis;
Configtable_clear(0);
return;
}
/* Add another configuration to the configuration list */
struct config *Configlist_add(
struct rule *rp, /* The rule */
int dot /* Index into the RHS of the rule where the dot goes */
){
struct config *cfp, model;
assert( currentend!=0 );
model.rp = rp;
model.dot = dot;
cfp = Configtable_find(&model);
if( cfp==0 ){
cfp = newconfig();
MemoryCheck(cfp);
cfp->rp = rp;
cfp->dot = dot;
cfp->fws = SetNew();
cfp->stp = 0;
cfp->fplp = cfp->bplp = 0;
cfp->next = 0;
cfp->bp = 0;
*currentend = cfp;
currentend = &cfp->next;
Configtable_insert(cfp);
}
return cfp;
}
/* Add a basis configuration to the configuration list */
struct config *Configlist_addbasis(struct rule *rp, int dot)
{
struct config *cfp, model;
assert( basisend!=0 );
assert( currentend!=0 );
model.rp = rp;
model.dot = dot;
cfp = Configtable_find(&model);
if( cfp==0 ){
cfp = newconfig();
MemoryCheck(cfp);
cfp->rp = rp;
cfp->dot = dot;
cfp->fws = SetNew();
cfp->stp = 0;
cfp->fplp = cfp->bplp = 0;
cfp->next = 0;
cfp->bp = 0;
*currentend = cfp;
currentend = &cfp->next;
*basisend = cfp;
basisend = &cfp->bp;
Configtable_insert(cfp);
}
return cfp;
}
/* Compute the closure of the configuration list */
void Configlist_closure(struct lemon *lemp)
{
struct config *cfp, *newcfp;
struct rule *rp, *newrp;
struct symbol *sp, *xsp;
int i, dot;
assert( currentend!=0 );
for(cfp=current; cfp; cfp=cfp->next){
rp = cfp->rp;
dot = cfp->dot;
if( dot>=rp->nrhs ) continue;
sp = rp->rhs[dot];
if( sp->type==NONTERMINAL ){
if( sp->rule==0 && sp!=lemp->errsym ){
ErrorMsg(lemp->filename,rp->line,"Nonterminal \"%s\" has no rules.",
sp->name);
lemp->errorcnt++;
}
for(newrp=sp->rule; newrp; newrp=newrp->nextlhs){
newcfp = Configlist_add(newrp,0);
for(i=dot+1; i<rp->nrhs; i++){
xsp = rp->rhs[i];
if( xsp->type==TERMINAL ){
SetAdd(newcfp->fws,xsp->index);
break;
}else if( xsp->type==MULTITERMINAL ){
int k;
for(k=0; k<xsp->nsubsym; k++){
SetAdd(newcfp->fws, xsp->subsym[k]->index);
}
break;
}else{
SetUnion(newcfp->fws,xsp->firstset);
if( xsp->lambda==LEMON_FALSE ) break;
}
}
if( i==rp->nrhs ) Plink_add(&cfp->fplp,newcfp);
}
}
}
return;
}
/* Sort the configuration list */
void Configlist_sort(void){
current = (struct config*)msort((char*)current,(char**)&(current->next),
Configcmp);
currentend = 0;
return;
}
/* Sort the basis configuration list */
void Configlist_sortbasis(void){
basis = (struct config*)msort((char*)current,(char**)&(current->bp),
Configcmp);
basisend = 0;
return;
}
/* Return a pointer to the head of the configuration list and
** reset the list */
struct config *Configlist_return(void){
struct config *old;
old = current;
current = 0;
currentend = 0;
return old;
}
/* Return a pointer to the head of the configuration list and
** reset the list */
struct config *Configlist_basis(void){
struct config *old;
old = basis;
basis = 0;
basisend = 0;
return old;
}
/* Free all elements of the given configuration list */
void Configlist_eat(struct config *cfp)
{
struct config *nextcfp;
for(; cfp; cfp=nextcfp){
nextcfp = cfp->next;
assert( cfp->fplp==0 );
assert( cfp->bplp==0 );
if( cfp->fws ) SetFree(cfp->fws);
deleteconfig(cfp);
}
return;
}
/***************** From the file "error.c" *********************************/
/*
** Code for printing error messages.
*/
void ErrorMsg(const char *filename, int lineno, const char *format, ...){
va_list ap;
fprintf(stderr, "%s:%d: ", filename, lineno);
va_start(ap, format);
vfprintf(stderr,format,ap);
va_end(ap);
fprintf(stderr, "\n");
}
/**************** From the file "main.c" ************************************/
/*
** Main program file for the LEMON parser generator.
*/
/* Report an out-of-memory condition and abort. This function
** is used mostly by the "MemoryCheck" macro in struct.h
*/
void memory_error(void){
fprintf(stderr,"Out of memory. Aborting...\n");
exit(1);
}
static int nDefine = 0; /* Number of -D options on the command line */
static char **azDefine = 0; /* Names of the -D macros */
/* This routine is called with the argument to each -D command-line option.
** Add the defined macro to the azDefine array.
*/
static void handle_D_option(char *z){
char **paz;
nDefine++;
azDefine = (char **) realloc(azDefine, sizeof(azDefine[0])*nDefine);
if( azDefine==0 ){
fprintf(stderr,"out of memory\n");
exit(1);
}
paz = &azDefine[nDefine-1];
*paz = (char *) malloc( lemonStrlen(z)+1 );
if( *paz==0 ){
fprintf(stderr,"out of memory\n");
exit(1);
}
lemon_strcpy(*paz, z);
for(z=*paz; *z && *z!='='; z++){}
*z = 0;
}
/* Remember the name of the output directory
*/
static char *outputDir = NULL;
static void handle_d_option(char *z){
outputDir = (char *) malloc( lemonStrlen(z)+1 );
if( outputDir==0 ){
fprintf(stderr,"out of memory\n");
exit(1);
}
lemon_strcpy(outputDir, z);
}
static char *user_templatename = NULL;
static void handle_T_option(char *z){
user_templatename = (char *) malloc( lemonStrlen(z)+1 );
if( user_templatename==0 ){
memory_error();
}
lemon_strcpy(user_templatename, z);
}
/* Merge together two lists of rules ordered by rule.iRule */
static struct rule *Rule_merge(struct rule *pA, struct rule *pB){
struct rule *pFirst = 0;
struct rule **ppPrev = &pFirst;
while( pA && pB ){
if( pA->iRule<pB->iRule ){
*ppPrev = pA;
ppPrev = &pA->next;
pA = pA->next;
}else{
*ppPrev = pB;
ppPrev = &pB->next;
pB = pB->next;
}
}
if( pA ){
*ppPrev = pA;
}else{
*ppPrev = pB;
}
return pFirst;
}
/*
** Sort a list of rules in order of increasing iRule value
*/
static struct rule *Rule_sort(struct rule *rp){
unsigned int i;
struct rule *pNext;
struct rule *x[32];
memset(x, 0, sizeof(x));
while( rp ){
pNext = rp->next;
rp->next = 0;
for(i=0; i<sizeof(x)/sizeof(x[0])-1 && x[i]; i++){
rp = Rule_merge(x[i], rp);
x[i] = 0;
}
x[i] = rp;
rp = pNext;
}
rp = 0;
for(i=0; i<sizeof(x)/sizeof(x[0]); i++){
rp = Rule_merge(x[i], rp);
}
return rp;
}
/* forward reference */
static const char *minimum_size_type(int lwr, int upr, int *pnByte);
/* Print a single line of the "Parser Stats" output
*/
static void stats_line(const char *zLabel, int iValue){
int nLabel = lemonStrlen(zLabel);
printf(" %s%.*s %5d\n", zLabel,
35-nLabel, "................................",
iValue);
}
/* The main program. Parse the command line and do it... */
int main(int argc, char **argv){
static int version = 0;
static int rpflag = 0;
static int basisflag = 0;
static int compress = 0;
static int quiet = 0;
static int statistics = 0;
static int mhflag = 0;
static int nolinenosflag = 0;
static int noResort = 0;
static int sqlFlag = 0;
static int printPP = 0;
static struct s_options options[] = {
{OPT_FLAG, "b", (char*)&basisflag, "Print only the basis in report."},
{OPT_FLAG, "c", (char*)&compress, "Don't compress the action table."},
{OPT_FSTR, "d", (char*)&handle_d_option, "Output directory. Default '.'"},
{OPT_FSTR, "D", (char*)handle_D_option, "Define an %ifdef macro."},
{OPT_FLAG, "E", (char*)&printPP, "Print input file after preprocessing."},
{OPT_FSTR, "f", 0, "Ignored. (Placeholder for -f compiler options.)"},
{OPT_FLAG, "g", (char*)&rpflag, "Print grammar without actions."},
{OPT_FSTR, "I", 0, "Ignored. (Placeholder for '-I' compiler options.)"},
{OPT_FLAG, "m", (char*)&mhflag, "Output a makeheaders compatible file."},
{OPT_FLAG, "l", (char*)&nolinenosflag, "Do not print #line statements."},
{OPT_FSTR, "O", 0, "Ignored. (Placeholder for '-O' compiler options.)"},
{OPT_FLAG, "p", (char*)&showPrecedenceConflict,
"Show conflicts resolved by precedence rules"},
{OPT_FLAG, "q", (char*)&quiet, "(Quiet) Don't print the report file."},
{OPT_FLAG, "r", (char*)&noResort, "Do not sort or renumber states"},
{OPT_FLAG, "s", (char*)&statistics,
"Print parser stats to standard output."},
{OPT_FLAG, "S", (char*)&sqlFlag,
"Generate the *.sql file describing the parser tables."},
{OPT_FLAG, "x", (char*)&version, "Print the version number."},
{OPT_FSTR, "T", (char*)handle_T_option, "Specify a template file."},
{OPT_FSTR, "W", 0, "Ignored. (Placeholder for '-W' compiler options.)"},
{OPT_FLAG,0,0,0}
};
int i;
int exitcode;
struct lemon lem;
struct rule *rp;
(void)argc;
OptInit(argv,options,stderr);
if( version ){
printf("Lemon version 1.0\n");
exit(0);
}
if( OptNArgs()!=1 ){
fprintf(stderr,"Exactly one filename argument is required.\n");
exit(1);
}
memset(&lem, 0, sizeof(lem));
lem.errorcnt = 0;
/* Initialize the machine */
Strsafe_init();
Symbol_init();
State_init();
lem.argv0 = argv[0];
lem.filename = OptArg(0);
lem.basisflag = basisflag;
lem.nolinenosflag = nolinenosflag;
lem.printPreprocessed = printPP;
Symbol_new("$");
/* Parse the input file */
Parse(&lem);
if( lem.printPreprocessed || lem.errorcnt ) exit(lem.errorcnt);
if( lem.nrule==0 ){
fprintf(stderr,"Empty grammar.\n");
exit(1);
}
lem.errsym = Symbol_find("error");
/* Count and index the symbols of the grammar */
Symbol_new("{default}");
lem.nsymbol = Symbol_count();
lem.symbols = Symbol_arrayof();
for(i=0; i<lem.nsymbol; i++) lem.symbols[i]->index = i;
qsort(lem.symbols,lem.nsymbol,sizeof(struct symbol*), Symbolcmpp);
for(i=0; i<lem.nsymbol; i++) lem.symbols[i]->index = i;
while( lem.symbols[i-1]->type==MULTITERMINAL ){ i--; }
assert( strcmp(lem.symbols[i-1]->name,"{default}")==0 );
lem.nsymbol = i - 1;
for(i=1; ISUPPER(lem.symbols[i]->name[0]); i++);
lem.nterminal = i;
/* Assign sequential rule numbers. Start with 0. Put rules that have no
** reduce action C-code associated with them last, so that the switch()
** statement that selects reduction actions will have a smaller jump table.
*/
for(i=0, rp=lem.rule; rp; rp=rp->next){
rp->iRule = rp->code ? i++ : -1;
}
lem.nruleWithAction = i;
for(rp=lem.rule; rp; rp=rp->next){
if( rp->iRule<0 ) rp->iRule = i++;
}
lem.startRule = lem.rule;
lem.rule = Rule_sort(lem.rule);
/* Generate a reprint of the grammar, if requested on the command line */
if( rpflag ){
Reprint(&lem);
}else{
/* Initialize the size for all follow and first sets */
SetSize(lem.nterminal+1);
/* Find the precedence for every production rule (that has one) */
FindRulePrecedences(&lem);
/* Compute the lambda-nonterminals and the first-sets for every
** nonterminal */
FindFirstSets(&lem);
/* Compute all LR(0) states. Also record follow-set propagation
** links so that the follow-set can be computed later */
lem.nstate = 0;
FindStates(&lem);
lem.sorted = State_arrayof();
/* Tie up loose ends on the propagation links */
FindLinks(&lem);
/* Compute the follow set of every reducible configuration */
FindFollowSets(&lem);
/* Compute the action tables */
FindActions(&lem);
/* Compress the action tables */
if( compress==0 ) CompressTables(&lem);
/* Reorder and renumber the states so that states with fewer choices
** occur at the end. This is an optimization that helps make the
** generated parser tables smaller. */
if( noResort==0 ) ResortStates(&lem);
/* Generate a report of the parser generated. (the "y.output" file) */
if( !quiet ) ReportOutput(&lem);
/* Generate the source code for the parser */
ReportTable(&lem, mhflag, sqlFlag);
/* Produce a header file for use by the scanner. (This step is
** omitted if the "-m" option is used because makeheaders will
** generate the file for us.) */
if( !mhflag ) ReportHeader(&lem);
}
if( statistics ){
printf("Parser statistics:\n");
stats_line("terminal symbols", lem.nterminal);
stats_line("non-terminal symbols", lem.nsymbol - lem.nterminal);
stats_line("total symbols", lem.nsymbol);
stats_line("rules", lem.nrule);
stats_line("states", lem.nxstate);
stats_line("conflicts", lem.nconflict);
stats_line("action table entries", lem.nactiontab);
stats_line("lookahead table entries", lem.nlookaheadtab);
stats_line("total table size (bytes)", lem.tablesize);
}
if( lem.nconflict > 0 ){
fprintf(stderr,"%d parsing conflicts.\n",lem.nconflict);
}
/* return 0 on success, 1 on failure. */
exitcode = ((lem.errorcnt > 0) || (lem.nconflict > 0)) ? 1 : 0;
exit(exitcode);
return (exitcode);
}
/******************** From the file "msort.c" *******************************/
/*
** A generic merge-sort program.
**
** USAGE:
** Let "ptr" be a pointer to some structure which is at the head of
** a null-terminated list. Then to sort the list call:
**
** ptr = msort(ptr,&(ptr->next),cmpfnc);
**
** In the above, "cmpfnc" is a pointer to a function which compares
** two instances of the structure and returns an integer, as in
** strcmp. The second argument is a pointer to the pointer to the
** second element of the linked list. This address is used to compute
** the offset to the "next" field within the structure. The offset to
** the "next" field must be constant for all structures in the list.
**
** The function returns a new pointer which is the head of the list
** after sorting.
**
** ALGORITHM:
** Merge-sort.
*/
/*
** Return a pointer to the next structure in the linked list.
*/
#define NEXT(A) (*(char**)(((char*)A)+offset))
/*
** Inputs:
** a: A sorted, null-terminated linked list. (May be null).
** b: A sorted, null-terminated linked list. (May be null).
** cmp: A pointer to the comparison function.
** offset: Offset in the structure to the "next" field.
**
** Return Value:
** A pointer to the head of a sorted list containing the elements
** of both a and b.
**
** Side effects:
** The "next" pointers for elements in the lists a and b are
** changed.
*/
static char *merge(
char *a,
char *b,
int (*cmp)(const char*,const char*),
int offset
){
char *ptr, *head;
if( a==0 ){
head = b;
}else if( b==0 ){
head = a;
}else{
if( (*cmp)(a,b)<=0 ){
ptr = a;
a = NEXT(a);
}else{
ptr = b;
b = NEXT(b);
}
head = ptr;
while( a && b ){
if( (*cmp)(a,b)<=0 ){
NEXT(ptr) = a;
ptr = a;
a = NEXT(a);
}else{
NEXT(ptr) = b;
ptr = b;
b = NEXT(b);
}
}
if( a ) NEXT(ptr) = a;
else NEXT(ptr) = b;
}
return head;
}
/*
** Inputs:
** list: Pointer to a singly-linked list of structures.
** next: Pointer to pointer to the second element of the list.
** cmp: A comparison function.
**
** Return Value:
** A pointer to the head of a sorted list containing the elements
** originally in list.
**
** Side effects:
** The "next" pointers for elements in list are changed.
*/
#define LISTSIZE 30
static char *msort(
char *list,
char **next,
int (*cmp)(const char*,const char*)
){
unsigned long offset;
char *ep;
char *set[LISTSIZE];
int i;
offset = (unsigned long)((char*)next - (char*)list);
for(i=0; i<LISTSIZE; i++) set[i] = 0;
while( list ){
ep = list;
list = NEXT(list);
NEXT(ep) = 0;
for(i=0; i<LISTSIZE-1 && set[i]!=0; i++){
ep = merge(ep,set[i],cmp,offset);
set[i] = 0;
}
set[i] = ep;
}
ep = 0;
for(i=0; i<LISTSIZE; i++) if( set[i] ) ep = merge(set[i],ep,cmp,offset);
return ep;
}
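/* Usage sketch (with a hypothetical comparator, not a function defined
** in this file; Rule_sort() earlier in this file uses its own merge for
** the iRule ordering):
**
**     static int rulelinecmp(const char *a, const char *b){
**       return ((const struct rule*)a)->ruleline
**            - ((const struct rule*)b)->ruleline;
**     }
**     ...
**     lem.rule = (struct rule*)msort((char*)lem.rule,
**                                    (char**)&lem.rule->next,
**                                    rulelinecmp);
*/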
/************************ From the file "option.c" **************************/
static char **g_argv;
static struct s_options *op;
static FILE *errstream;
#define ISOPT(X) ((X)[0]=='-'||(X)[0]=='+'||strchr((X),'=')!=0)
/*
** Print the command line with a caret pointing to the k-th character
** of the n-th field.
*/
static void errline(int n, int k, FILE *err)
{
int spcnt, i;
if( g_argv[0] ){
fprintf(err,"%s",g_argv[0]);
spcnt = lemonStrlen(g_argv[0]) + 1;
}else{
spcnt = 0;
}
for(i=1; i<n && g_argv[i]; i++){
fprintf(err," %s",g_argv[i]);
spcnt += lemonStrlen(g_argv[i])+1;
}
spcnt += k;
for(; g_argv[i]; i++) fprintf(err," %s",g_argv[i]);
if( spcnt<20 ){
fprintf(err,"\n%*s^-- here\n",spcnt,"");
}else{
fprintf(err,"\n%*shere --^\n",spcnt-7,"");
}
}
/*
** Return the index of the N-th non-switch argument. Return -1
** if N is out of range.
*/
static int argindex(int n)
{
int i;
int dashdash = 0;
if( g_argv!=0 && *g_argv!=0 ){
for(i=1; g_argv[i]; i++){
if( dashdash || !ISOPT(g_argv[i]) ){
if( n==0 ) return i;
n--;
}
if( strcmp(g_argv[i],"--")==0 ) dashdash = 1;
}
}
return -1;
}
static char emsg[] = "Command line syntax error: ";
/*
** Process a flag command line argument.
*/
static int handleflags(int i, FILE *err)
{
int v;
int errcnt = 0;
int j;
for(j=0; op[j].label; j++){
if( strncmp(&g_argv[i][1],op[j].label,lemonStrlen(op[j].label))==0 ) break;
}
v = g_argv[i][0]=='-' ? 1 : 0;
if( op[j].label==0 ){
if( err ){
fprintf(err,"%sundefined option.\n",emsg);
errline(i,1,err);
}
errcnt++;
}else if( op[j].arg==0 ){
/* Ignore this option */
}else if( op[j].type==OPT_FLAG ){
*((int*)op[j].arg) = v;
}else if( op[j].type==OPT_FFLAG ){
(*(void(*)(int))(op[j].arg))(v);
}else if( op[j].type==OPT_FSTR ){
(*(void(*)(char *))(op[j].arg))(&g_argv[i][2]);
}else{
if( err ){
fprintf(err,"%smissing argument on switch.\n",emsg);
errline(i,1,err);
}
errcnt++;
}
return errcnt;
}
/*
** Process a command line switch which has an argument.
*/
static int handleswitch(int i, FILE *err)
{
int lv = 0;
double dv = 0.0;
char *sv = 0, *end;
char *cp;
int j;
int errcnt = 0;
cp = strchr(g_argv[i],'=');
assert( cp!=0 );
*cp = 0;
for(j=0; op[j].label; j++){
if( strcmp(g_argv[i],op[j].label)==0 ) break;
}
*cp = '=';
if( op[j].label==0 ){
if( err ){
fprintf(err,"%sundefined option.\n",emsg);
errline(i,0,err);
}
errcnt++;
}else{
cp++;
switch( op[j].type ){
case OPT_FLAG:
case OPT_FFLAG:
if( err ){
fprintf(err,"%soption requires an argument.\n",emsg);
errline(i,0,err);
}
errcnt++;
break;
case OPT_DBL:
case OPT_FDBL:
dv = strtod(cp,&end);
if( *end ){
if( err ){
fprintf(err,
"%sillegal character in floating-point argument.\n",emsg);
errline(i,(int)((char*)end-(char*)g_argv[i]),err);
}
errcnt++;
}
break;
case OPT_INT:
case OPT_FINT:
lv = strtol(cp,&end,0);
if( *end ){
if( err ){
fprintf(err,"%sillegal character in integer argument.\n",emsg);
errline(i,(int)((char*)end-(char*)g_argv[i]),err);
}
errcnt++;
}
break;
case OPT_STR:
case OPT_FSTR:
sv = cp;
break;
}
switch( op[j].type ){
case OPT_FLAG:
case OPT_FFLAG:
break;
case OPT_DBL:
*(double*)(op[j].arg) = dv;
break;
case OPT_FDBL:
(*(void(*)(double))(op[j].arg))(dv);
break;
case OPT_INT:
*(int*)(op[j].arg) = lv;
break;
case OPT_FINT:
(*(void(*)(int))(op[j].arg))((int)lv);
break;
case OPT_STR:
*(char**)(op[j].arg) = sv;
break;
case OPT_FSTR:
(*(void(*)(char *))(op[j].arg))(sv);
break;
}
}
return errcnt;
}
int OptInit(char **a, struct s_options *o, FILE *err)
{
int errcnt = 0;
g_argv = a;
op = o;
errstream = err;
if( g_argv && *g_argv && op ){
int i;
for(i=1; g_argv[i]; i++){
if( g_argv[i][0]=='+' || g_argv[i][0]=='-' ){
errcnt += handleflags(i,err);
}else if( strchr(g_argv[i],'=') ){
errcnt += handleswitch(i,err);
}
}
}
if( errcnt>0 ){
fprintf(err,"Valid command line options for \"%s\" are:\n",*a);
OptPrint();
exit(1);
}
return 0;
}
int OptNArgs(void){
int cnt = 0;
int dashdash = 0;
int i;
if( g_argv!=0 && g_argv[0]!=0 ){
for(i=1; g_argv[i]; i++){
if( dashdash || !ISOPT(g_argv[i]) ) cnt++;
if( strcmp(g_argv[i],"--")==0 ) dashdash = 1;
}
}
return cnt;
}
char *OptArg(int n)
{
int i;
i = argindex(n);
return i>=0 ? g_argv[i] : 0;
}
void OptErr(int n)
{
int i;
i = argindex(n);
if( i>=0 ) errline(i,0,errstream);
}
void OptPrint(void){
int i;
int max, len;
max = 0;
for(i=0; op[i].label; i++){
len = lemonStrlen(op[i].label) + 1;
switch( op[i].type ){
case OPT_FLAG:
case OPT_FFLAG:
break;
case OPT_INT:
case OPT_FINT:
len += 9; /* length of "<integer>" */
break;
case OPT_DBL:
case OPT_FDBL:
len += 6; /* length of "<real>" */
break;
case OPT_STR:
case OPT_FSTR:
len += 8; /* length of "<string>" */
break;
}
if( len>max ) max = len;
}
for(i=0; op[i].label; i++){
switch( op[i].type ){
case OPT_FLAG:
case OPT_FFLAG:
fprintf(errstream," -%-*s %s\n",max,op[i].label,op[i].message);
break;
case OPT_INT:
case OPT_FINT:
fprintf(errstream," -%s<integer>%*s %s\n",op[i].label,
(int)(max-lemonStrlen(op[i].label)-9),"",op[i].message);
break;
case OPT_DBL:
case OPT_FDBL:
fprintf(errstream," -%s<real>%*s %s\n",op[i].label,
(int)(max-lemonStrlen(op[i].label)-6),"",op[i].message);
break;
case OPT_STR:
case OPT_FSTR:
fprintf(errstream," -%s<string>%*s %s\n",op[i].label,
(int)(max-lemonStrlen(op[i].label)-8),"",op[i].message);
break;
}
}
}
/*********************** From the file "parse.c" ****************************/
/*
** Input file parser for the LEMON parser generator.
*/
/* The state of the parser */
enum e_state {
INITIALIZE,
WAITING_FOR_DECL_OR_RULE,
WAITING_FOR_DECL_KEYWORD,
WAITING_FOR_DECL_ARG,
WAITING_FOR_PRECEDENCE_SYMBOL,
WAITING_FOR_ARROW,
IN_RHS,
LHS_ALIAS_1,
LHS_ALIAS_2,
LHS_ALIAS_3,
RHS_ALIAS_1,
RHS_ALIAS_2,
PRECEDENCE_MARK_1,
PRECEDENCE_MARK_2,
RESYNC_AFTER_RULE_ERROR,
RESYNC_AFTER_DECL_ERROR,
WAITING_FOR_DESTRUCTOR_SYMBOL,
WAITING_FOR_DATATYPE_SYMBOL,
WAITING_FOR_FALLBACK_ID,
WAITING_FOR_WILDCARD_ID,
WAITING_FOR_CLASS_ID,
WAITING_FOR_CLASS_TOKEN,
WAITING_FOR_TOKEN_NAME
};
struct pstate {
char *filename; /* Name of the input file */
int tokenlineno; /* Linenumber at which current token starts */
int errorcnt; /* Number of errors so far */
char *tokenstart; /* Text of current token */
struct lemon *gp; /* Global state vector */
enum e_state state; /* The state of the parser */
struct symbol *fallback; /* The fallback token */
struct symbol *tkclass; /* Token class symbol */
struct symbol *lhs; /* Left-hand side of current rule */
const char *lhsalias; /* Alias for the LHS */
int nrhs; /* Number of right-hand side symbols seen */
struct symbol *rhs[MAXRHS]; /* RHS symbols */
const char *alias[MAXRHS]; /* Aliases for each RHS symbol (or NULL) */
struct rule *prevrule; /* Previous rule parsed */
const char *declkeyword; /* Keyword of a declaration */
char **declargslot; /* Where the declaration argument should be put */
int insertLineMacro; /* Add #line before declaration insert */
int *decllinenoslot; /* Where to write declaration line number */
enum e_assoc declassoc; /* Assign this association to decl arguments */
int preccounter; /* Assign this precedence to decl arguments */
struct rule *firstrule; /* Pointer to first rule in the grammar */
struct rule *lastrule; /* Pointer to the most recently parsed rule */
};
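/* For example, parsing the grammar line
**
**     expr(A) ::= expr(B) PLUS term(C). { A = B + C; }
**
** walks parseonetoken() below through WAITING_FOR_DECL_OR_RULE,
** WAITING_FOR_ARROW, LHS_ALIAS_1..3, IN_RHS and RHS_ALIAS_1/2; the "."
** token then builds the rule, and the "{...}" token attaches the code
** fragment to psp->prevrule.
*/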
/* Parse a single token */
static void parseonetoken(struct pstate *psp)
{
const char *x;
x = Strsafe(psp->tokenstart); /* Save the token permanently */
#if 0
printf("%s:%d: Token=[%s] state=%d\n",psp->filename,psp->tokenlineno,
x,psp->state);
#endif
switch( psp->state ){
case INITIALIZE:
psp->prevrule = 0;
psp->preccounter = 0;
psp->firstrule = psp->lastrule = 0;
psp->gp->nrule = 0;
/* fall through */
case WAITING_FOR_DECL_OR_RULE:
if( x[0]=='%' ){
psp->state = WAITING_FOR_DECL_KEYWORD;
}else if( ISLOWER(x[0]) ){
psp->lhs = Symbol_new(x);
psp->nrhs = 0;
psp->lhsalias = 0;
psp->state = WAITING_FOR_ARROW;
}else if( x[0]=='{' ){
if( psp->prevrule==0 ){
ErrorMsg(psp->filename,psp->tokenlineno,
"There is no prior rule upon which to attach the code "
"fragment which begins on this line.");
psp->errorcnt++;
}else if( psp->prevrule->code!=0 ){
ErrorMsg(psp->filename,psp->tokenlineno,
"Code fragment beginning on this line is not the first "
"to follow the previous rule.");
psp->errorcnt++;
}else if( strcmp(x, "{NEVER-REDUCE")==0 ){
psp->prevrule->neverReduce = 1;
}else{
psp->prevrule->line = psp->tokenlineno;
psp->prevrule->code = &x[1];
psp->prevrule->noCode = 0;
}
}else if( x[0]=='[' ){
psp->state = PRECEDENCE_MARK_1;
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Token \"%s\" should be either \"%%\" or a nonterminal name.",
x);
psp->errorcnt++;
}
break;
case PRECEDENCE_MARK_1:
if( !ISUPPER(x[0]) ){
ErrorMsg(psp->filename,psp->tokenlineno,
"The precedence symbol must be a terminal.");
psp->errorcnt++;
}else if( psp->prevrule==0 ){
ErrorMsg(psp->filename,psp->tokenlineno,
"There is no prior rule to assign precedence \"[%s]\".",x);
psp->errorcnt++;
}else if( psp->prevrule->precsym!=0 ){
ErrorMsg(psp->filename,psp->tokenlineno,
"Precedence mark on this line is not the first "
"to follow the previous rule.");
psp->errorcnt++;
}else{
psp->prevrule->precsym = Symbol_new(x);
}
psp->state = PRECEDENCE_MARK_2;
break;
case PRECEDENCE_MARK_2:
if( x[0]!=']' ){
ErrorMsg(psp->filename,psp->tokenlineno,
"Missing \"]\" on precedence mark.");
psp->errorcnt++;
}
psp->state = WAITING_FOR_DECL_OR_RULE;
break;
case WAITING_FOR_ARROW:
if( x[0]==':' && x[1]==':' && x[2]=='=' ){
psp->state = IN_RHS;
}else if( x[0]=='(' ){
psp->state = LHS_ALIAS_1;
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Expected to see a \"::=\" following the LHS symbol \"%s\".",
psp->lhs->name);
psp->errorcnt++;
psp->state = RESYNC_AFTER_RULE_ERROR;
}
break;
case LHS_ALIAS_1:
if( ISALPHA(x[0]) ){
psp->lhsalias = x;
psp->state = LHS_ALIAS_2;
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"\"%s\" is not a valid alias for the LHS \"%s\"\n",
x,psp->lhs->name);
psp->errorcnt++;
psp->state = RESYNC_AFTER_RULE_ERROR;
}
break;
case LHS_ALIAS_2:
if( x[0]==')' ){
psp->state = LHS_ALIAS_3;
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Missing \")\" following LHS alias name \"%s\".",psp->lhsalias);
psp->errorcnt++;
psp->state = RESYNC_AFTER_RULE_ERROR;
}
break;
case LHS_ALIAS_3:
if( x[0]==':' && x[1]==':' && x[2]=='=' ){
psp->state = IN_RHS;
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Missing \"::=\" following: \"%s(%s)\".",
psp->lhs->name,psp->lhsalias);
psp->errorcnt++;
psp->state = RESYNC_AFTER_RULE_ERROR;
}
break;
case IN_RHS:
if( x[0]=='.' ){
struct rule *rp;
rp = (struct rule *)calloc( sizeof(struct rule) +
sizeof(struct symbol*)*psp->nrhs + sizeof(char*)*psp->nrhs, 1);
if( rp==0 ){
ErrorMsg(psp->filename,psp->tokenlineno,
"Can't allocate enough memory for this rule.");
psp->errorcnt++;
psp->prevrule = 0;
}else{
int i;
rp->ruleline = psp->tokenlineno;
rp->rhs = (struct symbol**)&rp[1];
rp->rhsalias = (const char**)&(rp->rhs[psp->nrhs]);
for(i=0; i<psp->nrhs; i++){
rp->rhs[i] = psp->rhs[i];
rp->rhsalias[i] = psp->alias[i];
if( rp->rhsalias[i]!=0 ){ rp->rhs[i]->bContent = 1; }
}
rp->lhs = psp->lhs;
rp->lhsalias = psp->lhsalias;
rp->nrhs = psp->nrhs;
rp->code = 0;
rp->noCode = 1;
rp->precsym = 0;
rp->index = psp->gp->nrule++;
rp->nextlhs = rp->lhs->rule;
rp->lhs->rule = rp;
rp->next = 0;
if( psp->firstrule==0 ){
psp->firstrule = psp->lastrule = rp;
}else{
psp->lastrule->next = rp;
psp->lastrule = rp;
}
psp->prevrule = rp;
}
psp->state = WAITING_FOR_DECL_OR_RULE;
}else if( ISALPHA(x[0]) ){
if( psp->nrhs>=MAXRHS ){
ErrorMsg(psp->filename,psp->tokenlineno,
"Too many symbols on RHS of rule beginning at \"%s\".",
x);
psp->errorcnt++;
psp->state = RESYNC_AFTER_RULE_ERROR;
}else{
psp->rhs[psp->nrhs] = Symbol_new(x);
psp->alias[psp->nrhs] = 0;
psp->nrhs++;
}
}else if( (x[0]=='|' || x[0]=='/') && psp->nrhs>0 && ISUPPER(x[1]) ){
struct symbol *msp = psp->rhs[psp->nrhs-1];
if( msp->type!=MULTITERMINAL ){
struct symbol *origsp = msp;
msp = (struct symbol *) calloc(1,sizeof(*msp));
msp->type = MULTITERMINAL;
msp->nsubsym = 1;
msp->subsym = (struct symbol **) calloc(1,sizeof(struct symbol*));
msp->subsym[0] = origsp;
msp->name = origsp->name;
psp->rhs[psp->nrhs-1] = msp;
}
msp->nsubsym++;
msp->subsym = (struct symbol **) realloc(msp->subsym,
sizeof(struct symbol*)*msp->nsubsym);
msp->subsym[msp->nsubsym-1] = Symbol_new(&x[1]);
if( ISLOWER(x[1]) || ISLOWER(msp->subsym[0]->name[0]) ){
ErrorMsg(psp->filename,psp->tokenlineno,
"Cannot form a compound containing a non-terminal");
psp->errorcnt++;
}
}else if( x[0]=='(' && psp->nrhs>0 ){
psp->state = RHS_ALIAS_1;
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Illegal character on RHS of rule: \"%s\".",x);
psp->errorcnt++;
psp->state = RESYNC_AFTER_RULE_ERROR;
}
break;
case RHS_ALIAS_1:
if( ISALPHA(x[0]) ){
psp->alias[psp->nrhs-1] = x;
psp->state = RHS_ALIAS_2;
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"\"%s\" is not a valid alias for the RHS symbol \"%s\"\n",
x,psp->rhs[psp->nrhs-1]->name);
psp->errorcnt++;
psp->state = RESYNC_AFTER_RULE_ERROR;
}
break;
case RHS_ALIAS_2:
if( x[0]==')' ){
psp->state = IN_RHS;
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Missing \")\" following LHS alias name \"%s\".",psp->lhsalias);
psp->errorcnt++;
psp->state = RESYNC_AFTER_RULE_ERROR;
}
break;
case WAITING_FOR_DECL_KEYWORD:
if( ISALPHA(x[0]) ){
psp->declkeyword = x;
psp->declargslot = 0;
psp->decllinenoslot = 0;
psp->insertLineMacro = 1;
psp->state = WAITING_FOR_DECL_ARG;
if( strcmp(x,"name")==0 ){
psp->declargslot = &(psp->gp->name);
psp->insertLineMacro = 0;
}else if( strcmp(x,"include")==0 ){
psp->declargslot = &(psp->gp->include);
}else if( strcmp(x,"code")==0 ){
psp->declargslot = &(psp->gp->extracode);
}else if( strcmp(x,"token_destructor")==0 ){
psp->declargslot = &psp->gp->tokendest;
}else if( strcmp(x,"default_destructor")==0 ){
psp->declargslot = &psp->gp->vardest;
}else if( strcmp(x,"token_prefix")==0 ){
psp->declargslot = &psp->gp->tokenprefix;
psp->insertLineMacro = 0;
}else if( strcmp(x,"syntax_error")==0 ){
psp->declargslot = &(psp->gp->error);
}else if( strcmp(x,"parse_accept")==0 ){
psp->declargslot = &(psp->gp->accept);
}else if( strcmp(x,"parse_failure")==0 ){
psp->declargslot = &(psp->gp->failure);
}else if( strcmp(x,"stack_overflow")==0 ){
psp->declargslot = &(psp->gp->overflow);
}else if( strcmp(x,"extra_argument")==0 ){
psp->declargslot = &(psp->gp->arg);
psp->insertLineMacro = 0;
}else if( strcmp(x,"extra_context")==0 ){
psp->declargslot = &(psp->gp->ctx);
psp->insertLineMacro = 0;
}else if( strcmp(x,"token_type")==0 ){
psp->declargslot = &(psp->gp->tokentype);
psp->insertLineMacro = 0;
}else if( strcmp(x,"default_type")==0 ){
psp->declargslot = &(psp->gp->vartype);
psp->insertLineMacro = 0;
}else if( strcmp(x,"stack_size")==0 ){
psp->declargslot = &(psp->gp->stacksize);
psp->insertLineMacro = 0;
}else if( strcmp(x,"start_symbol")==0 ){
psp->declargslot = &(psp->gp->start);
psp->insertLineMacro = 0;
}else if( strcmp(x,"left")==0 ){
psp->preccounter++;
psp->declassoc = LEFT;
psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
}else if( strcmp(x,"right")==0 ){
psp->preccounter++;
psp->declassoc = RIGHT;
psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
}else if( strcmp(x,"nonassoc")==0 ){
psp->preccounter++;
psp->declassoc = NONE;
psp->state = WAITING_FOR_PRECEDENCE_SYMBOL;
}else if( strcmp(x,"destructor")==0 ){
psp->state = WAITING_FOR_DESTRUCTOR_SYMBOL;
}else if( strcmp(x,"type")==0 ){
psp->state = WAITING_FOR_DATATYPE_SYMBOL;
}else if( strcmp(x,"fallback")==0 ){
psp->fallback = 0;
psp->state = WAITING_FOR_FALLBACK_ID;
}else if( strcmp(x,"token")==0 ){
psp->state = WAITING_FOR_TOKEN_NAME;
}else if( strcmp(x,"wildcard")==0 ){
psp->state = WAITING_FOR_WILDCARD_ID;
}else if( strcmp(x,"token_class")==0 ){
psp->state = WAITING_FOR_CLASS_ID;
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Unknown declaration keyword: \"%%%s\".",x);
psp->errorcnt++;
psp->state = RESYNC_AFTER_DECL_ERROR;
}
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Illegal declaration keyword: \"%s\".",x);
psp->errorcnt++;
psp->state = RESYNC_AFTER_DECL_ERROR;
}
break;
case WAITING_FOR_DESTRUCTOR_SYMBOL:
if( !ISALPHA(x[0]) ){
ErrorMsg(psp->filename,psp->tokenlineno,
"Symbol name missing after %%destructor keyword");
psp->errorcnt++;
psp->state = RESYNC_AFTER_DECL_ERROR;
}else{
struct symbol *sp = Symbol_new(x);
psp->declargslot = &sp->destructor;
psp->decllinenoslot = &sp->destLineno;
psp->insertLineMacro = 1;
psp->state = WAITING_FOR_DECL_ARG;
}
break;
case WAITING_FOR_DATATYPE_SYMBOL:
if( !ISALPHA(x[0]) ){
ErrorMsg(psp->filename,psp->tokenlineno,
"Symbol name missing after %%type keyword");
psp->errorcnt++;
psp->state = RESYNC_AFTER_DECL_ERROR;
}else{
struct symbol *sp = Symbol_find(x);
if((sp) && (sp->datatype)){
ErrorMsg(psp->filename,psp->tokenlineno,
"Symbol %%type \"%s\" already defined", x);
psp->errorcnt++;
psp->state = RESYNC_AFTER_DECL_ERROR;
}else{
if (!sp){
sp = Symbol_new(x);
}
psp->declargslot = &sp->datatype;
psp->insertLineMacro = 0;
psp->state = WAITING_FOR_DECL_ARG;
}
}
break;
case WAITING_FOR_PRECEDENCE_SYMBOL:
if( x[0]=='.' ){
psp->state = WAITING_FOR_DECL_OR_RULE;
}else if( ISUPPER(x[0]) ){
struct symbol *sp;
sp = Symbol_new(x);
if( sp->prec>=0 ){
ErrorMsg(psp->filename,psp->tokenlineno,
"Symbol \"%s\" has already been given a precedence.",x);
psp->errorcnt++;
}else{
sp->prec = psp->preccounter;
sp->assoc = psp->declassoc;
}
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Can't assign a precedence to \"%s\".",x);
psp->errorcnt++;
}
break;
case WAITING_FOR_DECL_ARG:
if( x[0]=='{' || x[0]=='\"' || ISALNUM(x[0]) ){
const char *zOld, *zNew;
char *zBuf, *z;
int nOld, n, nLine = 0, nNew, nBack;
int addLineMacro;
char zLine[50];
zNew = x;
if( zNew[0]=='"' || zNew[0]=='{' ) zNew++;
nNew = lemonStrlen(zNew);
if( *psp->declargslot ){
zOld = *psp->declargslot;
}else{
zOld = "";
}
nOld = lemonStrlen(zOld);
n = nOld + nNew + 20;
addLineMacro = !psp->gp->nolinenosflag
&& psp->insertLineMacro
&& psp->tokenlineno>1
&& (psp->decllinenoslot==0 || psp->decllinenoslot[0]!=0);
if( addLineMacro ){
for(z=psp->filename, nBack=0; *z; z++){
if( *z=='\\' ) nBack++;
}
lemon_sprintf(zLine, "#line %d ", psp->tokenlineno);
nLine = lemonStrlen(zLine);
n += nLine + lemonStrlen(psp->filename) + nBack;
}
*psp->declargslot = (char *) realloc(*psp->declargslot, n);
zBuf = *psp->declargslot + nOld;
if( addLineMacro ){
if( nOld && zBuf[-1]!='\n' ){
*(zBuf++) = '\n';
}
memcpy(zBuf, zLine, nLine);
zBuf += nLine;
*(zBuf++) = '"';
for(z=psp->filename; *z; z++){
if( *z=='\\' ){
*(zBuf++) = '\\';
}
*(zBuf++) = *z;
}
*(zBuf++) = '"';
*(zBuf++) = '\n';
}
if( psp->decllinenoslot && psp->decllinenoslot[0]==0 ){
psp->decllinenoslot[0] = psp->tokenlineno;
}
memcpy(zBuf, zNew, nNew);
zBuf += nNew;
*zBuf = 0;
psp->state = WAITING_FOR_DECL_OR_RULE;
}else{
ErrorMsg(psp->filename,psp->tokenlineno,
"Illegal argument to %%%s: %s",psp->declkeyword,x);
psp->errorcnt++;
psp->state = RESYNC_AFTER_DECL_ERROR;
}
break;
case WAITING_FOR_FALLBACK_ID:
if( x[0]=='.' ){
psp->state = WAITING_FOR_DECL_OR_RULE;
}else if( !ISUPPER(x[0]) ){
ErrorMsg(psp->filename, psp->tokenlineno,
"%%fallback argument \"%s\" should be a token", x);
psp->errorcnt++;
}else{
struct symbol *sp = Symbol_new(x);
if( psp->fallback==0 ){
psp->fallback = sp;
}else if( sp->fallback ){
ErrorMsg(psp->filename, psp->tokenlineno,
"More than one fallback assigned to token %s", x);
psp->errorcnt++;
}else{
sp->fallback = psp->fallback;
psp->gp->has_fallback = 1;
}
}
break;
case WAITING_FOR_TOKEN_NAME:
      /* Tokens do not have to be declared before use.  But they can be
      ** declared in order to control their assigned integer numbers.  The
      ** number for each token is assigned when it is first seen.  So
      ** including
      **
      **     %token ONE TWO THREE.
      **
      ** early in the grammar file assigns small consecutive values to each
      ** of the tokens ONE, TWO, and THREE.
      */
if( x[0]=='.' ){
psp->state = WAITING_FOR_DECL_OR_RULE;
}else if( !ISUPPER(x[0]) ){
ErrorMsg(psp->filename, psp->tokenlineno,
"%%token argument \"%s\" should be a token", x);
psp->errorcnt++;
}else{
(void)Symbol_new(x);
}
break;
case WAITING_FOR_WILDCARD_ID:
if( x[0]=='.' ){
psp->state = WAITING_FOR_DECL_OR_RULE;
}else if( !ISUPPER(x[0]) ){
ErrorMsg(psp->filename, psp->tokenlineno,
"%%wildcard argument \"%s\" should be a token", x);
psp->errorcnt++;
}else{
struct symbol *sp = Symbol_new(x);
if( psp->gp->wildcard==0 ){
psp->gp->wildcard = sp;
}else{
ErrorMsg(psp->filename, psp->tokenlineno,
"Extra wildcard to token: %s", x);
psp->errorcnt++;
}
}
break;
case WAITING_FOR_CLASS_ID:
if( !ISLOWER(x[0]) ){
ErrorMsg(psp->filename, psp->tokenlineno,
"%%token_class must be followed by an identifier: %s", x);
psp->errorcnt++;
psp->state = RESYNC_AFTER_DECL_ERROR;
}else if( Symbol_find(x) ){
ErrorMsg(psp->filename, psp->tokenlineno,
"Symbol \"%s\" already used", x);
psp->errorcnt++;
psp->state = RESYNC_AFTER_DECL_ERROR;
}else{
psp->tkclass = Symbol_new(x);
psp->tkclass->type = MULTITERMINAL;
psp->state = WAITING_FOR_CLASS_TOKEN;
}
break;
case WAITING_FOR_CLASS_TOKEN:
if( x[0]=='.' ){
psp->state = WAITING_FOR_DECL_OR_RULE;
}else if( ISUPPER(x[0]) || ((x[0]=='|' || x[0]=='/') && ISUPPER(x[1])) ){
struct symbol *msp = psp->tkclass;
msp->nsubsym++;
msp->subsym = (struct symbol **) realloc(msp->subsym,
sizeof(struct symbol*)*msp->nsubsym);
if( !ISUPPER(x[0]) ) x++;
msp->subsym[msp->nsubsym-1] = Symbol_new(x);
}else{
ErrorMsg(psp->filename, psp->tokenlineno,
"%%token_class argument \"%s\" should be a token", x);
psp->errorcnt++;
psp->state = RESYNC_AFTER_DECL_ERROR;
}
break;
case RESYNC_AFTER_RULE_ERROR:
/* if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
** break; */
case RESYNC_AFTER_DECL_ERROR:
if( x[0]=='.' ) psp->state = WAITING_FOR_DECL_OR_RULE;
if( x[0]=='%' ) psp->state = WAITING_FOR_DECL_KEYWORD;
break;
}
}
/* The text in the input is part of the argument to a %if, %ifdef, or
** %ifndef directive.  Evaluate the text as a boolean expression.  Return
** true or false.
*/
static int eval_preprocessor_boolean(char *z, int lineno){
int neg = 0;
int res = 0;
int okTerm = 1;
int i;
for(i=0; z[i]!=0; i++){
if( ISSPACE(z[i]) ) continue;
if( z[i]=='!' ){
if( !okTerm ) goto pp_syntax_error;
neg = !neg;
continue;
}
if( z[i]=='|' && z[i+1]=='|' ){
if( okTerm ) goto pp_syntax_error;
if( res ) return 1;
i++;
okTerm = 1;
continue;
}
if( z[i]=='&' && z[i+1]=='&' ){
if( okTerm ) goto pp_syntax_error;
if( !res ) return 0;
i++;
okTerm = 1;
continue;
}
if( z[i]=='(' ){
int k;
int n = 1;
if( !okTerm ) goto pp_syntax_error;
for(k=i+1; z[k]; k++){
if( z[k]==')' ){
n--;
if( n==0 ){
z[k] = 0;
res = eval_preprocessor_boolean(&z[i+1], -1);
z[k] = ')';
if( res<0 ){
i = i-res;
goto pp_syntax_error;
}
i = k;
break;
}
}else if( z[k]=='(' ){
n++;
}else if( z[k]==0 ){
i = k;
goto pp_syntax_error;
}
}
if( neg ){
res = !res;
neg = 0;
}
okTerm = 0;
continue;
}
if( ISALPHA(z[i]) ){
int j, k, n;
if( !okTerm ) goto pp_syntax_error;
for(k=i+1; ISALNUM(z[k]) || z[k]=='_'; k++){}
n = k - i;
res = 0;
for(j=0; j<nDefine; j++){
if( strncmp(azDefine[j],&z[i],n)==0 && azDefine[j][n]==0 ){
res = 1;
break;
}
}
i = k-1;
if( neg ){
res = !res;
neg = 0;
}
okTerm = 0;
continue;
}
goto pp_syntax_error;
}
return res;
pp_syntax_error:
if( lineno>0 ){
fprintf(stderr, "%%if syntax error on line %d.\n", lineno);
fprintf(stderr, " %.*s <-- syntax error here\n", i+1, z);
exit(1);
}else{
return -(i+1);
}
}
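/* Behavior sketch (illustrative only, not part of the build): assuming
** "SQLITE_DEBUG" is in azDefine[] and "NDEBUG" is not, then:
**
**   eval_preprocessor_boolean("SQLITE_DEBUG", 1)              -> 1
**   eval_preprocessor_boolean("!NDEBUG", 1)                   -> 1
**   eval_preprocessor_boolean("NDEBUG && SQLITE_DEBUG", 1)    -> 0
**   eval_preprocessor_boolean("!(NDEBUG || SQLITE_DEBUG)", 1) -> 0
**
** With lineno<=0 a syntax error returns a negative offset instead of
** exiting, which is how the recursive call for parenthesized
** subexpressions reports errors back to the outer invocation.
*/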
/* Run the preprocessor over the input file text.  The global variables
** azDefine[0] through azDefine[nDefine-1] contain the names of all defined
** macros.  This routine looks for "%if", "%ifdef", "%ifndef", "%else",
** and "%endif" lines and comments them out.  Text in between is also
** commented out as appropriate.
*/
static void preprocess_input(char *z){
int i, j, k;
int exclude = 0;
int start = 0;
int lineno = 1;
int start_lineno = 1;
for(i=0; z[i]; i++){
if( z[i]=='\n' ) lineno++;
if( z[i]!='%' || (i>0 && z[i-1]!='\n') ) continue;
if( strncmp(&z[i],"%endif",6)==0 && ISSPACE(z[i+6]) ){
if( exclude ){
exclude--;
if( exclude==0 ){
for(j=start; j<i; j++) if( z[j]!='\n' ) z[j] = ' ';
}
}
for(j=i; z[j] && z[j]!='\n'; j++) z[j] = ' ';
}else if( strncmp(&z[i],"%else",5)==0 && ISSPACE(z[i+5]) ){
if( exclude==1){
exclude = 0;
for(j=start; j<i; j++) if( z[j]!='\n' ) z[j] = ' ';
}else if( exclude==0 ){
exclude = 1;
start = i;
start_lineno = lineno;
}
for(j=i; z[j] && z[j]!='\n'; j++) z[j] = ' ';
}else if( strncmp(&z[i],"%ifdef ",7)==0
|| strncmp(&z[i],"%if ",4)==0
|| strncmp(&z[i],"%ifndef ",8)==0 ){
if( exclude ){
exclude++;
}else{
int isNot;
int iBool;
for(j=i; z[j] && !ISSPACE(z[j]); j++){}
iBool = j;
isNot = (j==i+7);
while( z[j] && z[j]!='\n' ){ j++; }
k = z[j];
z[j] = 0;
exclude = eval_preprocessor_boolean(&z[iBool], lineno);
z[j] = k;
if( !isNot ) exclude = !exclude;
if( exclude ){
start = i;
start_lineno = lineno;
}
}
for(j=i; z[j] && z[j]!='\n'; j++) z[j] = ' ';
}
}
if( exclude ){
fprintf(stderr,"unterminated %%ifdef starting on line %d\n", start_lineno);
exit(1);
}
}
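/* Example (illustrative input only): given the text
**
**   %ifdef FOO
**   a ::= b.
**   %endif
**
** preprocess_input() overwrites each directive line with spaces, and
** when FOO is not in azDefine[] it also blanks the enclosed body.
** Newlines are always preserved so that line numbers reported by later
** passes remain accurate.
*/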
/* In spite of its name, this function is really a scanner.  It reads
** in the entire input file (all at once) and then tokenizes it.  Each
** token is passed to the function "parseonetoken" which builds all
** the appropriate data structures in the global state vector "gp".
*/
void Parse(struct lemon *gp)
{
struct pstate ps;
FILE *fp;
char *filebuf;
unsigned int filesize;
int lineno;
int c;
char *cp, *nextcp;
memset(&ps, '\0', sizeof(ps));
ps.gp = gp;
ps.filename = gp->filename;
ps.errorcnt = 0;
ps.state = INITIALIZE;
/* Begin by reading the input file */
fp = fopen(ps.filename,"rb");
if( fp==0 ){
ErrorMsg(ps.filename,0,"Can't open this file for reading.");
gp->errorcnt++;
return;
}
  fseek(fp,0,SEEK_END);
filesize = ftell(fp);
rewind(fp);
filebuf = (char *)malloc( filesize+1 );
if( filesize>100000000 || filebuf==0 ){
ErrorMsg(ps.filename,0,"Input file too large.");
free(filebuf);
gp->errorcnt++;
fclose(fp);
return;
}
if( fread(filebuf,1,filesize,fp)!=filesize ){
ErrorMsg(ps.filename,0,"Can't read in all %d bytes of this file.",
filesize);
free(filebuf);
gp->errorcnt++;
fclose(fp);
return;
}
fclose(fp);
filebuf[filesize] = 0;
/* Make an initial pass through the file to handle %ifdef and %ifndef */
preprocess_input(filebuf);
if( gp->printPreprocessed ){
printf("%s\n", filebuf);
return;
}
/* Now scan the text of the input file */
lineno = 1;
for(cp=filebuf; (c= *cp)!=0; ){
if( c=='\n' ) lineno++; /* Keep track of the line number */
if( ISSPACE(c) ){ cp++; continue; } /* Skip all white space */
if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments */
cp+=2;
while( (c= *cp)!=0 && c!='\n' ) cp++;
continue;
}
if( c=='/' && cp[1]=='*' ){ /* Skip C style comments */
cp+=2;
if( (*cp)=='/' ) cp++;
while( (c= *cp)!=0 && (c!='/' || cp[-1]!='*') ){
if( c=='\n' ) lineno++;
cp++;
}
if( c ) cp++;
continue;
}
ps.tokenstart = cp; /* Mark the beginning of the token */
ps.tokenlineno = lineno; /* Linenumber on which token begins */
if( c=='\"' ){ /* String literals */
cp++;
while( (c= *cp)!=0 && c!='\"' ){
if( c=='\n' ) lineno++;
cp++;
}
if( c==0 ){
        ErrorMsg(ps.filename,ps.tokenlineno,
"String starting on this line is not terminated before "
"the end of the file.");
ps.errorcnt++;
nextcp = cp;
}else{
nextcp = cp+1;
}
}else if( c=='{' ){ /* A block of C code */
int level;
cp++;
for(level=1; (c= *cp)!=0 && (level>1 || c!='}'); cp++){
if( c=='\n' ) lineno++;
else if( c=='{' ) level++;
else if( c=='}' ) level--;
else if( c=='/' && cp[1]=='*' ){ /* Skip comments */
int prevc;
cp = &cp[2];
prevc = 0;
while( (c= *cp)!=0 && (c!='/' || prevc!='*') ){
if( c=='\n' ) lineno++;
prevc = c;
cp++;
}
}else if( c=='/' && cp[1]=='/' ){ /* Skip C++ style comments too */
cp = &cp[2];
while( (c= *cp)!=0 && c!='\n' ) cp++;
if( c ) lineno++;
}else if( c=='\'' || c=='\"' ){ /* String a character literals */
int startchar, prevc;
startchar = c;
prevc = 0;
for(cp++; (c= *cp)!=0 && (c!=startchar || prevc=='\\'); cp++){
if( c=='\n' ) lineno++;
if( prevc=='\\' ) prevc = 0;
else prevc = c;
}
}
}
if( c==0 ){
ErrorMsg(ps.filename,ps.tokenlineno,
"C code starting on this line is not terminated before "
"the end of the file.");
ps.errorcnt++;
nextcp = cp;
}else{
nextcp = cp+1;
}
}else if( ISALNUM(c) ){ /* Identifiers */
while( (c= *cp)!=0 && (ISALNUM(c) || c=='_') ) cp++;
nextcp = cp;
}else if( c==':' && cp[1]==':' && cp[2]=='=' ){ /* The operator "::=" */
cp += 3;
nextcp = cp;
}else if( (c=='/' || c=='|') && ISALPHA(cp[1]) ){
cp += 2;
while( (c = *cp)!=0 && (ISALNUM(c) || c=='_') ) cp++;
nextcp = cp;
}else{ /* All other (one character) operators */
cp++;
nextcp = cp;
}
c = *cp;
*cp = 0; /* Null terminate the token */
parseonetoken(&ps); /* Parse the token */
*cp = (char)c; /* Restore the buffer */
cp = nextcp;
}
free(filebuf); /* Release the buffer after parsing */
gp->rule = ps.firstrule;
gp->errorcnt = ps.errorcnt;
}
/*************************** From the file "plink.c" *********************/
/*
** Routines processing configuration follow-set propagation links
** in the LEMON parser generator.
*/
static struct plink *plink_freelist = 0;
/* Allocate a new plink */
struct plink *Plink_new(void){
struct plink *newlink;
if( plink_freelist==0 ){
int i;
int amt = 100;
plink_freelist = (struct plink *)calloc( amt, sizeof(struct plink) );
if( plink_freelist==0 ){
fprintf(stderr,
"Unable to allocate memory for a new follow-set propagation link.\n");
exit(1);
}
for(i=0; i<amt-1; i++) plink_freelist[i].next = &plink_freelist[i+1];
plink_freelist[amt-1].next = 0;
}
newlink = plink_freelist;
plink_freelist = plink_freelist->next;
return newlink;
}
/* Add a plink to a plink list */
void Plink_add(struct plink **plpp, struct config *cfp)
{
struct plink *newlink;
newlink = Plink_new();
newlink->next = *plpp;
*plpp = newlink;
newlink->cfp = cfp;
}
/* Transfer every plink on the list "from" to the list "to" */
void Plink_copy(struct plink **to, struct plink *from)
{
struct plink *nextpl;
while( from ){
nextpl = from->next;
from->next = *to;
*to = from;
from = nextpl;
}
}
/* Delete every plink on the list */
void Plink_delete(struct plink *plp)
{
struct plink *nextpl;
while( plp ){
nextpl = plp->next;
plp->next = plink_freelist;
plink_freelist = plp;
plp = nextpl;
}
}
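/* Usage sketch (hypothetical configs cfgA and cfgB, shown only to
** illustrate the lifecycle of a plink list):
**
**   struct plink *pl = 0;
**   Plink_add(&pl, cfgA);    -- push a propagation link for cfgA
**   Plink_add(&pl, cfgB);
**   Plink_copy(&other, pl);  -- splice the entire list onto "other"
**   Plink_delete(other);     -- return all links to the freelist
*/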
/*********************** From the file "report.c" **************************/
/*
** Procedures for generating reports and tables in the LEMON parser generator.
*/
/* Generate a filename with the given suffix. Space to hold the
** name comes from malloc() and must be freed by the calling
** function.
*/
PRIVATE char *file_makename(struct lemon *lemp, const char *suffix)
{
char *name;
char *cp;
char *filename = lemp->filename;
int sz;
if( outputDir ){
cp = strrchr(filename, '/');
if( cp ) filename = cp + 1;
}
sz = lemonStrlen(filename);
sz += lemonStrlen(suffix);
if( outputDir ) sz += lemonStrlen(outputDir) + 1;
sz += 5;
name = (char*)malloc( sz );
if( name==0 ){
fprintf(stderr,"Can't allocate space for a filename.\n");
exit(1);
}
name[0] = 0;
if( outputDir ){
lemon_strcpy(name, outputDir);
lemon_strcat(name, "/");
}
lemon_strcat(name,filename);
cp = strrchr(name,'.');
if( cp ) *cp = 0;
lemon_strcat(name,suffix);
return name;
}
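/* For example (illustrative values): with lemp->filename=="grammar.y",
** suffix==".out", and outputDir=="build", the function strips the old
** ".y" extension and returns the malloc'd string "build/grammar.out".
*/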
/* Open a file with a name based on the name of the input file,
** but with a different (specified) suffix, and return a pointer
** to the stream */
PRIVATE FILE *file_open(
struct lemon *lemp,
const char *suffix,
const char *mode
){
FILE *fp;
if( lemp->outname ) free(lemp->outname);
lemp->outname = file_makename(lemp, suffix);
fp = fopen(lemp->outname,mode);
if( fp==0 && *mode=='w' ){
fprintf(stderr,"Can't open file \"%s\".\n",lemp->outname);
lemp->errorcnt++;
return 0;
}
return fp;
}
/* Print the text of a rule
*/
void rule_print(FILE *out, struct rule *rp){
int i, j;
fprintf(out, "%s",rp->lhs->name);
/* if( rp->lhsalias ) fprintf(out,"(%s)",rp->lhsalias); */
fprintf(out," ::=");
for(i=0; i<rp->nrhs; i++){
struct symbol *sp = rp->rhs[i];
if( sp->type==MULTITERMINAL ){
fprintf(out," %s", sp->subsym[0]->name);
for(j=1; j<sp->nsubsym; j++){
fprintf(out,"|%s", sp->subsym[j]->name);
}
}else{
fprintf(out," %s", sp->name);
}
/* if( rp->rhsalias[i] ) fprintf(out,"(%s)",rp->rhsalias[i]); */
}
}
/* Duplicate the input file without comments and without actions
** on rules */
void Reprint(struct lemon *lemp)
{
struct rule *rp;
struct symbol *sp;
int i, j, maxlen, len, ncolumns, skip;
printf("// Reprint of input file \"%s\".\n// Symbols:\n",lemp->filename);
maxlen = 10;
for(i=0; i<lemp->nsymbol; i++){
sp = lemp->symbols[i];
len = lemonStrlen(sp->name);
if( len>maxlen ) maxlen = len;
}
ncolumns = 76/(maxlen+5);
if( ncolumns<1 ) ncolumns = 1;
skip = (lemp->nsymbol + ncolumns - 1)/ncolumns;
for(i=0; i<skip; i++){
printf("//");
for(j=i; j<lemp->nsymbol; j+=skip){
sp = lemp->symbols[j];
assert( sp->index==j );
printf(" %3d %-*.*s",j,maxlen,maxlen,sp->name);
}
printf("\n");
}
for(rp=lemp->rule; rp; rp=rp->next){
rule_print(stdout, rp);
printf(".");
if( rp->precsym ) printf(" [%s]",rp->precsym->name);
/* if( rp->code ) printf("\n %s",rp->code); */
printf("\n");
}
}
/* Print a single rule.
*/
void RulePrint(FILE *fp, struct rule *rp, int iCursor){
struct symbol *sp;
int i, j;
fprintf(fp,"%s ::=",rp->lhs->name);
for(i=0; i<=rp->nrhs; i++){
if( i==iCursor ) fprintf(fp," *");
if( i==rp->nrhs ) break;
sp = rp->rhs[i];
if( sp->type==MULTITERMINAL ){
fprintf(fp," %s", sp->subsym[0]->name);
for(j=1; j<sp->nsubsym; j++){
fprintf(fp,"|%s",sp->subsym[j]->name);
}
}else{
fprintf(fp," %s", sp->name);
}
}
}
/* Print the rule for a configuration.
*/
void ConfigPrint(FILE *fp, struct config *cfp){
RulePrint(fp, cfp->rp, cfp->dot);
}
/* #define TEST */
#if 0
/* Print a set */
PRIVATE void SetPrint(out,set,lemp)
FILE *out;
char *set;
struct lemon *lemp;
{
int i;
char *spacer;
spacer = "";
fprintf(out,"%12s[","");
for(i=0; i<lemp->nterminal; i++){
if( SetFind(set,i) ){
fprintf(out,"%s%s",spacer,lemp->symbols[i]->name);
spacer = " ";
}
}
fprintf(out,"]\n");
}
/* Print a plink chain */
PRIVATE void PlinkPrint(out,plp,tag)
FILE *out;
struct plink *plp;
char *tag;
{
while( plp ){
fprintf(out,"%12s%s (state %2d) ","",tag,plp->cfp->stp->statenum);
ConfigPrint(out,plp->cfp);
fprintf(out,"\n");
plp = plp->next;
}
}
#endif
/* Print an action to the given file stream.  Return FALSE if
** nothing was actually printed.
*/
int PrintAction(
struct action *ap, /* The action to print */
FILE *fp, /* Print the action here */
int indent /* Indent by this amount */
){
int result = 1;
switch( ap->type ){
case SHIFT: {
struct state *stp = ap->x.stp;
fprintf(fp,"%*s shift %-7d",indent,ap->sp->name,stp->statenum);
break;
}
case REDUCE: {
struct rule *rp = ap->x.rp;
fprintf(fp,"%*s reduce %-7d",indent,ap->sp->name,rp->iRule);
RulePrint(fp, rp, -1);
break;
}
case SHIFTREDUCE: {
struct rule *rp = ap->x.rp;
fprintf(fp,"%*s shift-reduce %-7d",indent,ap->sp->name,rp->iRule);
RulePrint(fp, rp, -1);
break;
}
case ACCEPT:
fprintf(fp,"%*s accept",indent,ap->sp->name);
break;
case ERROR:
fprintf(fp,"%*s error",indent,ap->sp->name);
break;
case SRCONFLICT:
case RRCONFLICT:
fprintf(fp,"%*s reduce %-7d ** Parsing conflict **",
indent,ap->sp->name,ap->x.rp->iRule);
break;
case SSCONFLICT:
fprintf(fp,"%*s shift %-7d ** Parsing conflict **",
indent,ap->sp->name,ap->x.stp->statenum);
break;
case SH_RESOLVED:
if( showPrecedenceConflict ){
fprintf(fp,"%*s shift %-7d -- dropped by precedence",
indent,ap->sp->name,ap->x.stp->statenum);
}else{
result = 0;
}
break;
case RD_RESOLVED:
if( showPrecedenceConflict ){
fprintf(fp,"%*s reduce %-7d -- dropped by precedence",
indent,ap->sp->name,ap->x.rp->iRule);
}else{
result = 0;
}
break;
case NOT_USED:
result = 0;
break;
}
if( result && ap->spOpt ){
fprintf(fp," /* because %s==%s */", ap->sp->name, ap->spOpt->name);
}
return result;
}
/* Generate the "*.out" log file */
void ReportOutput(struct lemon *lemp)
{
int i, n;
struct state *stp;
struct config *cfp;
struct action *ap;
struct rule *rp;
FILE *fp;
fp = file_open(lemp,".out","wb");
if( fp==0 ) return;
for(i=0; i<lemp->nxstate; i++){
stp = lemp->sorted[i];
fprintf(fp,"State %d:\n",stp->statenum);
if( lemp->basisflag ) cfp=stp->bp;
else cfp=stp->cfp;
while( cfp ){
char buf[20];
if( cfp->dot==cfp->rp->nrhs ){
lemon_sprintf(buf,"(%d)",cfp->rp->iRule);
fprintf(fp," %5s ",buf);
}else{
fprintf(fp," ");
}
ConfigPrint(fp,cfp);
fprintf(fp,"\n");
#if 0
SetPrint(fp,cfp->fws,lemp);
PlinkPrint(fp,cfp->fplp,"To ");
PlinkPrint(fp,cfp->bplp,"From");
#endif
if( lemp->basisflag ) cfp=cfp->bp;
else cfp=cfp->next;
}
fprintf(fp,"\n");
for(ap=stp->ap; ap; ap=ap->next){
if( PrintAction(ap,fp,30) ) fprintf(fp,"\n");
}
fprintf(fp,"\n");
}
fprintf(fp, "----------------------------------------------------\n");
fprintf(fp, "Symbols:\n");
fprintf(fp, "The first-set of non-terminals is shown after the name.\n\n");
for(i=0; i<lemp->nsymbol; i++){
int j;
struct symbol *sp;
sp = lemp->symbols[i];
fprintf(fp, " %3d: %s", i, sp->name);
if( sp->type==NONTERMINAL ){
fprintf(fp, ":");
if( sp->lambda ){
fprintf(fp, " <lambda>");
}
for(j=0; j<lemp->nterminal; j++){
if( sp->firstset && SetFind(sp->firstset, j) ){
fprintf(fp, " %s", lemp->symbols[j]->name);
}
}
}
if( sp->prec>=0 ) fprintf(fp," (precedence=%d)", sp->prec);
fprintf(fp, "\n");
}
fprintf(fp, "----------------------------------------------------\n");
fprintf(fp, "Syntax-only Symbols:\n");
fprintf(fp, "The following symbols never carry semantic content.\n\n");
for(i=n=0; i<lemp->nsymbol; i++){
int w;
struct symbol *sp = lemp->symbols[i];
if( sp->bContent ) continue;
w = (int)strlen(sp->name);
if( n>0 && n+w>75 ){
fprintf(fp,"\n");
n = 0;
}
if( n>0 ){
fprintf(fp, " ");
n++;
}
fprintf(fp, "%s", sp->name);
n += w;
}
if( n>0 ) fprintf(fp, "\n");
fprintf(fp, "----------------------------------------------------\n");
fprintf(fp, "Rules:\n");
for(rp=lemp->rule; rp; rp=rp->next){
fprintf(fp, "%4d: ", rp->iRule);
rule_print(fp, rp);
fprintf(fp,".");
if( rp->precsym ){
fprintf(fp," [%s precedence=%d]",
rp->precsym->name, rp->precsym->prec);
}
fprintf(fp,"\n");
}
fclose(fp);
return;
}
/* Search for the file "name" which is in the same directory as
** the executable */
PRIVATE char *pathsearch(char *argv0, char *name, int modemask)
{
const char *pathlist;
char *pathbufptr = 0;
char *pathbuf = 0;
char *path,*cp;
char c;
#ifdef __WIN32__
cp = strrchr(argv0,'\\');
#else
cp = strrchr(argv0,'/');
#endif
if( cp ){
c = *cp;
*cp = 0;
path = (char *)malloc( lemonStrlen(argv0) + lemonStrlen(name) + 2 );
if( path ) lemon_sprintf(path,"%s/%s",argv0,name);
*cp = c;
}else{
pathlist = getenv("PATH");
if( pathlist==0 ) pathlist = ".:/bin:/usr/bin";
pathbuf = (char *) malloc( lemonStrlen(pathlist) + 1 );
path = (char *)malloc( lemonStrlen(pathlist)+lemonStrlen(name)+2 );
if( (pathbuf != 0) && (path!=0) ){
pathbufptr = pathbuf;
lemon_strcpy(pathbuf, pathlist);
while( *pathbuf ){
cp = strchr(pathbuf,':');
if( cp==0 ) cp = &pathbuf[lemonStrlen(pathbuf)];
c = *cp;
*cp = 0;
lemon_sprintf(path,"%s/%s",pathbuf,name);
*cp = c;
if( c==0 ) pathbuf[0] = 0;
else pathbuf = &cp[1];
if( access(path,modemask)==0 ) break;
}
}
free(pathbufptr);
}
return path;
}
/* Given an action, compute the integer value for that action
** which is to be put in the action table of the generated machine.
** Return negative if no action should be generated.
*/
PRIVATE int compute_action(struct lemon *lemp, struct action *ap)
{
int act;
switch( ap->type ){
case SHIFT: act = ap->x.stp->statenum; break;
case SHIFTREDUCE: {
      /* Since a SHIFT is inherent after a prior REDUCE, convert any
      ** SHIFTREDUCE action with a nonterminal on the LHS into a simple
      ** REDUCE action: */
if( ap->sp->index>=lemp->nterminal
&& (lemp->errsym==0 || ap->sp->index!=lemp->errsym->index)
){
act = lemp->minReduce + ap->x.rp->iRule;
}else{
act = lemp->minShiftReduce + ap->x.rp->iRule;
}
break;
}
case REDUCE: act = lemp->minReduce + ap->x.rp->iRule; break;
case ERROR: act = lemp->errAction; break;
case ACCEPT: act = lemp->accAction; break;
default: act = -1; break;
}
return act;
}
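/* Numbering sketch (values are illustrative): with nstate==100 and
** nrule==50, ReportTable() sets minShiftReduce==100, errAction==150,
** accAction==151, noAction==152, and minReduce==153.  A SHIFT into
** state 7 then encodes as 7, a SHIFTREDUCE of rule 3 as 103, and a
** plain REDUCE of rule 3 as 156.
*/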
#define LINESIZE 1000
/* The next cluster of routines are for reading the template file
** and writing the results to the generated parser */
/* The first function transfers data from "in" to "out" until
** a line is seen which begins with "%%".  The line number is
** tracked.
**
** If name!=0, then any word that begins with "Parse" is changed to
** begin with *name instead.
*/
PRIVATE void tplt_xfer(char *name, FILE *in, FILE *out, int *lineno)
{
int i, iStart;
char line[LINESIZE];
while( fgets(line,LINESIZE,in) && (line[0]!='%' || line[1]!='%') ){
(*lineno)++;
iStart = 0;
if( name ){
for(i=0; line[i]; i++){
if( line[i]=='P' && strncmp(&line[i],"Parse",5)==0
&& (i==0 || !ISALPHA(line[i-1]))
){
if( i>iStart ) fprintf(out,"%.*s",i-iStart,&line[iStart]);
fprintf(out,"%s",name);
i += 4;
iStart = i+1;
}
}
}
fprintf(out,"%s",&line[iStart]);
}
}
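/* For example, when name=="MyParser", a template line such as
**
**   void Parse(void *yyp, int yymajor, ParseTOKENTYPE yyminor);
**
** is copied to the output as
**
**   void MyParser(void *yyp, int yymajor, MyParserTOKENTYPE yyminor);
**
** (The line shown is only an illustration; the real text comes from
** the lempar.c template.)
*/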
/* Skip forward past the header of the template file to the first "%%"
*/
PRIVATE void tplt_skip_header(FILE *in, int *lineno)
{
char line[LINESIZE];
while( fgets(line,LINESIZE,in) && (line[0]!='%' || line[1]!='%') ){
(*lineno)++;
}
}
/* The next function finds the template file and opens it, returning
** a pointer to the opened file. */
PRIVATE FILE *tplt_open(struct lemon *lemp)
{
static char templatename[] = "lempar.c";
char buf[1000];
FILE *in;
char *tpltname;
char *toFree = 0;
char *cp;
/* first, see if user specified a template filename on the command line. */
if (user_templatename != 0) {
if( access(user_templatename,004)==-1 ){
fprintf(stderr,"Can't find the parser driver template file \"%s\".\n",
user_templatename);
lemp->errorcnt++;
return 0;
}
in = fopen(user_templatename,"rb");
if( in==0 ){
fprintf(stderr,"Can't open the template file \"%s\".\n",
user_templatename);
lemp->errorcnt++;
return 0;
}
return in;
}
cp = strrchr(lemp->filename,'.');
if( cp ){
lemon_sprintf(buf,"%.*s.lt",(int)(cp-lemp->filename),lemp->filename);
}else{
lemon_sprintf(buf,"%s.lt",lemp->filename);
}
if( access(buf,004)==0 ){
tpltname = buf;
}else if( access(templatename,004)==0 ){
tpltname = templatename;
}else{
toFree = tpltname = pathsearch(lemp->argv0,templatename,0);
}
if( tpltname==0 ){
fprintf(stderr,"Can't find the parser driver template file \"%s\".\n",
templatename);
lemp->errorcnt++;
return 0;
}
in = fopen(tpltname,"rb");
if( in==0 ){
fprintf(stderr,"Can't open the template file \"%s\".\n",tpltname);
lemp->errorcnt++;
}
free(toFree);
return in;
}
/* Print a #line directive line to the output file. */
PRIVATE void tplt_linedir(FILE *out, int lineno, char *filename)
{
fprintf(out,"#line %d \"",lineno);
while( *filename ){
if( *filename == '\\' ) putc('\\',out);
putc(*filename,out);
filename++;
}
fprintf(out,"\"\n");
}
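/* For example, given lineno==42 and a filename containing backslashes,
** say c:\src\parse.y, tplt_linedir() emits
**
**   #line 42 "c:\\src\\parse.y"
**
** doubling each backslash so the directive remains a valid C string.
*/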
/* Print a string to the file and keep the linenumber up to date */
PRIVATE void tplt_print(FILE *out, struct lemon *lemp, char *str, int *lineno)
{
  if( str==0 || str[0]==0 ) return;
while( *str ){
putc(*str,out);
if( *str=='\n' ) (*lineno)++;
str++;
}
if( str[-1]!='\n' ){
putc('\n',out);
(*lineno)++;
}
if (!lemp->nolinenosflag) {
(*lineno)++; tplt_linedir(out,*lineno,lemp->outname);
}
return;
}
/*
** The following routine emits code for the destructor for the
** symbol sp
*/
void emit_destructor_code(
FILE *out,
struct symbol *sp,
struct lemon *lemp,
int *lineno
){
char *cp = 0;
if( sp->type==TERMINAL ){
cp = lemp->tokendest;
if( cp==0 ) return;
fprintf(out,"{\n"); (*lineno)++;
}else if( sp->destructor ){
cp = sp->destructor;
fprintf(out,"{\n"); (*lineno)++;
if( !lemp->nolinenosflag ){
(*lineno)++;
tplt_linedir(out,sp->destLineno,lemp->filename);
}
}else if( lemp->vardest ){
cp = lemp->vardest;
if( cp==0 ) return;
fprintf(out,"{\n"); (*lineno)++;
}else{
assert( 0 ); /* Cannot happen */
}
for(; *cp; cp++){
if( *cp=='$' && cp[1]=='$' ){
fprintf(out,"(yypminor->yy%d)",sp->dtnum);
cp++;
continue;
}
if( *cp=='\n' ) (*lineno)++;
fputc(*cp,out);
}
fprintf(out,"\n"); (*lineno)++;
if (!lemp->nolinenosflag) {
(*lineno)++; tplt_linedir(out,*lineno,lemp->outname);
}
fprintf(out,"}\n"); (*lineno)++;
return;
}
/*
** Return TRUE (non-zero) if the given symbol has a destructor.
*/
int has_destructor(struct symbol *sp, struct lemon *lemp)
{
int ret;
if( sp->type==TERMINAL ){
ret = lemp->tokendest!=0;
}else{
ret = lemp->vardest!=0 || sp->destructor!=0;
}
return ret;
}
/*
** Append text to a dynamically allocated string. If zText is 0 then
** reset the string to be empty again. Always return the complete text
** of the string (which is overwritten with each call).
**
** n bytes of zText are stored. If n==0 then all of zText up to the first
** \000 terminator is stored. zText can contain up to two instances of
** %d. The values of p1 and p2 are written into the first and second
** %d.
**
** If n==-1, then the previous character is overwritten.
*/
PRIVATE char *append_str(const char *zText, int n, int p1, int p2){
static char empty[1] = { 0 };
static char *z = 0;
static int alloced = 0;
static int used = 0;
int c;
char zInt[40];
if( zText==0 ){
if( used==0 && z!=0 ) z[0] = 0;
used = 0;
return z;
}
if( n<=0 ){
if( n<0 ){
used += n;
assert( used>=0 );
}
n = lemonStrlen(zText);
}
if( (int) (n+sizeof(zInt)*2+used) >= alloced ){
alloced = n + sizeof(zInt)*2 + used + 200;
z = (char *) realloc(z, alloced);
}
if( z==0 ) return empty;
while( n-- > 0 ){
c = *(zText++);
if( c=='%' && n>0 && zText[0]=='d' ){
lemon_sprintf(zInt, "%d", p1);
p1 = p2;
lemon_strcpy(&z[used], zInt);
used += lemonStrlen(&z[used]);
zText++;
n--;
}else{
z[used++] = (char)c;
}
}
z[used] = 0;
return z;
}
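#if 0
/* Usage sketch (illustrative only, never compiled): append_str()
** accumulates text into a static buffer, substituting p1 and then p2
** for the first two %d specifiers it encounters. */
  char *z;
  append_str(0,0,0,0);                          /* reset the accumulator */
  append_str("yymsp[%d].minor.yy%d", 0, -1, 7); /* append with %d substitution */
  z = append_str(0,0,0,0);        /* z=="yymsp[-1].minor.yy7"; resets */
#endif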
/*
** Write and transform the rp->code string so that symbols are expanded.
** Populate the rp->codePrefix and rp->codeSuffix strings, as appropriate.
**
** Return 1 if the expanded code requires the "yylhsminor" local variable
** to be defined.
*/
PRIVATE int translate_code(struct lemon *lemp, struct rule *rp){
char *cp, *xp;
int i;
int rc = 0; /* True if yylhsminor is used */
int dontUseRhs0 = 0; /* If true, use of left-most RHS label is illegal */
const char *zSkip = 0; /* The zOvwrt comment within rp->code, or NULL */
char lhsused = 0; /* True if the LHS element has been used */
char lhsdirect; /* True if LHS writes directly into stack */
char used[MAXRHS]; /* True for each RHS element which is used */
char zLhs[50]; /* Convert the LHS symbol into this string */
char zOvwrt[900]; /* Comment that to allow LHS to overwrite RHS */
for(i=0; i<rp->nrhs; i++) used[i] = 0;
lhsused = 0;
if( rp->code==0 ){
static char newlinestr[2] = { '\n', '\0' };
rp->code = newlinestr;
rp->line = rp->ruleline;
rp->noCode = 1;
}else{
rp->noCode = 0;
}
if( rp->nrhs==0 ){
/* If there are no RHS symbols, then writing directly to the LHS is ok */
lhsdirect = 1;
}else if( rp->rhsalias[0]==0 ){
/* The left-most RHS symbol has no value. LHS direct is ok. But
** we have to call the destructor on the RHS symbol first. */
lhsdirect = 1;
if( has_destructor(rp->rhs[0],lemp) ){
append_str(0,0,0,0);
append_str(" yy_destructor(yypParser,%d,&yymsp[%d].minor);\n", 0,
rp->rhs[0]->index,1-rp->nrhs);
rp->codePrefix = Strsafe(append_str(0,0,0,0));
rp->noCode = 0;
}
}else if( rp->lhsalias==0 ){
/* There is no LHS value symbol. */
lhsdirect = 1;
}else if( strcmp(rp->lhsalias,rp->rhsalias[0])==0 ){
/* The LHS symbol and the left-most RHS symbol are the same, so
** direct writing is allowed */
lhsdirect = 1;
lhsused = 1;
used[0] = 1;
if( rp->lhs->dtnum!=rp->rhs[0]->dtnum ){
ErrorMsg(lemp->filename,rp->ruleline,
"%s(%s) and %s(%s) share the same label but have "
"different datatypes.",
rp->lhs->name, rp->lhsalias, rp->rhs[0]->name, rp->rhsalias[0]);
lemp->errorcnt++;
}
}else{
lemon_sprintf(zOvwrt, "/*%s-overwrites-%s*/",
rp->lhsalias, rp->rhsalias[0]);
zSkip = strstr(rp->code, zOvwrt);
if( zSkip!=0 ){
/* The code contains a special comment that indicates that it is safe
** for the LHS label to overwrite left-most RHS label. */
lhsdirect = 1;
}else{
lhsdirect = 0;
}
}
  if( lhsdirect ){
    lemon_sprintf(zLhs, "yymsp[%d].minor.yy%d",1-rp->nrhs,rp->lhs->dtnum);
  }else{
    rc = 1;
    lemon_sprintf(zLhs, "yylhsminor.yy%d",rp->lhs->dtnum);
  }
append_str(0,0,0,0);
/* This const cast is wrong but harmless, if we're careful. */
for(cp=(char *)rp->code; *cp; cp++){
if( cp==zSkip ){
append_str(zOvwrt,0,0,0);
cp += lemonStrlen(zOvwrt)-1;
dontUseRhs0 = 1;
continue;
}
if( ISALPHA(*cp) && (cp==rp->code || (!ISALNUM(cp[-1]) && cp[-1]!='_')) ){
char saved;
for(xp= &cp[1]; ISALNUM(*xp) || *xp=='_'; xp++);
saved = *xp;
*xp = 0;
if( rp->lhsalias && strcmp(cp,rp->lhsalias)==0 ){
append_str(zLhs,0,0,0);
cp = xp;
lhsused = 1;
}else{
for(i=0; i<rp->nrhs; i++){
if( rp->rhsalias[i] && strcmp(cp,rp->rhsalias[i])==0 ){
if( i==0 && dontUseRhs0 ){
ErrorMsg(lemp->filename,rp->ruleline,
"Label %s used after '%s'.",
rp->rhsalias[0], zOvwrt);
lemp->errorcnt++;
}else if( cp!=rp->code && cp[-1]=='@' ){
            /* If the argument is of the form @X then substitute
            ** the token number of X, not the value of X */
append_str("yymsp[%d].major",-1,i-rp->nrhs+1,0);
}else{
struct symbol *sp = rp->rhs[i];
int dtnum;
if( sp->type==MULTITERMINAL ){
dtnum = sp->subsym[0]->dtnum;
}else{
dtnum = sp->dtnum;
}
append_str("yymsp[%d].minor.yy%d",0,i-rp->nrhs+1, dtnum);
}
cp = xp;
used[i] = 1;
break;
}
}
}
*xp = saved;
}
append_str(cp, 1, 0, 0);
} /* End loop */
/* Main code generation completed */
cp = append_str(0,0,0,0);
if( cp && cp[0] ) rp->code = Strsafe(cp);
append_str(0,0,0,0);
/* Check to make sure the LHS has been used */
if( rp->lhsalias && !lhsused ){
ErrorMsg(lemp->filename,rp->ruleline,
"Label \"%s\" for \"%s(%s)\" is never used.",
rp->lhsalias,rp->lhs->name,rp->lhsalias);
lemp->errorcnt++;
}
/* Generate destructor code for RHS minor values which are not referenced.
** Generate error messages for unused labels and duplicate labels.
*/
for(i=0; i<rp->nrhs; i++){
if( rp->rhsalias[i] ){
if( i>0 ){
int j;
if( rp->lhsalias && strcmp(rp->lhsalias,rp->rhsalias[i])==0 ){
ErrorMsg(lemp->filename,rp->ruleline,
"%s(%s) has the same label as the LHS but is not the left-most "
"symbol on the RHS.",
rp->rhs[i]->name, rp->rhsalias[i]);
lemp->errorcnt++;
}
for(j=0; j<i; j++){
if( rp->rhsalias[j] && strcmp(rp->rhsalias[j],rp->rhsalias[i])==0 ){
ErrorMsg(lemp->filename,rp->ruleline,
"Label %s used for multiple symbols on the RHS of a rule.",
rp->rhsalias[i]);
lemp->errorcnt++;
break;
}
}
}
if( !used[i] ){
ErrorMsg(lemp->filename,rp->ruleline,
"Label %s for \"%s(%s)\" is never used.",
rp->rhsalias[i],rp->rhs[i]->name,rp->rhsalias[i]);
lemp->errorcnt++;
}
}else if( i>0 && has_destructor(rp->rhs[i],lemp) ){
append_str(" yy_destructor(yypParser,%d,&yymsp[%d].minor);\n", 0,
rp->rhs[i]->index,i-rp->nrhs+1);
}
}
/* If unable to write LHS values directly into the stack, write the
** saved LHS value now. */
if( lhsdirect==0 ){
append_str(" yymsp[%d].minor.yy%d = ", 0, 1-rp->nrhs, rp->lhs->dtnum);
append_str(zLhs, 0, 0, 0);
append_str(";\n", 0, 0, 0);
}
/* Suffix code generation complete */
cp = append_str(0,0,0,0);
if( cp && cp[0] ){
rp->codeSuffix = Strsafe(cp);
rp->noCode = 0;
}
return rc;
}
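/* Transformation sketch (hedged; "yyN" stands in for the dtnum chosen
** by print_stack_union()).  For the rule
**
**   expr(A) ::= expr(B) PLUS expr(C).  { A = B + C; }
**
** the labels map to stack slots yymsp[-2] (B) and yymsp[0] (C).  Since
** A and B differ and no overwrite comment is present, lhsdirect is 0,
** so the body becomes roughly
**
**   yylhsminor.yyN = yymsp[-2].minor.yyN + yymsp[0].minor.yyN;
**
** and the generated suffix copies yylhsminor back into the stack:
**
**   yymsp[-2].minor.yyN = yylhsminor.yyN;
*/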
/*
** Generate code which executes when the rule "rp" is reduced. Write
** the code to "out". Make sure lineno stays up-to-date.
*/
PRIVATE void emit_code(
FILE *out,
struct rule *rp,
struct lemon *lemp,
int *lineno
){
const char *cp;
/* Setup code prior to the #line directive */
if( rp->codePrefix && rp->codePrefix[0] ){
fprintf(out, "{%s", rp->codePrefix);
for(cp=rp->codePrefix; *cp; cp++){ if( *cp=='\n' ) (*lineno)++; }
}
/* Generate code to do the reduce action */
if( rp->code ){
if( !lemp->nolinenosflag ){
(*lineno)++;
tplt_linedir(out,rp->line,lemp->filename);
}
fprintf(out,"{%s",rp->code);
for(cp=rp->code; *cp; cp++){ if( *cp=='\n' ) (*lineno)++; }
fprintf(out,"}\n"); (*lineno)++;
if( !lemp->nolinenosflag ){
(*lineno)++;
tplt_linedir(out,*lineno,lemp->outname);
}
}
/* Generate breakdown code that occurs after the #line directive */
if( rp->codeSuffix && rp->codeSuffix[0] ){
fprintf(out, "%s", rp->codeSuffix);
for(cp=rp->codeSuffix; *cp; cp++){ if( *cp=='\n' ) (*lineno)++; }
}
if( rp->codePrefix ){
fprintf(out, "}\n"); (*lineno)++;
}
return;
}
/*
** Print the definition of the union used for the parser's data stack.
** This union contains fields for every possible data type for tokens
** and nonterminals. In the process of computing and printing this
** union, also set the ".dtnum" field of every terminal and nonterminal
** symbol.
*/
void print_stack_union(
FILE *out, /* The output stream */
struct lemon *lemp, /* The main info structure for this parser */
int *plineno, /* Pointer to the line number */
int mhflag /* True if generating makeheaders output */
){
int lineno; /* The line number of the output */
char **types; /* A hash table of datatypes */
int arraysize; /* Size of the "types" array */
int maxdtlength; /* Maximum length of any ".datatype" field. */
char *stddt; /* Standardized name for a datatype */
int i,j; /* Loop counters */
unsigned hash; /* For hashing the name of a type */
const char *name; /* Name of the parser */
/* Allocate and initialize types[] and allocate stddt[] */
arraysize = lemp->nsymbol * 2;
types = (char**)calloc( arraysize, sizeof(char*) );
if( types==0 ){
fprintf(stderr,"Out of memory.\n");
exit(1);
}
for(i=0; i<arraysize; i++) types[i] = 0;
maxdtlength = 0;
if( lemp->vartype ){
maxdtlength = lemonStrlen(lemp->vartype);
}
for(i=0; i<lemp->nsymbol; i++){
int len;
struct symbol *sp = lemp->symbols[i];
if( sp->datatype==0 ) continue;
len = lemonStrlen(sp->datatype);
if( len>maxdtlength ) maxdtlength = len;
}
stddt = (char*)malloc( maxdtlength*2 + 1 );
if( stddt==0 ){
fprintf(stderr,"Out of memory.\n");
exit(1);
}
/* Build a hash table of datatypes. The ".dtnum" field of each symbol
** is filled in with the hash index plus 1. A ".dtnum" value of 0 is
** used for terminal symbols. If there is no %default_type defined then
** 0 is also used as the .dtnum value for nonterminals which do not specify
** a datatype using the %type directive.
*/
for(i=0; i<lemp->nsymbol; i++){
struct symbol *sp = lemp->symbols[i];
char *cp;
if( sp==lemp->errsym ){
sp->dtnum = arraysize+1;
continue;
}
if( sp->type!=NONTERMINAL || (sp->datatype==0 && lemp->vartype==0) ){
sp->dtnum = 0;
continue;
}
cp = sp->datatype;
if( cp==0 ) cp = lemp->vartype;
j = 0;
while( ISSPACE(*cp) ) cp++;
while( *cp ) stddt[j++] = *cp++;
while( j>0 && ISSPACE(stddt[j-1]) ) j--;
stddt[j] = 0;
if( lemp->tokentype && strcmp(stddt, lemp->tokentype)==0 ){
sp->dtnum = 0;
continue;
}
hash = 0;
for(j=0; stddt[j]; j++){
hash = hash*53 + stddt[j];
}
hash = (hash & 0x7fffffff)%arraysize;
while( types[hash] ){
if( strcmp(types[hash],stddt)==0 ){
sp->dtnum = hash + 1;
break;
}
hash++;
if( hash>=(unsigned)arraysize ) hash = 0;
}
if( types[hash]==0 ){
sp->dtnum = hash + 1;
types[hash] = (char*)malloc( lemonStrlen(stddt)+1 );
if( types[hash]==0 ){
fprintf(stderr,"Out of memory.\n");
exit(1);
}
lemon_strcpy(types[hash],stddt);
}
}
/* Print out the definition of YYTOKENTYPE and YYMINORTYPE */
name = lemp->name ? lemp->name : "Parse";
lineno = *plineno;
if( mhflag ){ fprintf(out,"#if INTERFACE\n"); lineno++; }
fprintf(out,"#define %sTOKENTYPE %s\n",name,
lemp->tokentype?lemp->tokentype:"void*"); lineno++;
if( mhflag ){ fprintf(out,"#endif\n"); lineno++; }
fprintf(out,"typedef union {\n"); lineno++;
fprintf(out," int yyinit;\n"); lineno++;
fprintf(out," %sTOKENTYPE yy0;\n",name); lineno++;
for(i=0; i<arraysize; i++){
if( types[i]==0 ) continue;
fprintf(out," %s yy%d;\n",types[i],i+1); lineno++;
free(types[i]);
}
if( lemp->errsym && lemp->errsym->useCnt ){
fprintf(out," int yy%d;\n",lemp->errsym->dtnum); lineno++;
}
free(stddt);
free(types);
fprintf(out,"} YYMINORTYPE;\n"); lineno++;
*plineno = lineno;
}
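/* The generated union looks like this sketch (the member types and
** yy-numbers depend on the grammar's %type declarations; "Expr*" is
** shown only as an example):
**
**   typedef union {
**     int yyinit;
**     ParseTOKENTYPE yy0;
**     Expr* yy14;
**     int yy35;
**   } YYMINORTYPE;
*/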
/*
** Return the name of a C datatype able to represent values between
** lwr and upr, inclusive. If pnByte!=NULL then also write the sizeof
** for that type (1, 2, or 4) into *pnByte.
*/
static const char *minimum_size_type(int lwr, int upr, int *pnByte){
const char *zType = "int";
int nByte = 4;
if( lwr>=0 ){
if( upr<=255 ){
zType = "unsigned char";
nByte = 1;
}else if( upr<65535 ){
zType = "unsigned short int";
nByte = 2;
}else{
zType = "unsigned int";
nByte = 4;
}
}else if( lwr>=-127 && upr<=127 ){
zType = "signed char";
nByte = 1;
}else if( lwr>=-32767 && upr<32767 ){
zType = "short";
nByte = 2;
}
if( pnByte ) *pnByte = nByte;
return zType;
}
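/* Examples:
**
**   minimum_size_type(0, 200, &n)    -> "unsigned char",       n==1
**   minimum_size_type(0, 1000, &n)   -> "unsigned short int",  n==2
**   minimum_size_type(-5, 100, &n)   -> "signed char",         n==1
**   minimum_size_type(-5, 40000, &n) -> "int",                 n==4
*/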
/*
** Each state contains a set of token transactions and a set of
** nonterminal transactions. Each of these sets makes an instance
** of the following structure. An array of these structures is used
** to order the creation of entries in the yy_action[] table.
*/
struct axset {
struct state *stp; /* A pointer to a state */
int isTkn; /* True to use tokens. False for non-terminals */
int nAction; /* Number of actions */
int iOrder; /* Original order of action sets */
};
/*
** Compare two axset structures for sorting purposes
*/
static int axset_compare(const void *a, const void *b){
struct axset *p1 = (struct axset*)a;
struct axset *p2 = (struct axset*)b;
int c;
c = p2->nAction - p1->nAction;
if( c==0 ){
c = p1->iOrder - p2->iOrder;
}
assert( c!=0 || p1==p2 );
return c;
}
/*
** Write text on "out" that describes the rule "rp".
*/
static void writeRuleText(FILE *out, struct rule *rp){
int j;
fprintf(out,"%s ::=", rp->lhs->name);
for(j=0; j<rp->nrhs; j++){
struct symbol *sp = rp->rhs[j];
if( sp->type!=MULTITERMINAL ){
fprintf(out," %s", sp->name);
}else{
int k;
fprintf(out," %s", sp->subsym[0]->name);
for(k=1; k<sp->nsubsym; k++){
fprintf(out,"|%s",sp->subsym[k]->name);
}
}
}
}
/* Generate C source code for the parser */
void ReportTable(
struct lemon *lemp,
int mhflag, /* Output in makeheaders format if true */
int sqlFlag /* Generate the *.sql file too */
){
FILE *out, *in, *sql;
int lineno;
struct state *stp;
struct action *ap;
struct rule *rp;
struct acttab *pActtab;
int i, j, n, sz;
int nLookAhead;
int szActionType; /* sizeof(YYACTIONTYPE) */
int szCodeType; /* sizeof(YYCODETYPE) */
const char *name;
int mnTknOfst, mxTknOfst;
int mnNtOfst, mxNtOfst;
struct axset *ax;
char *prefix;
lemp->minShiftReduce = lemp->nstate;
lemp->errAction = lemp->minShiftReduce + lemp->nrule;
lemp->accAction = lemp->errAction + 1;
lemp->noAction = lemp->accAction + 1;
lemp->minReduce = lemp->noAction + 1;
lemp->maxAction = lemp->minReduce + lemp->nrule;
in = tplt_open(lemp);
if( in==0 ) return;
out = file_open(lemp,".c","wb");
if( out==0 ){
fclose(in);
return;
}
if( sqlFlag==0 ){
sql = 0;
}else{
sql = file_open(lemp, ".sql", "wb");
if( sql==0 ){
fclose(in);
fclose(out);
return;
}
fprintf(sql,
"BEGIN;\n"
"CREATE TABLE symbol(\n"
" id INTEGER PRIMARY KEY,\n"
" name TEXT NOT NULL,\n"
" isTerminal BOOLEAN NOT NULL,\n"
" fallback INTEGER REFERENCES symbol"
" DEFERRABLE INITIALLY DEFERRED\n"
");\n"
);
for(i=0; i<lemp->nsymbol; i++){
fprintf(sql,
"INSERT INTO symbol(id,name,isTerminal,fallback)"
"VALUES(%d,'%s',%s",
i, lemp->symbols[i]->name,
i<lemp->nterminal ? "TRUE" : "FALSE"
);
if( lemp->symbols[i]->fallback ){
fprintf(sql, ",%d);\n", lemp->symbols[i]->fallback->index);
}else{
fprintf(sql, ",NULL);\n");
}
}
fprintf(sql,
"CREATE TABLE rule(\n"
" ruleid INTEGER PRIMARY KEY,\n"
" lhs INTEGER REFERENCES symbol(id),\n"
" txt TEXT\n"
");\n"
"CREATE TABLE rulerhs(\n"
" ruleid INTEGER REFERENCES rule(ruleid),\n"
" pos INTEGER,\n"
" sym INTEGER REFERENCES symbol(id)\n"
");\n"
);
for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){
assert( i==rp->iRule );
fprintf(sql,
"INSERT INTO rule(ruleid,lhs,txt)VALUES(%d,%d,'",
rp->iRule, rp->lhs->index
);
writeRuleText(sql, rp);
fprintf(sql,"');\n");
for(j=0; j<rp->nrhs; j++){
struct symbol *sp = rp->rhs[j];
if( sp->type!=MULTITERMINAL ){
fprintf(sql,
"INSERT INTO rulerhs(ruleid,pos,sym)VALUES(%d,%d,%d);\n",
i,j,sp->index
);
}else{
int k;
for(k=0; k<sp->nsubsym; k++){
fprintf(sql,
"INSERT INTO rulerhs(ruleid,pos,sym)VALUES(%d,%d,%d);\n",
i,j,sp->subsym[k]->index
);
}
}
}
}
fprintf(sql, "COMMIT;\n");
}
lineno = 1;
fprintf(out,
"/* This file is automatically generated by Lemon from input grammar\n"
"** source file \"%s\". */\n", lemp->filename); lineno += 2;
  /* If the first %include directive begins with a C-language comment,
  ** then skip over the header comment of the template file.
  */
if( lemp->include==0 ) lemp->include = "";
for(i=0; ISSPACE(lemp->include[i]); i++){
if( lemp->include[i]=='\n' ){
lemp->include += i+1;
i = -1;
}
}
if( lemp->include[0]=='/' ){
tplt_skip_header(in,&lineno);
}else{
tplt_xfer(lemp->name,in,out,&lineno);
}
/* Generate the include code, if any */
tplt_print(out,lemp,lemp->include,&lineno);
if( mhflag ){
char *incName = file_makename(lemp, ".h");
fprintf(out,"#include \"%s\"\n", incName); lineno++;
free(incName);
}
tplt_xfer(lemp->name,in,out,&lineno);
/* Generate #defines for all tokens */
if( lemp->tokenprefix ) prefix = lemp->tokenprefix;
else prefix = "";
if( mhflag ){
fprintf(out,"#if INTERFACE\n"); lineno++;
}else{
fprintf(out,"#ifndef %s%s\n", prefix, lemp->symbols[1]->name);
}
for(i=1; i<lemp->nterminal; i++){
fprintf(out,"#define %s%-30s %2d\n",prefix,lemp->symbols[i]->name,i);
lineno++;
}
fprintf(out,"#endif\n"); lineno++;
tplt_xfer(lemp->name,in,out,&lineno);
/* Generate the defines */
fprintf(out,"#define YYCODETYPE %s\n",
minimum_size_type(0, lemp->nsymbol, &szCodeType)); lineno++;
fprintf(out,"#define YYNOCODE %d\n",lemp->nsymbol); lineno++;
fprintf(out,"#define YYACTIONTYPE %s\n",
minimum_size_type(0,lemp->maxAction,&szActionType)); lineno++;
if( lemp->wildcard ){
fprintf(out,"#define YYWILDCARD %d\n",
lemp->wildcard->index); lineno++;
}
print_stack_union(out,lemp,&lineno,mhflag);
fprintf(out, "#ifndef YYSTACKDEPTH\n"); lineno++;
if( lemp->stacksize ){
fprintf(out,"#define YYSTACKDEPTH %s\n",lemp->stacksize); lineno++;
}else{
fprintf(out,"#define YYSTACKDEPTH 100\n"); lineno++;
}
fprintf(out, "#endif\n"); lineno++;
if( mhflag ){
fprintf(out,"#if INTERFACE\n"); lineno++;
}
name = lemp->name ? lemp->name : "Parse";
if( lemp->arg && lemp->arg[0] ){
i = lemonStrlen(lemp->arg);
while( i>=1 && ISSPACE(lemp->arg[i-1]) ) i--;
while( i>=1 && (ISALNUM(lemp->arg[i-1]) || lemp->arg[i-1]=='_') ) i--;
fprintf(out,"#define %sARG_SDECL %s;\n",name,lemp->arg); lineno++;
fprintf(out,"#define %sARG_PDECL ,%s\n",name,lemp->arg); lineno++;
fprintf(out,"#define %sARG_PARAM ,%s\n",name,&lemp->arg[i]); lineno++;
fprintf(out,"#define %sARG_FETCH %s=yypParser->%s;\n",
name,lemp->arg,&lemp->arg[i]); lineno++;
fprintf(out,"#define %sARG_STORE yypParser->%s=%s;\n",
name,&lemp->arg[i],&lemp->arg[i]); lineno++;
}else{
fprintf(out,"#define %sARG_SDECL\n",name); lineno++;
fprintf(out,"#define %sARG_PDECL\n",name); lineno++;
fprintf(out,"#define %sARG_PARAM\n",name); lineno++;
fprintf(out,"#define %sARG_FETCH\n",name); lineno++;
fprintf(out,"#define %sARG_STORE\n",name); lineno++;
}
if( lemp->ctx && lemp->ctx[0] ){
i = lemonStrlen(lemp->ctx);
while( i>=1 && ISSPACE(lemp->ctx[i-1]) ) i--;
while( i>=1 && (ISALNUM(lemp->ctx[i-1]) || lemp->ctx[i-1]=='_') ) i--;
fprintf(out,"#define %sCTX_SDECL %s;\n",name,lemp->ctx); lineno++;
fprintf(out,"#define %sCTX_PDECL ,%s\n",name,lemp->ctx); lineno++;
fprintf(out,"#define %sCTX_PARAM ,%s\n",name,&lemp->ctx[i]); lineno++;
fprintf(out,"#define %sCTX_FETCH %s=yypParser->%s;\n",
name,lemp->ctx,&lemp->ctx[i]); lineno++;
fprintf(out,"#define %sCTX_STORE yypParser->%s=%s;\n",
name,&lemp->ctx[i],&lemp->ctx[i]); lineno++;
}else{
fprintf(out,"#define %sCTX_SDECL\n",name); lineno++;
fprintf(out,"#define %sCTX_PDECL\n",name); lineno++;
fprintf(out,"#define %sCTX_PARAM\n",name); lineno++;
fprintf(out,"#define %sCTX_FETCH\n",name); lineno++;
fprintf(out,"#define %sCTX_STORE\n",name); lineno++;
}
if( mhflag ){
fprintf(out,"#endif\n"); lineno++;
}
if( lemp->errsym && lemp->errsym->useCnt ){
fprintf(out,"#define YYERRORSYMBOL %d\n",lemp->errsym->index); lineno++;
fprintf(out,"#define YYERRSYMDT yy%d\n",lemp->errsym->dtnum); lineno++;
}
if( lemp->has_fallback ){
fprintf(out,"#define YYFALLBACK 1\n"); lineno++;
}
/* Compute the action table, but do not output it yet. The action
** table must be computed before generating the YYNSTATE macro because
** we need to know how many states can be eliminated.
*/
ax = (struct axset *) calloc(lemp->nxstate*2, sizeof(ax[0]));
if( ax==0 ){
fprintf(stderr,"malloc failed\n");
exit(1);
}
for(i=0; i<lemp->nxstate; i++){
stp = lemp->sorted[i];
ax[i*2].stp = stp;
ax[i*2].isTkn = 1;
ax[i*2].nAction = stp->nTknAct;
ax[i*2+1].stp = stp;
ax[i*2+1].isTkn = 0;
ax[i*2+1].nAction = stp->nNtAct;
}
mxTknOfst = mnTknOfst = 0;
mxNtOfst = mnNtOfst = 0;
/* In an effort to minimize the action table size, use the heuristic
** of placing the largest action sets first */
for(i=0; i<lemp->nxstate*2; i++) ax[i].iOrder = i;
qsort(ax, lemp->nxstate*2, sizeof(ax[0]), axset_compare);
pActtab = acttab_alloc(lemp->nsymbol, lemp->nterminal);
for(i=0; i<lemp->nxstate*2 && ax[i].nAction>0; i++){
stp = ax[i].stp;
if( ax[i].isTkn ){
for(ap=stp->ap; ap; ap=ap->next){
int action;
if( ap->sp->index>=lemp->nterminal ) continue;
action = compute_action(lemp, ap);
if( action<0 ) continue;
acttab_action(pActtab, ap->sp->index, action);
}
stp->iTknOfst = acttab_insert(pActtab, 1);
if( stp->iTknOfst<mnTknOfst ) mnTknOfst = stp->iTknOfst;
if( stp->iTknOfst>mxTknOfst ) mxTknOfst = stp->iTknOfst;
}else{
for(ap=stp->ap; ap; ap=ap->next){
int action;
if( ap->sp->index<lemp->nterminal ) continue;
if( ap->sp->index==lemp->nsymbol ) continue;
action = compute_action(lemp, ap);
if( action<0 ) continue;
acttab_action(pActtab, ap->sp->index, action);
}
stp->iNtOfst = acttab_insert(pActtab, 0);
if( stp->iNtOfst<mnNtOfst ) mnNtOfst = stp->iNtOfst;
if( stp->iNtOfst>mxNtOfst ) mxNtOfst = stp->iNtOfst;
}
#if 0 /* Uncomment for a trace of how the yy_action[] table fills out */
{ int jj, nn;
for(jj=nn=0; jj<pActtab->nAction; jj++){
if( pActtab->aAction[jj].action<0 ) nn++;
}
printf("%4d: State %3d %s n: %2d size: %5d freespace: %d\n",
i, stp->statenum, ax[i].isTkn ? "Token" : "Var ",
ax[i].nAction, pActtab->nAction, nn);
}
#endif
}
free(ax);
/* Mark rules that are actually used for reduce actions after all
** optimizations have been applied
*/
for(rp=lemp->rule; rp; rp=rp->next) rp->doesReduce = LEMON_FALSE;
for(i=0; i<lemp->nxstate; i++){
for(ap=lemp->sorted[i]->ap; ap; ap=ap->next){
if( ap->type==REDUCE || ap->type==SHIFTREDUCE ){
ap->x.rp->doesReduce = 1;
}
}
}
/* Finish rendering the constants now that the action table has
** been computed */
fprintf(out,"#define YYNSTATE %d\n",lemp->nxstate); lineno++;
fprintf(out,"#define YYNRULE %d\n",lemp->nrule); lineno++;
fprintf(out,"#define YYNRULE_WITH_ACTION %d\n",lemp->nruleWithAction);
lineno++;
fprintf(out,"#define YYNTOKEN %d\n",lemp->nterminal); lineno++;
fprintf(out,"#define YY_MAX_SHIFT %d\n",lemp->nxstate-1); lineno++;
i = lemp->minShiftReduce;
fprintf(out,"#define YY_MIN_SHIFTREDUCE %d\n",i); lineno++;
i += lemp->nrule;
fprintf(out,"#define YY_MAX_SHIFTREDUCE %d\n", i-1); lineno++;
fprintf(out,"#define YY_ERROR_ACTION %d\n", lemp->errAction); lineno++;
fprintf(out,"#define YY_ACCEPT_ACTION %d\n", lemp->accAction); lineno++;
fprintf(out,"#define YY_NO_ACTION %d\n", lemp->noAction); lineno++;
fprintf(out,"#define YY_MIN_REDUCE %d\n", lemp->minReduce); lineno++;
i = lemp->minReduce + lemp->nrule;
fprintf(out,"#define YY_MAX_REDUCE %d\n", i-1); lineno++;
tplt_xfer(lemp->name,in,out,&lineno);
/* Now output the action table and its associates:
**
** yy_action[] A single table containing all actions.
** yy_lookahead[] A table containing the lookahead for each entry in
** yy_action. Used to detect hash collisions.
** yy_shift_ofst[] For each state, the offset into yy_action for
** shifting terminals.
** yy_reduce_ofst[] For each state, the offset into yy_action for
** shifting non-terminals after a reduce.
** yy_default[] Default action for each state.
*/
/* Output the yy_action table */
lemp->nactiontab = n = acttab_action_size(pActtab);
lemp->tablesize += n*szActionType;
fprintf(out,"#define YY_ACTTAB_COUNT (%d)\n", n); lineno++;
fprintf(out,"static const YYACTIONTYPE yy_action[] = {\n"); lineno++;
for(i=j=0; i<n; i++){
int action = acttab_yyaction(pActtab, i);
if( action<0 ) action = lemp->noAction;
if( j==0 ) fprintf(out," /* %5d */ ", i);
fprintf(out, " %4d,", action);
if( j==9 || i==n-1 ){
fprintf(out, "\n"); lineno++;
j = 0;
}else{
j++;
}
}
fprintf(out, "};\n"); lineno++;
/* Output the yy_lookahead table */
lemp->nlookaheadtab = n = acttab_lookahead_size(pActtab);
lemp->tablesize += n*szCodeType;
fprintf(out,"static const YYCODETYPE yy_lookahead[] = {\n"); lineno++;
for(i=j=0; i<n; i++){
int la = acttab_yylookahead(pActtab, i);
if( la<0 ) la = lemp->nsymbol;
if( j==0 ) fprintf(out," /* %5d */ ", i);
fprintf(out, " %4d,", la);
if( j==9 ){
fprintf(out, "\n"); lineno++;
j = 0;
}else{
j++;
}
}
/* Add extra entries to the end of the yy_lookahead[] table so that
** yy_shift_ofst[]+iToken will always be a valid index into the array,
** even for the largest possible value of yy_shift_ofst[] and iToken. */
nLookAhead = lemp->nterminal + lemp->nactiontab;
while( i<nLookAhead ){
if( j==0 ) fprintf(out," /* %5d */ ", i);
fprintf(out, " %4d,", lemp->nterminal);
if( j==9 ){
fprintf(out, "\n"); lineno++;
j = 0;
}else{
j++;
}
i++;
}
if( j>0 ){ fprintf(out, "\n"); lineno++; }
fprintf(out, "};\n"); lineno++;
/* Output the yy_shift_ofst[] table */
n = lemp->nxstate;
while( n>0 && lemp->sorted[n-1]->iTknOfst==NO_OFFSET ) n--;
fprintf(out, "#define YY_SHIFT_COUNT (%d)\n", n-1); lineno++;
fprintf(out, "#define YY_SHIFT_MIN (%d)\n", mnTknOfst); lineno++;
fprintf(out, "#define YY_SHIFT_MAX (%d)\n", mxTknOfst); lineno++;
fprintf(out, "static const %s yy_shift_ofst[] = {\n",
minimum_size_type(mnTknOfst, lemp->nterminal+lemp->nactiontab, &sz));
lineno++;
lemp->tablesize += n*sz;
for(i=j=0; i<n; i++){
int ofst;
stp = lemp->sorted[i];
ofst = stp->iTknOfst;
if( ofst==NO_OFFSET ) ofst = lemp->nactiontab;
if( j==0 ) fprintf(out," /* %5d */ ", i);
fprintf(out, " %4d,", ofst);
if( j==9 || i==n-1 ){
fprintf(out, "\n"); lineno++;
j = 0;
}else{
j++;
}
}
fprintf(out, "};\n"); lineno++;
/* Output the yy_reduce_ofst[] table */
n = lemp->nxstate;
while( n>0 && lemp->sorted[n-1]->iNtOfst==NO_OFFSET ) n--;
fprintf(out, "#define YY_REDUCE_COUNT (%d)\n", n-1); lineno++;
fprintf(out, "#define YY_REDUCE_MIN (%d)\n", mnNtOfst); lineno++;
fprintf(out, "#define YY_REDUCE_MAX (%d)\n", mxNtOfst); lineno++;
fprintf(out, "static const %s yy_reduce_ofst[] = {\n",
minimum_size_type(mnNtOfst-1, mxNtOfst, &sz)); lineno++;
lemp->tablesize += n*sz;
for(i=j=0; i<n; i++){
int ofst;
stp = lemp->sorted[i];
ofst = stp->iNtOfst;
if( ofst==NO_OFFSET ) ofst = mnNtOfst - 1;
if( j==0 ) fprintf(out," /* %5d */ ", i);
fprintf(out, " %4d,", ofst);
if( j==9 || i==n-1 ){
fprintf(out, "\n"); lineno++;
j = 0;
}else{
j++;
}
}
fprintf(out, "};\n"); lineno++;
/* Output the default action table */
fprintf(out, "static const YYACTIONTYPE yy_default[] = {\n"); lineno++;
n = lemp->nxstate;
lemp->tablesize += n*szActionType;
for(i=j=0; i<n; i++){
stp = lemp->sorted[i];
if( j==0 ) fprintf(out," /* %5d */ ", i);
if( stp->iDfltReduce<0 ){
fprintf(out, " %4d,", lemp->errAction);
}else{
fprintf(out, " %4d,", stp->iDfltReduce + lemp->minReduce);
}
if( j==9 || i==n-1 ){
fprintf(out, "\n"); lineno++;
j = 0;
}else{
j++;
}
}
fprintf(out, "};\n"); lineno++;
tplt_xfer(lemp->name,in,out,&lineno);
/* Generate the table of fallback tokens.
*/
if( lemp->has_fallback ){
int mx = lemp->nterminal - 1;
/* 2019-08-28: Generate fallback entries for every token to avoid
** having to do a range check on the index */
/* while( mx>0 && lemp->symbols[mx]->fallback==0 ){ mx--; } */
lemp->tablesize += (mx+1)*szCodeType;
for(i=0; i<=mx; i++){
struct symbol *p = lemp->symbols[i];
if( p->fallback==0 ){
fprintf(out, " 0, /* %10s => nothing */\n", p->name);
}else{
fprintf(out, " %3d, /* %10s => %s */\n", p->fallback->index,
p->name, p->fallback->name);
}
lineno++;
}
}
tplt_xfer(lemp->name, in, out, &lineno);
/* Generate a table containing the symbolic name of every symbol
*/
for(i=0; i<lemp->nsymbol; i++){
fprintf(out," /* %4d */ \"%s\",\n",i, lemp->symbols[i]->name); lineno++;
}
tplt_xfer(lemp->name,in,out,&lineno);
/* Generate a table containing a text string that describes every
** rule in the rule set of the grammar. This information is used
** when tracing REDUCE actions.
*/
for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){
assert( rp->iRule==i );
fprintf(out," /* %3d */ \"", i);
writeRuleText(out, rp);
fprintf(out,"\",\n"); lineno++;
}
tplt_xfer(lemp->name,in,out,&lineno);
/* Generate code which executes every time a symbol is popped from
** the stack while processing errors or while destroying the parser.
** (In other words, generate the %destructor actions)
*/
if( lemp->tokendest ){
int once = 1;
for(i=0; i<lemp->nsymbol; i++){
struct symbol *sp = lemp->symbols[i];
if( sp==0 || sp->type!=TERMINAL ) continue;
if( once ){
fprintf(out, " /* TERMINAL Destructor */\n"); lineno++;
once = 0;
}
fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++;
}
for(i=0; i<lemp->nsymbol && lemp->symbols[i]->type!=TERMINAL; i++);
if( i<lemp->nsymbol ){
emit_destructor_code(out,lemp->symbols[i],lemp,&lineno);
fprintf(out," break;\n"); lineno++;
}
}
if( lemp->vardest ){
struct symbol *dflt_sp = 0;
int once = 1;
for(i=0; i<lemp->nsymbol; i++){
struct symbol *sp = lemp->symbols[i];
if( sp==0 || sp->type==TERMINAL ||
sp->index<=0 || sp->destructor!=0 ) continue;
if( once ){
fprintf(out, " /* Default NON-TERMINAL Destructor */\n");lineno++;
once = 0;
}
fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++;
dflt_sp = sp;
}
if( dflt_sp!=0 ){
emit_destructor_code(out,dflt_sp,lemp,&lineno);
}
fprintf(out," break;\n"); lineno++;
}
for(i=0; i<lemp->nsymbol; i++){
struct symbol *sp = lemp->symbols[i];
if( sp==0 || sp->type==TERMINAL || sp->destructor==0 ) continue;
if( sp->destLineno<0 ) continue; /* Already emitted */
fprintf(out," case %d: /* %s */\n", sp->index, sp->name); lineno++;
/* Combine duplicate destructors into a single case */
for(j=i+1; j<lemp->nsymbol; j++){
struct symbol *sp2 = lemp->symbols[j];
if( sp2 && sp2->type!=TERMINAL && sp2->destructor
&& sp2->dtnum==sp->dtnum
&& strcmp(sp->destructor,sp2->destructor)==0 ){
fprintf(out," case %d: /* %s */\n",
sp2->index, sp2->name); lineno++;
sp2->destLineno = -1; /* Avoid emitting this destructor again */
}
}
emit_destructor_code(out,lemp->symbols[i],lemp,&lineno);
fprintf(out," break;\n"); lineno++;
}
tplt_xfer(lemp->name,in,out,&lineno);
/* Generate code which executes whenever the parser stack overflows */
tplt_print(out,lemp,lemp->overflow,&lineno);
tplt_xfer(lemp->name,in,out,&lineno);
/* Generate the tables of rule information. yyRuleInfoLhs[] and
** yyRuleInfoNRhs[].
**
  ** Note: This code depends on the fact that rules are numbered
  ** sequentially beginning with 0.
*/
for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){
fprintf(out," %4d, /* (%d) ", rp->lhs->index, i);
rule_print(out, rp);
fprintf(out," */\n"); lineno++;
}
tplt_xfer(lemp->name,in,out,&lineno);
for(i=0, rp=lemp->rule; rp; rp=rp->next, i++){
fprintf(out," %3d, /* (%d) ", -rp->nrhs, i);
rule_print(out, rp);
fprintf(out," */\n"); lineno++;
}
tplt_xfer(lemp->name,in,out,&lineno);
  /* Generate code which executes during each REDUCE action */
i = 0;
for(rp=lemp->rule; rp; rp=rp->next){
i += translate_code(lemp, rp);
}
if( i ){
fprintf(out," YYMINORTYPE yylhsminor;\n"); lineno++;
}
/* First output rules other than the default: rule */
for(rp=lemp->rule; rp; rp=rp->next){
struct rule *rp2; /* Other rules with the same action */
if( rp->codeEmitted ) continue;
if( rp->noCode ){
/* No C code actions, so this will be part of the "default:" rule */
continue;
}
fprintf(out," case %d: /* ", rp->iRule);
writeRuleText(out, rp);
fprintf(out, " */\n"); lineno++;
for(rp2=rp->next; rp2; rp2=rp2->next){
if( rp2->code==rp->code && rp2->codePrefix==rp->codePrefix
&& rp2->codeSuffix==rp->codeSuffix ){
fprintf(out," case %d: /* ", rp2->iRule);
writeRuleText(out, rp2);
fprintf(out," */ yytestcase(yyruleno==%d);\n", rp2->iRule); lineno++;
rp2->codeEmitted = 1;
}
}
emit_code(out,rp,lemp,&lineno);
fprintf(out," break;\n"); lineno++;
rp->codeEmitted = 1;
}
  /* Finally, output the default: rule.  We choose as the default: all
  ** rules with empty actions. */
fprintf(out," default:\n"); lineno++;
for(rp=lemp->rule; rp; rp=rp->next){
if( rp->codeEmitted ) continue;
assert( rp->noCode );
fprintf(out," /* (%d) ", rp->iRule);
writeRuleText(out, rp);
if( rp->neverReduce ){
fprintf(out, " (NEVER REDUCES) */ assert(yyruleno!=%d);\n",
rp->iRule); lineno++;
}else if( rp->doesReduce ){
fprintf(out, " */ yytestcase(yyruleno==%d);\n", rp->iRule); lineno++;
}else{
fprintf(out, " (OPTIMIZED OUT) */ assert(yyruleno!=%d);\n",
rp->iRule); lineno++;
}
}
fprintf(out," break;\n"); lineno++;
tplt_xfer(lemp->name,in,out,&lineno);
/* Generate code which executes if a parse fails */
tplt_print(out,lemp,lemp->failure,&lineno);
tplt_xfer(lemp->name,in,out,&lineno);
/* Generate code which executes when a syntax error occurs */
tplt_print(out,lemp,lemp->error,&lineno);
tplt_xfer(lemp->name,in,out,&lineno);
/* Generate code which executes when the parser accepts its input */
tplt_print(out,lemp,lemp->accept,&lineno);
tplt_xfer(lemp->name,in,out,&lineno);
/* Append any additional code the user desires */
tplt_print(out,lemp,lemp->extracode,&lineno);
acttab_free(pActtab);
fclose(in);
fclose(out);
if( sql ) fclose(sql);
return;
}
/* Generate a header file for the parser */
void ReportHeader(struct lemon *lemp)
{
FILE *out, *in;
const char *prefix;
char line[LINESIZE];
char pattern[LINESIZE];
int i;
if( lemp->tokenprefix ) prefix = lemp->tokenprefix;
else prefix = "";
in = file_open(lemp,".h","rb");
if( in ){
int nextChar;
for(i=1; i<lemp->nterminal && fgets(line,LINESIZE,in); i++){
lemon_sprintf(pattern,"#define %s%-30s %3d\n",
prefix,lemp->symbols[i]->name,i);
if( strcmp(line,pattern) ) break;
}
nextChar = fgetc(in);
fclose(in);
if( i==lemp->nterminal && nextChar==EOF ){
/* No change in the file. Don't rewrite it. */
return;
}
}
out = file_open(lemp,".h","wb");
if( out ){
for(i=1; i<lemp->nterminal; i++){
fprintf(out,"#define %s%-30s %3d\n",prefix,lemp->symbols[i]->name,i);
}
fclose(out);
}
return;
}
/* Reduce the size of the action tables, if possible, by making use
** of defaults.
**
** In this version, we take the most frequent REDUCE action and make
** it the default. Except, there is no default if the wildcard token
** is a possible look-ahead.
*/
void CompressTables(struct lemon *lemp)
{
struct state *stp;
struct action *ap, *ap2, *nextap;
struct rule *rp, *rp2, *rbest;
int nbest, n;
int i;
int usesWildcard;
for(i=0; i<lemp->nstate; i++){
stp = lemp->sorted[i];
nbest = 0;
rbest = 0;
usesWildcard = 0;
for(ap=stp->ap; ap; ap=ap->next){
if( ap->type==SHIFT && ap->sp==lemp->wildcard ){
usesWildcard = 1;
}
if( ap->type!=REDUCE ) continue;
rp = ap->x.rp;
if( rp->lhsStart ) continue;
if( rp==rbest ) continue;
n = 1;
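/* Count how many REDUCE actions in this state reduce by this same rule */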
for(ap2=ap->next; ap2; ap2=ap2->next){
if( ap2->type!=REDUCE ) continue;
rp2 = ap2->x.rp;
if( rp2==rbest ) continue;
if( rp2==rp ) n++;
}
if( n>nbest ){
nbest = n;
rbest = rp;
}
}
/* Do not make a default if the number of rules to default
** is not at least 1 or if the wildcard token is a possible
** lookahead.
*/
if( nbest<1 || usesWildcard ) continue;
/* Combine matching REDUCE actions into a single default */
for(ap=stp->ap; ap; ap=ap->next){
if( ap->type==REDUCE && ap->x.rp==rbest ) break;
}
assert( ap );
ap->sp = Symbol_new("{default}");
for(ap=ap->next; ap; ap=ap->next){
if( ap->type==REDUCE && ap->x.rp==rbest ) ap->type = NOT_USED;
}
stp->ap = Action_sort(stp->ap);
for(ap=stp->ap; ap; ap=ap->next){
if( ap->type==SHIFT ) break;
if( ap->type==REDUCE && ap->x.rp!=rbest ) break;
}
if( ap==0 ){
stp->autoReduce = 1;
stp->pDfltReduce = rbest;
}
}
/* Make a second pass over all states and actions. Convert
** every action that is a SHIFT to an autoReduce state into
** a SHIFTREDUCE action.
*/
for(i=0; i<lemp->nstate; i++){
stp = lemp->sorted[i];
for(ap=stp->ap; ap; ap=ap->next){
struct state *pNextState;
if( ap->type!=SHIFT ) continue;
pNextState = ap->x.stp;
if( pNextState->autoReduce && pNextState->pDfltReduce!=0 ){
ap->type = SHIFTREDUCE;
ap->x.rp = pNextState->pDfltReduce;
}
}
}
/* If a SHIFTREDUCE action specifies a rule that has a single RHS term
** (meaning that the SHIFTREDUCE will land back in the state where it
** started) and if there is no C-code associated with the reduce action,
** then we can go ahead and convert the action to be the same as the
** action for the RHS of the rule.
*/
for(i=0; i<lemp->nstate; i++){
stp = lemp->sorted[i];
for(ap=stp->ap; ap; ap=nextap){
nextap = ap->next;
if( ap->type!=SHIFTREDUCE ) continue;
rp = ap->x.rp;
if( rp->noCode==0 ) continue;
if( rp->nrhs!=1 ) continue;
#if 1
/* Only apply this optimization to non-terminals. It would be OK to
** apply it to terminal symbols too, but that makes the parser tables
** larger. */
if( ap->sp->index<lemp->nterminal ) continue;
#endif
/* If we reach this point, it means the optimization can be applied */
nextap = ap;
for(ap2=stp->ap; ap2 && (ap2==ap || ap2->sp!=rp->lhs); ap2=ap2->next){}
assert( ap2!=0 );
ap->spOpt = ap2->sp;
ap->type = ap2->type;
ap->x = ap2->x;
}
}
}
/*
** Compare two states for sorting purposes. The smaller state is the
** one with the most non-terminal actions. If they have the same number
** of non-terminal actions, then the smaller is the one with the most
** token actions.
*/
static int stateResortCompare(const void *a, const void *b){
const struct state *pA = *(const struct state**)a;
const struct state *pB = *(const struct state**)b;
int n;
n = pB->nNtAct - pA->nNtAct;
if( n==0 ){
n = pB->nTknAct - pA->nTknAct;
if( n==0 ){
n = pB->statenum - pA->statenum;
}
}
assert( n!=0 );
return n;
}
/*
** Renumber and resort states so that states with fewer choices
** occur at the end. Except, keep state 0 as the first state.
*/
void ResortStates(struct lemon *lemp)
{
int i;
struct state *stp;
struct action *ap;
for(i=0; i<lemp->nstate; i++){
stp = lemp->sorted[i];
stp->nTknAct = stp->nNtAct = 0;
stp->iDfltReduce = -1; /* Init dflt action to "syntax error" */
stp->iTknOfst = NO_OFFSET;
stp->iNtOfst = NO_OFFSET;
for(ap=stp->ap; ap; ap=ap->next){
int iAction = compute_action(lemp,ap);
if( iAction>=0 ){
if( ap->sp->index<lemp->nterminal ){
stp->nTknAct++;
}else if( ap->sp->index<lemp->nsymbol ){
stp->nNtAct++;
}else{
assert( stp->autoReduce==0 || stp->pDfltReduce==ap->x.rp );
stp->iDfltReduce = iAction;
}
}
}
}
qsort(&lemp->sorted[1], lemp->nstate-1, sizeof(lemp->sorted[0]),
stateResortCompare);
for(i=0; i<lemp->nstate; i++){
lemp->sorted[i]->statenum = i;
}
lemp->nxstate = lemp->nstate;
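/* States containing nothing but a default reduce action were sorted to
** the end; they need no entries in the action tables, so exclude them
** from the state count used when emitting the tables. */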
while( lemp->nxstate>1 && lemp->sorted[lemp->nxstate-1]->autoReduce ){
lemp->nxstate--;
}
}
/***************** From the file "set.c" ************************************/
/*
** Set manipulation routines for the LEMON parser generator.
*/
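/* A set is a byte array in which s[e]!=0 means that element e is a member.
** All sets share the single global size configured by SetSize(). */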
static int size = 0;
/* Set the set size */
void SetSize(int n)
{
size = n+1;
}
/* Allocate a new set */
char *SetNew(void){
char *s;
s = (char*)calloc( size, 1);
if( s==0 ){
memory_error();
}
return s;
}
/* Deallocate a set */
void SetFree(char *s)
{
free(s);
}
/* Add a new element to the set. Return TRUE if the element was added
** and FALSE if it was already there. */
int SetAdd(char *s, int e)
{
int rv;
assert( e>=0 && e<size );
rv = s[e];
s[e] = 1;
return !rv;
}
/* Add every element of s2 to s1. Return TRUE if s1 changes. */
int SetUnion(char *s1, char *s2)
{
int i, progress;
progress = 0;
for(i=0; i<size; i++){
if( s2[i]==0 ) continue;
if( s1[i]==0 ){
progress = 1;
s1[i] = 1;
}
}
return progress;
}
/********************** From the file "table.c" ****************************/
/*
** All code in this file has been automatically generated
** from a specification in the file
** "table.q"
** by the associative array code building program "aagen".
** Do not edit this file! Instead, edit the specification
** file, then rerun aagen.
*/
/*
** Code for processing tables in the LEMON parser generator.
*/
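/* Hash a string by a repeated multiply-by-13-and-add over its characters */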
PRIVATE unsigned strhash(const char *x)
{
unsigned h = 0;
while( *x ) h = h*13 + *(x++);
return h;
}
/* Works like strdup, sort of. Save a string in malloced memory, but
** keep strings in a table so that the same string is not in more
** than one place.
*/
const char *Strsafe(const char *y)
{
const char *z;
char *cpy;
if( y==0 ) return 0;
z = Strsafe_find(y);
if( z==0 && (cpy=(char *)malloc( lemonStrlen(y)+1 ))!=0 ){
lemon_strcpy(cpy,y);
z = cpy;
Strsafe_insert(z);
}
MemoryCheck(z);
return z;
}
/* There is one instance of the following structure for each
** associative array of type "x1".
*/
struct s_x1 {
int size; /* The number of available slots. */
/* Must be a power of 2 greater than or */
/* equal to 1 */
int count; /* Number of slots currently filled */
struct s_x1node *tbl; /* The data stored here */
struct s_x1node **ht; /* Hash table for lookups */
};
/* There is one instance of this structure for every data element
** in an associative array of type "x1".
*/
typedef struct s_x1node {
const char *data; /* The data */
struct s_x1node *next; /* Next entry with the same hash */
struct s_x1node **from; /* Previous link */
} x1node;
/* There is only one instance of the array, which is the following */
static struct s_x1 *x1a;
/* Allocate a new associative array */
void Strsafe_init(void){
if( x1a ) return;
x1a = (struct s_x1*)malloc( sizeof(struct s_x1) );
if( x1a ){
x1a->size = 1024;
x1a->count = 0;
x1a->tbl = (x1node*)calloc(1024, sizeof(x1node) + sizeof(x1node*));
if( x1a->tbl==0 ){
free(x1a);
x1a = 0;
}else{
int i;
x1a->ht = (x1node**)&(x1a->tbl[1024]);
for(i=0; i<1024; i++) x1a->ht[i] = 0;
}
}
}
/* Insert a new record into the array. Return TRUE if successful.
** Prior data with the same key is NOT overwritten */
int Strsafe_insert(const char *data)
{
x1node *np;
unsigned h;
unsigned ph;
if( x1a==0 ) return 0;
ph = strhash(data);
h = ph & (x1a->size-1);
np = x1a->ht[h];
while( np ){
if( strcmp(np->data,data)==0 ){
/* An existing entry with the same key is found. */
/* Fail because overwriting is not allowed. */
return 0;
}
np = np->next;
}
if( x1a->count>=x1a->size ){
/* Need to make the hash table bigger */
int i,arrSize;
struct s_x1 array;
array.size = arrSize = x1a->size*2;
array.count = x1a->count;
array.tbl = (x1node*)calloc(arrSize, sizeof(x1node) + sizeof(x1node*));
if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
array.ht = (x1node**)&(array.tbl[arrSize]);
for(i=0; i<arrSize; i++) array.ht[i] = 0;
for(i=0; i<x1a->count; i++){
x1node *oldnp, *newnp;
oldnp = &(x1a->tbl[i]);
h = strhash(oldnp->data) & (arrSize-1);
newnp = &(array.tbl[i]);
if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
newnp->next = array.ht[h];
newnp->data = oldnp->data;
newnp->from = &(array.ht[h]);
array.ht[h] = newnp;
}
/* free(x1a->tbl); // This program was originally written for 16-bit machines.
** Don't worry about freeing memory on modern platforms. */
*x1a = array;
}
/* Insert the new data */
h = ph & (x1a->size-1);
np = &(x1a->tbl[x1a->count++]);
np->data = data;
if( x1a->ht[h] ) x1a->ht[h]->from = &(np->next);
np->next = x1a->ht[h];
x1a->ht[h] = np;
np->from = &(x1a->ht[h]);
return 1;
}
/* Return a pointer to data assigned to the given key. Return NULL
** if no such key. */
const char *Strsafe_find(const char *key)
{
unsigned h;
x1node *np;
if( x1a==0 ) return 0;
h = strhash(key) & (x1a->size-1);
np = x1a->ht[h];
while( np ){
if( strcmp(np->data,key)==0 ) break;
np = np->next;
}
return np ? np->data : 0;
}
/* Return a pointer to the (terminal or nonterminal) symbol "x".
** Create a new symbol if this is the first time "x" has been seen.
*/
struct symbol *Symbol_new(const char *x)
{
struct symbol *sp;
sp = Symbol_find(x);
if( sp==0 ){
sp = (struct symbol *)calloc(1, sizeof(struct symbol) );
MemoryCheck(sp);
sp->name = Strsafe(x);
sp->type = ISUPPER(*x) ? TERMINAL : NONTERMINAL;
sp->rule = 0;
sp->fallback = 0;
sp->prec = -1;
sp->assoc = UNK;
sp->firstset = 0;
sp->lambda = LEMON_FALSE;
sp->destructor = 0;
sp->destLineno = 0;
sp->datatype = 0;
sp->useCnt = 0;
Symbol_insert(sp,sp->name);
}
sp->useCnt++;
return sp;
}
/* Compare two symbols for sorting purposes. Return negative,
** zero, or positive if a is less than, equal to, or greater
** than b.
**
** Symbols that begin with upper case letters (terminals or tokens)
** must sort before symbols that begin with lower case letters
** (non-terminals). And MULTITERMINAL symbols (created using the
** %token_class directive) must sort at the very end. Other than
** that, the order does not matter.
**
** We find experimentally that leaving the symbols in their original
** order (the order they appeared in the grammar file) gives the
** smallest parser tables in SQLite.
*/
int Symbolcmpp(const void *_a, const void *_b)
{
const struct symbol *a = *(const struct symbol **) _a;
const struct symbol *b = *(const struct symbol **) _b;
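/* Compute a sort class: 1 = terminal (upper-case initial), 2 = non-terminal
** (lower-case initial, hence name[0]>'Z'), 3 = MULTITERMINAL */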
int i1 = a->type==MULTITERMINAL ? 3 : a->name[0]>'Z' ? 2 : 1;
int i2 = b->type==MULTITERMINAL ? 3 : b->name[0]>'Z' ? 2 : 1;
return i1==i2 ? a->index - b->index : i1 - i2;
}
/* There is one instance of the following structure for each
** associative array of type "x2".
*/
struct s_x2 {
int size; /* The number of available slots. */
/* Must be a power of 2 greater than or */
/* equal to 1 */
int count; /* Number of slots currently filled */
struct s_x2node *tbl; /* The data stored here */
struct s_x2node **ht; /* Hash table for lookups */
};
/* There is one instance of this structure for every data element
** in an associative array of type "x2".
*/
typedef struct s_x2node {
struct symbol *data; /* The data */
const char *key; /* The key */
struct s_x2node *next; /* Next entry with the same hash */
struct s_x2node **from; /* Previous link */
} x2node;
/* There is only one instance of the array, which is the following */
static struct s_x2 *x2a;
/* Allocate a new associative array */
void Symbol_init(void){
if( x2a ) return;
x2a = (struct s_x2*)malloc( sizeof(struct s_x2) );
if( x2a ){
x2a->size = 128;
x2a->count = 0;
x2a->tbl = (x2node*)calloc(128, sizeof(x2node) + sizeof(x2node*));
if( x2a->tbl==0 ){
free(x2a);
x2a = 0;
}else{
int i;
x2a->ht = (x2node**)&(x2a->tbl[128]);
for(i=0; i<128; i++) x2a->ht[i] = 0;
}
}
}
/* Insert a new record into the array. Return TRUE if successful.
** Prior data with the same key is NOT overwritten */
int Symbol_insert(struct symbol *data, const char *key)
{
x2node *np;
unsigned h;
unsigned ph;
if( x2a==0 ) return 0;
ph = strhash(key);
h = ph & (x2a->size-1);
np = x2a->ht[h];
while( np ){
if( strcmp(np->key,key)==0 ){
/* An existing entry with the same key is found. */
/* Fail because overwriting is not allowed. */
return 0;
}
np = np->next;
}
if( x2a->count>=x2a->size ){
/* Need to make the hash table bigger */
int i,arrSize;
struct s_x2 array;
array.size = arrSize = x2a->size*2;
array.count = x2a->count;
array.tbl = (x2node*)calloc(arrSize, sizeof(x2node) + sizeof(x2node*));
if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
array.ht = (x2node**)&(array.tbl[arrSize]);
for(i=0; i<arrSize; i++) array.ht[i] = 0;
for(i=0; i<x2a->count; i++){
x2node *oldnp, *newnp;
oldnp = &(x2a->tbl[i]);
h = strhash(oldnp->key) & (arrSize-1);
newnp = &(array.tbl[i]);
if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
newnp->next = array.ht[h];
newnp->key = oldnp->key;
newnp->data = oldnp->data;
newnp->from = &(array.ht[h]);
array.ht[h] = newnp;
}
/* free(x2a->tbl); // This program was originally written for 16-bit
** machines. Don't worry about freeing this trivial amount of memory
** on modern platforms. Just leak it. */
*x2a = array;
}
/* Insert the new data */
h = ph & (x2a->size-1);
np = &(x2a->tbl[x2a->count++]);
np->key = key;
np->data = data;
if( x2a->ht[h] ) x2a->ht[h]->from = &(np->next);
np->next = x2a->ht[h];
x2a->ht[h] = np;
np->from = &(x2a->ht[h]);
return 1;
}
/* Return a pointer to data assigned to the given key. Return NULL
** if no such key. */
struct symbol *Symbol_find(const char *key)
{
unsigned h;
x2node *np;
if( x2a==0 ) return 0;
h = strhash(key) & (x2a->size-1);
np = x2a->ht[h];
while( np ){
if( strcmp(np->key,key)==0 ) break;
np = np->next;
}
return np ? np->data : 0;
}
/* Return the n-th data. Return NULL if n is out of range. */
struct symbol *Symbol_Nth(int n)
{
struct symbol *data;
if( x2a && n>0 && n<=x2a->count ){
data = x2a->tbl[n-1].data;
}else{
data = 0;
}
return data;
}
/* Return the size of the array */
int Symbol_count()
{
return x2a ? x2a->count : 0;
}
/* Return an array of pointers to all data in the table.
** The array is obtained from malloc. Return NULL if there is a memory
** allocation problem, or if the array is empty. */
struct symbol **Symbol_arrayof()
{
struct symbol **array;
int i,arrSize;
if( x2a==0 ) return 0;
arrSize = x2a->count;
array = (struct symbol **)calloc(arrSize, sizeof(struct symbol *));
if( array ){
for(i=0; i<arrSize; i++) array[i] = x2a->tbl[i].data;
}
return array;
}
/* Compare two configurations */
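/* The (const char*) parameters are really struct config pointers; they are
** declared this way so the routine can serve as a generic comparator. */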
int Configcmp(const char *_a,const char *_b)
{
const struct config *a = (struct config *) _a;
const struct config *b = (struct config *) _b;
int x;
x = a->rp->index - b->rp->index;
if( x==0 ) x = a->dot - b->dot;
return x;
}
/* Compare two states */
PRIVATE int statecmp(struct config *a, struct config *b)
{
int rc;
for(rc=0; rc==0 && a && b; a=a->bp, b=b->bp){
rc = a->rp->index - b->rp->index;
if( rc==0 ) rc = a->dot - b->dot;
}
if( rc==0 ){
if( a ) rc = 1;
if( b ) rc = -1;
}
return rc;
}
/* Hash a state */
PRIVATE unsigned statehash(struct config *a)
{
unsigned h=0;
while( a ){
h = h*571 + a->rp->index*37 + a->dot;
a = a->bp;
}
return h;
}
/* Allocate a new state structure */
struct state *State_new()
{
struct state *newstate;
newstate = (struct state *)calloc(1, sizeof(struct state) );
MemoryCheck(newstate);
return newstate;
}
/* There is one instance of the following structure for each
** associative array of type "x3".
*/
struct s_x3 {
int size; /* The number of available slots. */
/* Must be a power of 2 greater than or */
/* equal to 1 */
int count; /* Number of slots currently filled */
struct s_x3node *tbl; /* The data stored here */
struct s_x3node **ht; /* Hash table for lookups */
};
/* There is one instance of this structure for every data element
** in an associative array of type "x3".
*/
typedef struct s_x3node {
struct state *data; /* The data */
struct config *key; /* The key */
struct s_x3node *next; /* Next entry with the same hash */
struct s_x3node **from; /* Previous link */
} x3node;
/* There is only one instance of the array, which is the following */
static struct s_x3 *x3a;
/* Allocate a new associative array */
void State_init(void){
if( x3a ) return;
x3a = (struct s_x3*)malloc( sizeof(struct s_x3) );
if( x3a ){
x3a->size = 128;
x3a->count = 0;
x3a->tbl = (x3node*)calloc(128, sizeof(x3node) + sizeof(x3node*));
if( x3a->tbl==0 ){
free(x3a);
x3a = 0;
}else{
int i;
x3a->ht = (x3node**)&(x3a->tbl[128]);
for(i=0; i<128; i++) x3a->ht[i] = 0;
}
}
}
/* Insert a new record into the array. Return TRUE if successful.
** Prior data with the same key is NOT overwritten */
int State_insert(struct state *data, struct config *key)
{
x3node *np;
unsigned h;
unsigned ph;
if( x3a==0 ) return 0;
ph = statehash(key);
h = ph & (x3a->size-1);
np = x3a->ht[h];
while( np ){
if( statecmp(np->key,key)==0 ){
/* An existing entry with the same key is found. */
/* Fail because overwriting is not allowed. */
return 0;
}
np = np->next;
}
if( x3a->count>=x3a->size ){
/* Need to make the hash table bigger */
int i,arrSize;
struct s_x3 array;
array.size = arrSize = x3a->size*2;
array.count = x3a->count;
array.tbl = (x3node*)calloc(arrSize, sizeof(x3node) + sizeof(x3node*));
if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
array.ht = (x3node**)&(array.tbl[arrSize]);
for(i=0; i<arrSize; i++) array.ht[i] = 0;
for(i=0; i<x3a->count; i++){
x3node *oldnp, *newnp;
oldnp = &(x3a->tbl[i]);
h = statehash(oldnp->key) & (arrSize-1);
newnp = &(array.tbl[i]);
if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
newnp->next = array.ht[h];
newnp->key = oldnp->key;
newnp->data = oldnp->data;
newnp->from = &(array.ht[h]);
array.ht[h] = newnp;
}
free(x3a->tbl);
*x3a = array;
}
/* Insert the new data */
h = ph & (x3a->size-1);
np = &(x3a->tbl[x3a->count++]);
np->key = key;
np->data = data;
if( x3a->ht[h] ) x3a->ht[h]->from = &(np->next);
np->next = x3a->ht[h];
x3a->ht[h] = np;
np->from = &(x3a->ht[h]);
return 1;
}
/* Return a pointer to data assigned to the given key. Return NULL
** if no such key. */
struct state *State_find(struct config *key)
{
unsigned h;
x3node *np;
if( x3a==0 ) return 0;
h = statehash(key) & (x3a->size-1);
np = x3a->ht[h];
while( np ){
if( statecmp(np->key,key)==0 ) break;
np = np->next;
}
return np ? np->data : 0;
}
/* Return an array of pointers to all data in the table.
** The array is obtained from malloc. Return NULL if there is a memory
** allocation problem, or if the array is empty. */
struct state **State_arrayof(void)
{
struct state **array;
int i,arrSize;
if( x3a==0 ) return 0;
arrSize = x3a->count;
array = (struct state **)calloc(arrSize, sizeof(struct state *));
if( array ){
for(i=0; i<arrSize; i++) array[i] = x3a->tbl[i].data;
}
return array;
}
/* Hash a configuration */
PRIVATE unsigned confighash(struct config *a)
{
unsigned h=0;
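/* A single mixing step, matching the per-config step used in statehash() */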
h = h*571 + a->rp->index*37 + a->dot;
return h;
}
/* There is one instance of the following structure for each
** associative array of type "x4".
*/
struct s_x4 {
int size; /* The number of available slots. */
/* Must be a power of 2 greater than or */
/* equal to 1 */
int count; /* Number of slots currently filled */
struct s_x4node *tbl; /* The data stored here */
struct s_x4node **ht; /* Hash table for lookups */
};
/* There is one instance of this structure for every data element
** in an associative array of type "x4".
*/
typedef struct s_x4node {
struct config *data; /* The data */
struct s_x4node *next; /* Next entry with the same hash */
struct s_x4node **from; /* Previous link */
} x4node;
/* There is only one instance of the array, which is the following */
static struct s_x4 *x4a;
/* Allocate a new associative array */
void Configtable_init(void){
if( x4a ) return;
x4a = (struct s_x4*)malloc( sizeof(struct s_x4) );
if( x4a ){
x4a->size = 64;
x4a->count = 0;
x4a->tbl = (x4node*)calloc(64, sizeof(x4node) + sizeof(x4node*));
if( x4a->tbl==0 ){
free(x4a);
x4a = 0;
}else{
int i;
x4a->ht = (x4node**)&(x4a->tbl[64]);
for(i=0; i<64; i++) x4a->ht[i] = 0;
}
}
}
/* Insert a new record into the array. Return TRUE if successful.
** Prior data with the same key is NOT overwritten */
int Configtable_insert(struct config *data)
{
x4node *np;
unsigned h;
unsigned ph;
if( x4a==0 ) return 0;
ph = confighash(data);
h = ph & (x4a->size-1);
np = x4a->ht[h];
while( np ){
if( Configcmp((const char *) np->data,(const char *) data)==0 ){
/* An existing entry with the same key is found. */
/* Fail because overwriting is not allowed. */
return 0;
}
np = np->next;
}
if( x4a->count>=x4a->size ){
/* Need to make the hash table bigger */
int i,arrSize;
struct s_x4 array;
array.size = arrSize = x4a->size*2;
array.count = x4a->count;
array.tbl = (x4node*)calloc(arrSize, sizeof(x4node) + sizeof(x4node*));
if( array.tbl==0 ) return 0; /* Fail due to malloc failure */
array.ht = (x4node**)&(array.tbl[arrSize]);
for(i=0; i<arrSize; i++) array.ht[i] = 0;
for(i=0; i<x4a->count; i++){
x4node *oldnp, *newnp;
oldnp = &(x4a->tbl[i]);
h = confighash(oldnp->data) & (arrSize-1);
newnp = &(array.tbl[i]);
if( array.ht[h] ) array.ht[h]->from = &(newnp->next);
newnp->next = array.ht[h];
newnp->data = oldnp->data;
newnp->from = &(array.ht[h]);
array.ht[h] = newnp;
}
/* free(x4a->tbl); // This code was originally written for 16-bit machines.
** On modern machines, don't worry about freeing this trivial amount of
** memory. */
*x4a = array;
}
/* Insert the new data */
h = ph & (x4a->size-1);
np = &(x4a->tbl[x4a->count++]);
np->data = data;
if( x4a->ht[h] ) x4a->ht[h]->from = &(np->next);
np->next = x4a->ht[h];
x4a->ht[h] = np;
np->from = &(x4a->ht[h]);
return 1;
}
/* Return a pointer to data assigned to the given key. Return NULL
** if no such key. */
struct config *Configtable_find(struct config *key)
{
unsigned h;
x4node *np;
if( x4a==0 ) return 0;
h = confighash(key) & (x4a->size-1);
np = x4a->ht[h];
while( np ){
if( Configcmp((const char *) np->data,(const char *) key)==0 ) break;
np = np->next;
}
return np ? np->data : 0;
}
/* Remove all data from the table. Pass each data to the function "f"
** as it is removed. ("f" may be null to avoid this step.) */
void Configtable_clear(int(*f)(struct config *))
{
int i;
if( x4a==0 || x4a->count==0 ) return;
if( f ) for(i=0; i<x4a->count; i++) (*f)(x4a->tbl[i].data);
for(i=0; i<x4a->size; i++) x4a->ht[i] = 0;
x4a->count = 0;
return;
} |
C | wireshark/tools/lemon/lempar.c | /*
** 2000-05-29
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** Driver template for the LEMON parser generator.
**
** The "lemon" program processes an LALR(1) input grammar file, then uses
** this template to construct a parser. The "lemon" program inserts text
** at each "%%" line. Also, any "P-a-r-s-e" identifer prefix (without the
** interstitial "-" characters) contained in this template is changed into
** the value of the %name directive from the grammar. Otherwise, the content
** of this template is copied straight through into the generated parser
** source file.
**
** The following is the concatenation of all %include directives from the
** input grammar file:
*/
/************ Begin %include sections from the grammar ************************/
%%
/**************** End of %include directives **********************************/
/* These constants specify the various numeric values for terminal symbols.
***************** Begin token definitions *************************************/
%%
/**************** End token definitions ***************************************/
/* The next section is a series of control #defines that determine
** various aspects of the generated parser.
** YYCODETYPE is the data type used to store the integer codes
** that represent terminal and non-terminal symbols.
** "unsigned char" is used if there are fewer than
** 256 symbols. Larger types otherwise.
** YYNOCODE is a number of type YYCODETYPE that is not used for
** any terminal or nonterminal symbol.
** YYFALLBACK If defined, this indicates that one or more tokens
** (also known as: "terminal symbols") have fall-back
** values which should be used if the original symbol
** would not parse. This permits keywords to sometimes
** be used as identifiers, for example.
** YYACTIONTYPE is the data type used for "action codes" - numbers
** that indicate what to do in response to the next
** token.
** ParseTOKENTYPE is the data type used for minor types of terminal
** symbols. Background: A "minor type" is a semantic
** value associated with a terminal or non-terminal
** symbol. For example, for an "ID" terminal symbol,
** the minor type might be the name of the identifier.
** Each non-terminal can have a different minor type.
** Terminal symbols all have the same minor type, though.
** This macro defines the minor type for terminal
** symbols.
** YYMINORTYPE is the data type used for all minor types.
** This is typically a union of many types, one of
** which is ParseTOKENTYPE. The entry in the union
** for terminal symbols is called "yy0".
** YYSTACKDEPTH is the maximum depth of the parser's stack. If
** zero the stack is dynamically sized using realloc()
** ParseARG_SDECL A static variable declaration for the %extra_argument
** ParseARG_PDECL A parameter declaration for the %extra_argument
** ParseARG_PARAM Code to pass %extra_argument as a subroutine parameter
** ParseARG_STORE Code to store %extra_argument into yypParser
** ParseARG_FETCH Code to extract %extra_argument from yypParser
** ParseCTX_* As ParseARG_ except for %extra_context
** YYERRORSYMBOL is the code number of the error symbol. If not
** defined, then do no error processing.
** YYNSTATE the combined number of states.
** YYNRULE the number of rules in the grammar
** YYNTOKEN Number of terminal symbols
** YY_MAX_SHIFT Maximum value for shift actions
** YY_MIN_SHIFTREDUCE Minimum value for shift-reduce actions
** YY_MAX_SHIFTREDUCE Maximum value for shift-reduce actions
** YY_ERROR_ACTION The yy_action[] code for syntax error
** YY_ACCEPT_ACTION The yy_action[] code for accept
** YY_NO_ACTION The yy_action[] code for no-op
** YY_MIN_REDUCE Minimum value for reduce actions
** YY_MAX_REDUCE Maximum value for reduce actions
*/
#ifndef INTERFACE
# define INTERFACE 1
#endif
/************* Begin control #defines *****************************************/
%%
/************* End control #defines *******************************************/
#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
/* Define the yytestcase() macro to be a no-op if it is not already
** defined.
**
** Applications can choose to define yytestcase() in the %include section
** to a macro that can assist in verifying code coverage. For production
** code the yytestcase() macro should be turned off. But it is useful
** for testing.
*/
#ifndef yytestcase
# define yytestcase(X)
#endif
/* Next are the tables used to determine what action to take based on the
** current state and lookahead token. These tables are used to implement
** functions that take a state number and lookahead value and return an
** action integer.
**
** Suppose the action integer is N. Then the action is determined as
** follows
**
** 0 <= N <= YY_MAX_SHIFT Shift N. That is, push the lookahead
** token onto the stack and goto state N.
**
** N between YY_MIN_SHIFTREDUCE Shift to an arbitrary state then
** and YY_MAX_SHIFTREDUCE reduce by rule N-YY_MIN_SHIFTREDUCE.
**
** N == YY_ERROR_ACTION A syntax error has occurred.
**
** N == YY_ACCEPT_ACTION The parser accepts its input.
**
** N == YY_NO_ACTION No such action. Denotes unused
** slots in the yy_action[] table.
**
** N between YY_MIN_REDUCE Reduce by rule N-YY_MIN_REDUCE
** and YY_MAX_REDUCE
**
** The action table is constructed as a single large table named yy_action[].
** Given state S and lookahead X, the action is computed as either:
**
** (A) N = yy_action[ yy_shift_ofst[S] + X ]
** (B) N = yy_default[S]
**
** The (A) formula is preferred. The B formula is used instead if
** yy_lookahead[yy_shift_ofst[S]+X] is not equal to X.
**
** The formulas above are for computing the action when the lookahead is
** a terminal symbol. If the lookahead is a non-terminal (as occurs after
** a reduce action) then the yy_reduce_ofst[] array is used in place of
** the yy_shift_ofst[] array.
**
** The following are the tables generated in this section:
**
** yy_action[] A single table containing all actions.
** yy_lookahead[] A table containing the lookahead for each entry in
** yy_action. Used to detect hash collisions.
** yy_shift_ofst[] For each state, the offset into yy_action for
** shifting terminals.
** yy_reduce_ofst[] For each state, the offset into yy_action for
** shifting non-terminals after a reduce.
** yy_default[] Default action for each state.
**
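** As a hypothetical illustration (the numbers below are invented, not
** taken from any real grammar): if yy_shift_ofst[S]==10 for the current
** state S and the lookahead token is X==3, the parser first examines
** yy_lookahead[10+3]. If that entry equals 3, the action is
** yy_action[13] (formula A). If it holds some other token code (a hash
** collision), the action falls back to yy_default[S] (formula B).
**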
*********** Begin parsing tables **********************************************/
%%
/********** End of lemon-generated parsing tables *****************************/
/* The next table maps tokens (terminal symbols) into fallback tokens.
** If a construct like the following:
**
** %fallback ID X Y Z.
**
** appears in the grammar, then ID becomes a fallback token for X, Y,
** and Z. Whenever one of the tokens X, Y, or Z is input to the parser
** but it does not parse, the type of the token is changed to ID and
** the parse is retried before an error is thrown.
**
** This feature can be used, for example, to cause some keywords in a language
** to revert to identifiers if the keyword does not apply in the context where
** it appears.
*/
#ifdef YYFALLBACK
static const YYCODETYPE yyFallback[] = {
%%
};
#endif /* YYFALLBACK */
/* The following structure represents a single element of the
** parser's stack. Information stored includes:
**
** + The state number for the parser at this level of the stack.
**
** + The value of the token stored at this level of the stack.
** (In other words, the "major" token.)
**
** + The semantic value stored at this level of the stack. This is
** the information used by the action routines in the grammar.
** It is sometimes called the "minor" token.
**
** After the "shift" half of a SHIFTREDUCE action, the stateno field
** actually contains the reduce action for the second half of the
** SHIFTREDUCE.
*/
struct yyStackEntry {
YYACTIONTYPE stateno; /* The state-number, or reduce action in SHIFTREDUCE */
YYCODETYPE major; /* The major token value. This is the code
** number for the token at this stack level */
YYMINORTYPE minor; /* The user-supplied minor token value. This
** is the value of the token */
};
typedef struct yyStackEntry yyStackEntry;
/* The state of the parser is completely contained in an instance of
** the following structure */
struct yyParser {
yyStackEntry *yytos; /* Pointer to top element of the stack */
#ifdef YYTRACKMAXSTACKDEPTH
int yyhwm; /* High-water mark of the stack */
#endif
#ifndef YYNOERRORRECOVERY
int yyerrcnt; /* Shifts remaining before error processing resumes */
#endif
ParseARG_SDECL /* A place to hold %extra_argument */
ParseCTX_SDECL /* A place to hold %extra_context */
#if YYSTACKDEPTH<=0
int yystksz; /* Current size of the stack */
yyStackEntry *yystack; /* The parser's stack */
yyStackEntry yystk0; /* First stack entry */
#else
yyStackEntry yystack[YYSTACKDEPTH]; /* The parser's stack */
yyStackEntry *yystackEnd; /* Last entry in the stack */
#endif
};
typedef struct yyParser yyParser;
#include <assert.h>
#ifndef NDEBUG
#include <stdio.h>
static FILE *yyTraceFILE = 0;
static char *yyTracePrompt = 0;
#endif /* NDEBUG */
#ifndef NDEBUG
/*
** Turn parser tracing on by giving a stream to which to write the trace
** and a prompt to preface each trace message. Tracing is turned off
** by making either argument NULL
**
** Inputs:
** <ul>
** <li> A FILE* to which trace output should be written.
** If NULL, then tracing is turned off.
** <li> A prefix string written at the beginning of every
** line of trace output. If NULL, then tracing is
** turned off.
** </ul>
**
** Outputs:
** None.
*/
void ParseTrace(FILE *TraceFILE, char *zTracePrompt){
yyTraceFILE = TraceFILE;
yyTracePrompt = zTracePrompt;
if( yyTraceFILE==0 ) yyTracePrompt = 0;
else if( yyTracePrompt==0 ) yyTraceFILE = 0;
}
#endif /* NDEBUG */
#if defined(YYCOVERAGE) || !defined(NDEBUG)
/* For tracing shifts, the names of all terminals and nonterminals
** are required. The following table supplies these names */
static const char *const yyTokenName[] = {
%%
};
#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */
#ifndef NDEBUG
/* For tracing reduce actions, the names of all rules are required.
*/
static const char *const yyRuleName[] = {
%%
};
#endif /* NDEBUG */
#if YYSTACKDEPTH<=0
/*
** Try to increase the size of the parser stack. Return the number
** of errors. Return 0 on success.
*/
static int yyGrowStack(yyParser *p){
int newSize;
int idx;
yyStackEntry *pNew;
newSize = p->yystksz*2 + 100;
idx = p->yytos ? (int)(p->yytos - p->yystack) : 0;
if( p->yystack==&p->yystk0 ){
pNew = malloc(newSize*sizeof(pNew[0]));
if( pNew ) pNew[0] = p->yystk0;
}else{
pNew = realloc(p->yystack, newSize*sizeof(pNew[0]));
}
if( pNew ){
p->yystack = pNew;
p->yytos = &p->yystack[idx];
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE,"%sStack grows from %d to %d entries.\n",
yyTracePrompt, p->yystksz, newSize);
}
#endif
p->yystksz = newSize;
}
return pNew==0;
}
#endif
/* Datatype of the argument accepted by the memory-allocation function
** that is passed to ParseAlloc() below. This can be changed by
** putting an appropriate #define in the %include section of the input
** grammar.
*/
#ifndef YYMALLOCARGTYPE
# define YYMALLOCARGTYPE size_t
#endif
/* Initialize a new parser that has already been allocated.
*/
void ParseInit(void *yypRawParser ParseCTX_PDECL){
yyParser *yypParser = (yyParser*)yypRawParser;
ParseCTX_STORE
#ifdef YYTRACKMAXSTACKDEPTH
yypParser->yyhwm = 0;
#endif
#if YYSTACKDEPTH<=0
yypParser->yytos = NULL;
yypParser->yystack = NULL;
yypParser->yystksz = 0;
if( yyGrowStack(yypParser) ){
yypParser->yystack = &yypParser->yystk0;
yypParser->yystksz = 1;
}
#endif
#ifndef YYNOERRORRECOVERY
yypParser->yyerrcnt = -1;
#endif
yypParser->yytos = yypParser->yystack;
yypParser->yystack[0].stateno = 0;
yypParser->yystack[0].major = 0;
#if YYSTACKDEPTH>0
yypParser->yystackEnd = &yypParser->yystack[YYSTACKDEPTH-1];
#endif
}
#ifndef Parse_ENGINEALWAYSONSTACK
/*
** This function allocates a new parser.
** The only argument is a pointer to a function which works like
** malloc.
**
** Inputs:
** A pointer to the function used to allocate memory.
**
** Outputs:
** A pointer to a parser. This pointer is used in subsequent calls
** to Parse and ParseFree.
*/
void *ParseAlloc(void *(*mallocProc)(YYMALLOCARGTYPE) ParseCTX_PDECL){
yyParser *yypParser;
yypParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) );
if( yypParser ){
ParseCTX_STORE
ParseInit(yypParser ParseCTX_PARAM);
}
return (void*)yypParser;
}
#endif /* Parse_ENGINEALWAYSONSTACK */
/* The following function deletes the "minor type" or semantic value
** associated with a symbol. The symbol can be either a terminal
** or nonterminal. "yymajor" is the symbol code, and "yypminor" is
** a pointer to the value to be deleted. The code used to do the
** deletions is derived from the %destructor and/or %token_destructor
** directives of the input grammar.
*/
static void yy_destructor(
yyParser *yypParser, /* The parser */
YYCODETYPE yymajor, /* Type code for object to destroy */
YYMINORTYPE *yypminor /* The object to be destroyed */
){
ParseARG_FETCH
ParseCTX_FETCH
switch( yymajor ){
/* Here is inserted the actions which take place when a
** terminal or non-terminal is destroyed. This can happen
** when the symbol is popped from the stack during a
** reduce or during error processing or when a parser is
** being destroyed before it is finished parsing.
**
** Note: during a reduce, the only symbols destroyed are those
** which appear on the RHS of the rule, but which are *not* used
** inside the C code.
*/
/********* Begin destructor definitions ***************************************/
%%
/********* End destructor definitions *****************************************/
default: break; /* If no destructor action specified: do nothing */
}
}
/*
** Pop the parser's stack once.
**
** If there is a destructor routine associated with the token which
** is popped from the stack, then call it.
*/
static void yy_pop_parser_stack(yyParser *pParser){
yyStackEntry *yytos;
assert( pParser->yytos!=0 );
assert( pParser->yytos > pParser->yystack );
yytos = pParser->yytos--;
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE,"%sPopping %s\n",
yyTracePrompt,
yyTokenName[yytos->major]);
}
#endif
yy_destructor(pParser, yytos->major, &yytos->minor);
}
/*
** Clear all secondary memory allocations from the parser
*/
void ParseFinalize(void *p){
yyParser *pParser = (yyParser*)p;
while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser);
#if YYSTACKDEPTH<=0
if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack);
#endif
}
#ifndef Parse_ENGINEALWAYSONSTACK
/*
** Deallocate and destroy a parser. Destructors are called for
** all stack elements before shutting the parser down.
**
** If the YYPARSEFREENEVERNULL macro exists (for example because it
** is defined in a %include section of the input grammar) then it is
** assumed that the input pointer is never NULL.
*/
void ParseFree(
void *p, /* The parser to be deleted */
void (*freeProc)(void*) /* Function used to reclaim memory */
){
#ifndef YYPARSEFREENEVERNULL
if( p==0 ) return;
#endif
ParseFinalize(p);
(*freeProc)(p);
}
#endif /* Parse_ENGINEALWAYSONSTACK */
/*
** Return the peak depth of the stack for a parser.
*/
#ifdef YYTRACKMAXSTACKDEPTH
int ParseStackPeak(void *p){
yyParser *pParser = (yyParser*)p;
return pParser->yyhwm;
}
#endif
/* This array of booleans keeps track of the parser statement
** coverage. The element yycoverage[X][Y] is set when the parser
** is in state X and has a lookahead token Y. In a well-tested
** system, every element of this matrix should end up being set.
*/
#if defined(YYCOVERAGE)
static unsigned char yycoverage[YYNSTATE][YYNTOKEN];
#endif
/*
** Write into out a description of every state/lookahead combination that
**
** (1) has not been used by the parser, and
** (2) is not a syntax error.
**
** Return the number of missed state/lookahead combinations.
*/
#if defined(YYCOVERAGE)
int ParseCoverage(FILE *out){
int stateno, iLookAhead, i;
int nMissed = 0;
for(stateno=0; stateno<YYNSTATE; stateno++){
i = yy_shift_ofst[stateno];
for(iLookAhead=0; iLookAhead<YYNTOKEN; iLookAhead++){
if( yy_lookahead[i+iLookAhead]!=iLookAhead ) continue;
if( yycoverage[stateno][iLookAhead]==0 ) nMissed++;
if( out ){
fprintf(out,"State %d lookahead %s %s\n", stateno,
yyTokenName[iLookAhead],
yycoverage[stateno][iLookAhead] ? "ok" : "missed");
}
}
}
return nMissed;
}
#endif
/*
** Find the appropriate action for a parser given the terminal
** look-ahead token iLookAhead.
*/
static YYACTIONTYPE yy_find_shift_action(
YYCODETYPE iLookAhead, /* The look-ahead token */
YYACTIONTYPE stateno /* Current state number */
){
int i;
if( stateno>YY_MAX_SHIFT ) return stateno;
assert( stateno <= YY_SHIFT_COUNT );
#if defined(YYCOVERAGE)
yycoverage[stateno][iLookAhead] = 1;
#endif
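/* This loop normally runs only once. It repeats only when the table
** lookup misses and a YYFALLBACK substitute token is retried in place
** of iLookAhead. */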
do{
i = yy_shift_ofst[stateno];
assert( i>=0 );
assert( i<=YY_ACTTAB_COUNT );
assert( i+YYNTOKEN<=(int)YY_NLOOKAHEAD );
assert( iLookAhead!=YYNOCODE );
assert( iLookAhead < YYNTOKEN );
i += iLookAhead;
assert( i<(int)YY_NLOOKAHEAD );
if( yy_lookahead[i]!=iLookAhead ){
#ifdef YYFALLBACK
YYCODETYPE iFallback; /* Fallback token */
assert( iLookAhead<sizeof(yyFallback)/sizeof(yyFallback[0]) );
iFallback = yyFallback[iLookAhead];
if( iFallback!=0 ){
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE, "%sFALLBACK %s => %s\n",
yyTracePrompt, yyTokenName[iLookAhead], yyTokenName[iFallback]);
}
#endif
assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
iLookAhead = iFallback;
continue;
}
#endif
#ifdef YYWILDCARD
{
int j = i - iLookAhead + YYWILDCARD;
assert( j<(int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])) );
if( yy_lookahead[j]==YYWILDCARD && iLookAhead>0 ){
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE, "%sWILDCARD %s => %s\n",
yyTracePrompt, yyTokenName[iLookAhead],
yyTokenName[YYWILDCARD]);
}
#endif /* NDEBUG */
return yy_action[j];
}
}
#endif /* YYWILDCARD */
return yy_default[stateno];
}else{
assert( i>=0 && i<(int)(sizeof(yy_action)/sizeof(yy_action[0])) );
return yy_action[i];
}
}while(1);
}
/*
** Find the appropriate action for a parser given the non-terminal
** look-ahead token iLookAhead.
*/
static YYACTIONTYPE yy_find_reduce_action(
YYACTIONTYPE stateno, /* Current state number */
YYCODETYPE iLookAhead /* The look-ahead token */
){
int i;
#ifdef YYERRORSYMBOL
if( stateno>YY_REDUCE_COUNT ){
return yy_default[stateno];
}
#else
assert( stateno<=YY_REDUCE_COUNT );
#endif
i = yy_reduce_ofst[stateno];
assert( iLookAhead!=YYNOCODE );
i += iLookAhead;
#ifdef YYERRORSYMBOL
if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=iLookAhead ){
return yy_default[stateno];
}
#else
assert( i>=0 && i<YY_ACTTAB_COUNT );
assert( yy_lookahead[i]==iLookAhead );
#endif
return yy_action[i];
}
/*
** The following routine is called if the stack overflows.
*/
static void yyStackOverflow(yyParser *yypParser){
ParseARG_FETCH
ParseCTX_FETCH
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt);
}
#endif
while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser);
/* Here code is inserted which will execute if the parser
** stack ever overflows */
/******** Begin %stack_overflow code ******************************************/
%%
/******** End %stack_overflow code ********************************************/
ParseARG_STORE /* Suppress warning about unused %extra_argument var */
ParseCTX_STORE
}
/*
** Print tracing information for a SHIFT action
*/
#ifndef NDEBUG
static void yyTraceShift(yyParser *yypParser, int yyNewState, const char *zTag){
if( yyTraceFILE ){
if( yyNewState<YYNSTATE ){
fprintf(yyTraceFILE,"%s%s '%s', go to state %d\n",
yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major],
yyNewState);
}else{
fprintf(yyTraceFILE,"%s%s '%s', pending reduce %d\n",
yyTracePrompt, zTag, yyTokenName[yypParser->yytos->major],
yyNewState - YY_MIN_REDUCE);
}
}
}
#else
# define yyTraceShift(X,Y,Z)
#endif
/*
** Perform a shift action.
*/
static void yy_shift(
yyParser *yypParser, /* The parser to be shifted */
YYACTIONTYPE yyNewState, /* The new state to shift in */
YYCODETYPE yyMajor, /* The major token to shift in */
ParseTOKENTYPE yyMinor /* The minor token to shift in */
){
yyStackEntry *yytos;
yypParser->yytos++;
#ifdef YYTRACKMAXSTACKDEPTH
if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
yypParser->yyhwm++;
assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) );
}
#endif
#if YYSTACKDEPTH>0
if( yypParser->yytos>yypParser->yystackEnd ){
yypParser->yytos--;
yyStackOverflow(yypParser);
return;
}
#else
if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){
if( yyGrowStack(yypParser) ){
yypParser->yytos--;
yyStackOverflow(yypParser);
return;
}
}
#endif
if( yyNewState > YY_MAX_SHIFT ){
yyNewState += YY_MIN_REDUCE - YY_MIN_SHIFTREDUCE;
}
yytos = yypParser->yytos;
yytos->stateno = yyNewState;
yytos->major = yyMajor;
yytos->minor.yy0 = yyMinor;
yyTraceShift(yypParser, yyNewState, "Shift");
}
/* For rule J, yyRuleInfoLhs[J] contains the symbol on the left-hand side
** of that rule */
static const YYCODETYPE yyRuleInfoLhs[] = {
%%
};
/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number
** of symbols on the right-hand side of that rule. */
static const signed char yyRuleInfoNRhs[] = {
%%
};
static void yy_accept(yyParser*); /* Forward Declaration */
/*
** Perform a reduce action and the shift that must immediately
** follow the reduce.
**
** The yyLookahead and yyLookaheadToken parameters provide reduce actions
** access to the lookahead token (if any). The yyLookahead will be YYNOCODE
** if the lookahead token has already been consumed. As this procedure is
** only called from one place, optimizing compilers will in-line it, which
** means that the extra parameters have no performance impact.
*/
static YYACTIONTYPE yy_reduce(
yyParser *yypParser, /* The parser */
unsigned int yyruleno, /* Number of the rule by which to reduce */
int yyLookahead, /* Lookahead token, or YYNOCODE if none */
ParseTOKENTYPE yyLookaheadToken /* Value of the lookahead token */
ParseCTX_PDECL /* %extra_context */
){
int yygoto; /* The next state */
YYACTIONTYPE yyact; /* The next action */
yyStackEntry *yymsp; /* The top of the parser's stack */
int yysize; /* Amount to pop the stack */
ParseARG_FETCH
(void)yyLookahead;
(void)yyLookaheadToken;
yymsp = yypParser->yytos;
switch( yyruleno ){
/* Beginning here are the reduction cases. A typical example
** follows:
** case 0:
** #line <lineno> <grammarfile>
** { ... } // User supplied code
** #line <lineno> <thisfile>
** break;
*/
/********** Begin reduce actions **********************************************/
%%
/********** End reduce actions ************************************************/
};
assert( yyruleno<sizeof(yyRuleInfoLhs)/sizeof(yyRuleInfoLhs[0]) );
yygoto = yyRuleInfoLhs[yyruleno];
yysize = yyRuleInfoNRhs[yyruleno];
yyact = yy_find_reduce_action(yymsp[yysize].stateno,(YYCODETYPE)yygoto);
/* There are no SHIFTREDUCE actions on nonterminals because the table
** generator has simplified them to pure REDUCE actions. */
assert( !(yyact>YY_MAX_SHIFT && yyact<=YY_MAX_SHIFTREDUCE) );
/* It is not possible for a REDUCE to be followed by an error */
assert( yyact!=YY_ERROR_ACTION );
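/* yysize is the negated length of the rule's RHS, so the following line
** pops the RHS symbols off the stack and leaves one slot for the LHS */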
yymsp += yysize+1;
yypParser->yytos = yymsp;
yymsp->stateno = (YYACTIONTYPE)yyact;
yymsp->major = (YYCODETYPE)yygoto;
yyTraceShift(yypParser, yyact, "... then shift");
return yyact;
}
/*
** The following code executes when the parse fails
*/
#ifndef YYNOERRORRECOVERY
static void yy_parse_failed(
yyParser *yypParser /* The parser */
){
ParseARG_FETCH
ParseCTX_FETCH
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE,"%sFail!\n",yyTracePrompt);
}
#endif
while( yypParser->yytos>yypParser->yystack ) yy_pop_parser_stack(yypParser);
/* Here code is inserted which will be executed whenever the
** parser fails */
/************ Begin %parse_failure code ***************************************/
%%
/************ End %parse_failure code *****************************************/
ParseARG_STORE /* Suppress warning about unused %extra_argument variable */
ParseCTX_STORE
}
#endif /* YYNOERRORRECOVERY */
/*
** The following code executes when a syntax error first occurs.
*/
static void yy_syntax_error(
yyParser *yypParser, /* The parser */
int yymajor, /* The major type of the error token */
ParseTOKENTYPE yyminor /* The minor type of the error token */
){
ParseARG_FETCH
ParseCTX_FETCH
#define TOKEN yyminor
/************ Begin %syntax_error code ****************************************/
%%
/************ End %syntax_error code ******************************************/
ParseARG_STORE /* Suppress warning about unused %extra_argument variable */
ParseCTX_STORE
}
/*
** The following is executed when the parser accepts
*/
static void yy_accept(
yyParser *yypParser /* The parser */
){
ParseARG_FETCH
ParseCTX_FETCH
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE,"%sAccept!\n",yyTracePrompt);
}
#endif
#ifndef YYNOERRORRECOVERY
yypParser->yyerrcnt = -1;
#endif
assert( yypParser->yytos==yypParser->yystack );
/* Here code is inserted which will be executed whenever the
** parser accepts */
/*********** Begin %parse_accept code *****************************************/
%%
/*********** End %parse_accept code *******************************************/
ParseARG_STORE /* Suppress warning about unused %extra_argument variable */
ParseCTX_STORE
}
/* The main parser program.
** The first argument is a pointer to a structure obtained from
** "ParseAlloc" which describes the current state of the parser.
** The second argument is the major token number. The third is
** the minor token. The fourth optional argument is whatever the
** user wants (as specified in the grammar) and is available for
** use by the action routines.
**
** Inputs:
** <ul>
** <li> A pointer to the parser (an opaque structure.)
** <li> The major token number.
** <li> The minor token number.
** <li> An optional argument of a grammar-specified type.
** </ul>
**
** Outputs:
** None.
*/
void Parse(
void *yyp, /* The parser */
int yymajor, /* The major token code number */
ParseTOKENTYPE yyminor /* The value for the token */
ParseARG_PDECL /* Optional %extra_argument parameter */
){
YYMINORTYPE yyminorunion;
YYACTIONTYPE yyact; /* The parser action. */
#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY)
int yyendofinput; /* True if we are at the end of input */
#endif
#ifdef YYERRORSYMBOL
int yyerrorhit = 0; /* True if yymajor has invoked an error */
#endif
yyParser *yypParser = (yyParser*)yyp; /* The parser */
ParseCTX_FETCH
ParseARG_STORE
assert( yypParser->yytos!=0 );
#if !defined(YYERRORSYMBOL) && !defined(YYNOERRORRECOVERY)
yyendofinput = (yymajor==0);
#endif
yyact = yypParser->yytos->stateno;
#ifndef NDEBUG
if( yyTraceFILE ){
if( yyact < YY_MIN_REDUCE ){
fprintf(yyTraceFILE,"%sInput '%s' in state %d\n",
yyTracePrompt,yyTokenName[yymajor],yyact);
}else{
fprintf(yyTraceFILE,"%sInput '%s' with pending reduce %d\n",
yyTracePrompt,yyTokenName[yymajor],yyact-YY_MIN_REDUCE);
}
}
#endif
while(1){ /* Exit by "break" */
assert( yypParser->yytos>=yypParser->yystack );
assert( yyact==yypParser->yytos->stateno );
yyact = yy_find_shift_action((YYCODETYPE)yymajor,yyact);
if( yyact >= YY_MIN_REDUCE ){
unsigned int yyruleno = yyact - YY_MIN_REDUCE; /* Reduce by this rule */
#ifndef NDEBUG
assert( yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) );
if( yyTraceFILE ){
int yysize = yyRuleInfoNRhs[yyruleno];
if( yysize ){
fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n",
yyTracePrompt,
yyruleno, yyRuleName[yyruleno],
yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action",
yypParser->yytos[yysize].stateno);
}else{
fprintf(yyTraceFILE, "%sReduce %d [%s]%s.\n",
yyTracePrompt, yyruleno, yyRuleName[yyruleno],
yyruleno<YYNRULE_WITH_ACTION ? "" : " without external action");
}
}
#endif /* NDEBUG */
/* Check that the stack is large enough to grow by a single entry
** if the RHS of the rule is empty. This ensures that there is room
** enough on the stack to push the LHS value */
if( yyRuleInfoNRhs[yyruleno]==0 ){
#ifdef YYTRACKMAXSTACKDEPTH
if( (int)(yypParser->yytos - yypParser->yystack)>yypParser->yyhwm ){
yypParser->yyhwm++;
assert( yypParser->yyhwm ==
(int)(yypParser->yytos - yypParser->yystack));
}
#endif
#if YYSTACKDEPTH>0
if( yypParser->yytos>=yypParser->yystackEnd ){
yyStackOverflow(yypParser);
break;
}
#else
if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz-1] ){
if( yyGrowStack(yypParser) ){
yyStackOverflow(yypParser);
break;
}
}
#endif
}
yyact = yy_reduce(yypParser,yyruleno,yymajor,yyminor ParseCTX_PARAM);
}else if( yyact <= YY_MAX_SHIFTREDUCE ){
yy_shift(yypParser,yyact,(YYCODETYPE)yymajor,yyminor);
#ifndef YYNOERRORRECOVERY
yypParser->yyerrcnt--;
#endif
break;
}else if( yyact==YY_ACCEPT_ACTION ){
yypParser->yytos--;
yy_accept(yypParser);
return;
}else{
assert( yyact == YY_ERROR_ACTION );
yyminorunion.yy0 = yyminor;
#ifdef YYERRORSYMBOL
int yymx;
#endif
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE,"%sSyntax Error!\n",yyTracePrompt);
}
#endif
#ifdef YYERRORSYMBOL
/* A syntax error has occurred.
** The response to an error depends upon whether or not the
** grammar defines an error token "ERROR".
**
** This is what we do if the grammar does define ERROR:
**
** * Call the %syntax_error function.
**
** * Begin popping the stack until we enter a state where
** it is legal to shift the error symbol, then shift
** the error symbol.
**
** * Set the error count to three.
**
** * Begin accepting and shifting new tokens. No new error
** processing will occur until three tokens have been
** shifted successfully.
**
*/
if( yypParser->yyerrcnt<0 ){
yy_syntax_error(yypParser,yymajor,yyminor);
}
yymx = yypParser->yytos->major;
if( yymx==YYERRORSYMBOL || yyerrorhit ){
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE,"%sDiscard input token %s\n",
yyTracePrompt,yyTokenName[yymajor]);
}
#endif
yy_destructor(yypParser, (YYCODETYPE)yymajor, &yyminorunion);
yymajor = YYNOCODE;
}else{
while( yypParser->yytos > yypParser->yystack ){
yyact = yy_find_reduce_action(yypParser->yytos->stateno,
YYERRORSYMBOL);
if( yyact<=YY_MAX_SHIFTREDUCE ) break;
yy_pop_parser_stack(yypParser);
}
if( yypParser->yytos <= yypParser->yystack || yymajor==0 ){
yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
yy_parse_failed(yypParser);
#ifndef YYNOERRORRECOVERY
yypParser->yyerrcnt = -1;
#endif
yymajor = YYNOCODE;
}else if( yymx!=YYERRORSYMBOL ){
yy_shift(yypParser,yyact,YYERRORSYMBOL,yyminor);
}
}
yypParser->yyerrcnt = 3;
yyerrorhit = 1;
if( yymajor==YYNOCODE ) break;
yyact = yypParser->yytos->stateno;
#elif defined(YYNOERRORRECOVERY)
/* If the YYNOERRORRECOVERY macro is defined, then do not attempt to
** do any kind of error recovery. Instead, simply invoke the syntax
** error routine and continue going as if nothing had happened.
**
** Applications can set this macro (for example inside %include) if
** they intend to abandon the parse upon the first syntax error seen.
*/
yy_syntax_error(yypParser,yymajor, yyminor);
yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
break;
#else /* YYERRORSYMBOL is not defined */
/* This is what we do if the grammar does not define ERROR:
**
** * Report an error message, and throw away the input token.
**
** * If the input token is $, then fail the parse.
**
** As before, subsequent error messages are suppressed until
** three input tokens have been successfully shifted.
*/
if( yypParser->yyerrcnt<=0 ){
yy_syntax_error(yypParser,yymajor, yyminor);
}
yypParser->yyerrcnt = 3;
yy_destructor(yypParser,(YYCODETYPE)yymajor,&yyminorunion);
if( yyendofinput ){
yy_parse_failed(yypParser);
#ifndef YYNOERRORRECOVERY
yypParser->yyerrcnt = -1;
#endif
}
break;
#endif
}
}
#ifndef NDEBUG
if( yyTraceFILE ){
yyStackEntry *i;
char cDiv = '[';
fprintf(yyTraceFILE,"%sReturn. Stack=",yyTracePrompt);
for(i=&yypParser->yystack[1]; i<=yypParser->yytos; i++){
fprintf(yyTraceFILE,"%c%s", cDiv, yyTokenName[i->major]);
cDiv = ' ';
}
fprintf(yyTraceFILE,"]\n");
}
#endif
return;
}
/*
** Return the fallback token corresponding to canonical token iToken, or
** 0 if iToken has no fallback.
*/
int ParseFallback(int iToken){
#ifdef YYFALLBACK
assert( iToken<(int)(sizeof(yyFallback)/sizeof(yyFallback[0])) );
return yyFallback[iToken];
#else
(void)iToken;
return 0;
#endif
} |
wireshark/tools/lemon/README | The Lemon Parser Generator's home page is: https://www.hwaci.com/sw/lemon/
Lemon now appears to be maintained at: https://sqlite.org/lemon.html
Documentation is available at: https://sqlite.org/src/doc/trunk/doc/lemon.html
Git mirror of the upstream Fossil repository: https://github.com/mackyle/sqlite
The files lempar.c and lemon.c are taken from SQLite and are modified as
little as possible, to make it easier to synchronize changes. Last updated at:
commit a913f942cf6b32b85de6428fd542b39458df2a88
Author: D. Richard Hipp <[email protected]>
Date: Wed Dec 28 14:03:47 2022 +0000
Version 3.40.1
To check for changes (adjust "previous commit" accordingly):
git clone --depth=1000 https://github.com/sqlite/sqlite
cd sqlite/tools
git log -p 273ee15121.. lemon.c lempar.c
To create a Wireshark version (steps 1-3) and validate the result (steps 4-5):
1. Copy the two files.
2. Run ./apply-patches.sh to apply local patches.
3. Update the commit in this README (to ensure the base is known).
4. Check for CSA warnings: clang-check -analyze lemon.c --
5. Build and run lemon: ninja epan/dfilter/grammar.c
To keep the lemon source as pristine as possible from upstream all warnings
when building lemon itself are disabled. Only patch the lemon source code as
a last resort.
Warnings for lemon-generated code are few in practice with -Wall -Wextra. These
are preferably disabled selectively in the Wireshark build.
The patches to lemon to silence compiler warnings and static analysis reports
(for edge cases that cannot occur) are not proposed upstream because that
process is difficult. From <https://www.sqlite.org/copyright.html>:
SQLite is open-source, meaning that you can make as many copies of it as you
want and do whatever you want with those copies, without limitation. But
SQLite is not open-contribution. In order to keep SQLite in the public
domain and ensure that the code does not become contaminated with
proprietary or licensed content, the project does not accept patches from
unknown persons.
A note about the Lemon patches: we have no intention of forking Lemon and
maintaining it. These patches are written to address static analyzer warnings
without actually modifying the functionality. If upstream is willing to accept
patches, that would be great, and the intention is to make that as easy as
possible.
The lemon and lempar patches are dedicated to the public domain, as set forth
in Creative Commons Zero v1.0 Universal (IANAL, but I hope this is sufficient).
|
Shell Script | wireshark/tools/oss-fuzzshark/build.sh | #!/bin/bash -eux
# Copyright 2017 Google Inc.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# TODO: support specifying targets in args. Google oss-fuzz specifies 'all'.
# TODO update oss-fuzz configuration to build with OSS_FUZZ=1? This is necessary
# to build the fuzzshark_* targets for oss-fuzz.
cmake -DOSS_FUZZ=1 .
cmake --build . --target all-fuzzers
for file in run/fuzzshark_*; do
fuzzer_name="${file##*/}"
fuzzer_target="${fuzzer_name#fuzzshark_}"
mv "$file" "$OUT/"
    echo -en "[libfuzzer]\nmax_len = 1024\n" > "$OUT/${fuzzer_name}.options"
if [ -d "$SAMPLES_DIR/${fuzzer_target}" ]; then
        zip -j "$OUT/${fuzzer_name}_seed_corpus.zip" "$SAMPLES_DIR/${fuzzer_target}"/*/*.bin
fi
done |
wireshark/tools/pidl/expr.yp | # expr.yp
# Copyright (C) 2006 Jelmer Vernooij <[email protected]>
# Published under the GNU GPL
#
%left '->'
%right '!' '~'
%left '*' '/' '%'
%left '+' '-'
%left '<<' '>>'
%left '>' '<'
%left '==' '!='
%left '&'
%left '|'
%left '&&'
%left '||'
%left '?' ':'
%left NEG DEREF ADDROF INV
%left '.'
%%
exp:
NUM
|
TEXT { "\"$_[1]\"" }
|
func
|
var
|
'~' exp %prec INV { "~$_[2]" }
|
exp '+' exp { "$_[1] + $_[3]" }
|
exp '-' exp { "$_[1] - $_[3]" }
|
exp '*' exp { "$_[1] * $_[3]" }
|
exp '%' exp { "$_[1] % $_[3]" }
|
exp '<' exp { "$_[1] < $_[3]" }
|
exp '>' exp { "$_[1] > $_[3]" }
|
exp '|' exp { "$_[1] | $_[3]" }
|
exp '==' exp { "$_[1] == $_[3]" }
|
exp '<=' exp { "$_[1] <= $_[3]" }
|
	exp '>=' exp { "$_[1] >= $_[3]" }
|
exp '<<' exp { "$_[1] << $_[3]" }
|
exp '>>' exp { "$_[1] >> $_[3]" }
|
exp '!=' exp { "$_[1] != $_[3]" }
|
exp '||' exp { "$_[1] || $_[3]" }
|
exp '&&' exp { "$_[1] && $_[3]" }
|
exp '&' exp { "$_[1] & $_[3]" }
|
exp '?' exp ':' exp { "$_[1]?$_[3]:$_[5]" }
|
'~' exp { "~$_[1]" }
|
'!' exp { "not $_[1]" }
|
exp '/' exp { "$_[1] / $_[3]" }
|
'-' exp %prec NEG { "-$_[2]" }
|
'&' exp %prec ADDROF { "&$_[2]" }
|
exp '^' exp { "$_[1]^$_[3]" }
|
'(' exp ')' { "($_[2])" }
;
possible_pointer:
VAR { $_[0]->_Lookup($_[1]) }
|
'*' possible_pointer %prec DEREF { $_[0]->_Dereference($_[2]); "*$_[2]" }
;
var:
possible_pointer { $_[0]->_Use($_[1]) }
|
var '.' VAR { $_[0]->_Use("$_[1].$_[3]") }
|
'(' var ')' { "($_[2])" }
|
var '->' VAR { $_[0]->_Use("*$_[1]"); $_[1]."->".$_[3] }
;
func:
VAR '(' opt_args ')' { "$_[1]($_[3])" }
;
opt_args:
#empty
{ "" }
|
args
;
exp_or_possible_pointer:
exp
|
possible_pointer
;
args:
exp_or_possible_pointer
|
exp_or_possible_pointer ',' args { "$_[1], $_[3]" }
;
%%
package Parse::Pidl::Expr;
sub _Lexer {
my($parser)=shift;
	$parser->YYData->{INPUT}=~s/^[ \t]*//;
for ($parser->YYData->{INPUT}) {
if (s/^(0x[0-9A-Fa-f]+)//) {
$parser->YYData->{LAST_TOKEN} = $1;
return('NUM',$1);
}
if (s/^([0-9]+(?:\.[0-9]+)?)//) {
$parser->YYData->{LAST_TOKEN} = $1;
return('NUM',$1);
}
if (s/^([A-Za-z_][A-Za-z0-9_]*)//) {
$parser->YYData->{LAST_TOKEN} = $1;
return('VAR',$1);
}
if (s/^\"(.*?)\"//) {
$parser->YYData->{LAST_TOKEN} = $1;
return('TEXT',$1);
}
if (s/^(==|!=|<=|>=|->|\|\||<<|>>|&&)//s) {
$parser->YYData->{LAST_TOKEN} = $1;
return($1,$1);
}
if (s/^(.)//s) {
$parser->YYData->{LAST_TOKEN} = $1;
return($1,$1);
}
}
}
sub _Use($$)
{
my ($self, $x) = @_;
if (defined($self->YYData->{USE})) {
return $self->YYData->{USE}->($x);
}
return $x;
}
sub _Lookup($$)
{
my ($self, $x) = @_;
return $self->YYData->{LOOKUP}->($x);
}
sub _Dereference($$)
{
my ($self, $x) = @_;
if (defined($self->YYData->{DEREFERENCE})) {
$self->YYData->{DEREFERENCE}->($x);
}
}
sub _Error($)
{
my ($self) = @_;
if (defined($self->YYData->{LAST_TOKEN})) {
$self->YYData->{ERROR}->("Parse error in `".$self->YYData->{FULL_INPUT}."' near `". $self->YYData->{LAST_TOKEN} . "'");
} else {
$self->YYData->{ERROR}->("Parse error in `".$self->YYData->{FULL_INPUT}."'");
}
}
sub Run {
my($self, $data, $error, $lookup, $deref, $use) = @_;
$self->YYData->{FULL_INPUT} = $data;
$self->YYData->{INPUT} = $data;
$self->YYData->{LOOKUP} = $lookup;
$self->YYData->{DEREFERENCE} = $deref;
$self->YYData->{ERROR} = $error;
$self->YYData->{USE} = $use;
return $self->YYParse( yylex => \&_Lexer, yyerror => \&_Error);
} |
|
wireshark/tools/pidl/idl.yp | ########################
# IDL Parse::Yapp parser
# Copyright (C) Andrew Tridgell <[email protected]>
# released under the GNU GPL version 3 or later
# the precedence actually doesn't matter at all for this grammar, but
# by providing a precedence we reduce the number of conflicts
# enormously
%left '-' '+' '&' '|' '*' '>' '.' '/' '(' ')' '[' ',' ';'
################
# grammar
%%
idl:
#empty { {} }
|
idl interface { push(@{$_[1]}, $_[2]); $_[1] }
|
idl coclass { push(@{$_[1]}, $_[2]); $_[1] }
|
idl import { push(@{$_[1]}, $_[2]); $_[1] }
|
idl include { push(@{$_[1]}, $_[2]); $_[1] }
|
idl importlib { push(@{$_[1]}, $_[2]); $_[1] }
|
idl cpp_quote { push(@{$_[1]}, $_[2]); $_[1] }
;
import:
'import' commalist ';'
{{
"TYPE" => "IMPORT",
"PATHS" => $_[2],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
include:
'include' commalist ';'
{{
"TYPE" => "INCLUDE",
"PATHS" => $_[2],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
importlib:
'importlib' commalist ';'
{{
"TYPE" => "IMPORTLIB",
"PATHS" => $_[2],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
commalist:
text { [ $_[1] ] }
|
commalist ',' text { push(@{$_[1]}, $_[3]); $_[1] }
;
coclass:
property_list 'coclass' identifier '{' interface_names '}' optional_semicolon
{{
"TYPE" => "COCLASS",
"PROPERTIES" => $_[1],
"NAME" => $_[3],
"DATA" => $_[5],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
interface_names:
#empty { {} }
|
interface_names 'interface' identifier ';' { push(@{$_[1]}, $_[2]); $_[1] }
;
interface:
property_list 'interface' identifier base_interface '{' definitions '}' optional_semicolon
{{
"TYPE" => "INTERFACE",
"PROPERTIES" => $_[1],
"NAME" => $_[3],
"BASE" => $_[4],
"DATA" => $_[6],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
base_interface:
#empty
|
':' identifier { $_[2] }
;
cpp_quote:
'cpp_quote' '(' text ')'
{{
"TYPE" => "CPP_QUOTE",
"DATA" => $_[3],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
definitions:
definition { [ $_[1] ] }
|
definitions definition { push(@{$_[1]}, $_[2]); $_[1] }
;
definition:
function
|
const
|
typedef
|
typedecl
;
const:
'const' identifier pointers identifier '=' anytext ';'
{{
"TYPE" => "CONST",
"DTYPE" => $_[2],
"POINTERS" => $_[3],
"NAME" => $_[4],
"VALUE" => $_[6],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
|
'const' identifier pointers identifier array_len '=' anytext ';'
{{
"TYPE" => "CONST",
"DTYPE" => $_[2],
"POINTERS" => $_[3],
"NAME" => $_[4],
"ARRAY_LEN" => $_[5],
"VALUE" => $_[7],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
function:
property_list type identifier '(' element_list2 ')' ';'
{{
"TYPE" => "FUNCTION",
"NAME" => $_[3],
"RETURN_TYPE" => $_[2],
"PROPERTIES" => $_[1],
"ELEMENTS" => $_[5],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
typedef:
property_list 'typedef' type pointers identifier array_len ';'
{{
"TYPE" => "TYPEDEF",
"PROPERTIES" => $_[1],
"NAME" => $_[5],
"DATA" => $_[3],
"POINTERS" => $_[4],
"ARRAY_LEN" => $_[6],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
usertype:
struct
|
union
|
enum
|
bitmap
|
pipe
;
typedecl:
usertype ';' { $_[1] }
;
sign:
'signed'
|
'unsigned'
;
existingtype:
sign identifier { ($_[1]?$_[1]:"signed") ." $_[2]" }
|
identifier
;
type:
usertype
|
existingtype
|
void { "void" }
;
enum_body:
'{' enum_elements '}' { $_[2] }
;
opt_enum_body:
#empty
|
enum_body
;
enum:
property_list 'enum' optional_identifier opt_enum_body
{{
"TYPE" => "ENUM",
"PROPERTIES" => $_[1],
"NAME" => $_[3],
"ELEMENTS" => $_[4],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
enum_elements:
enum_element { [ $_[1] ] }
|
enum_elements ',' enum_element { push(@{$_[1]}, $_[3]); $_[1] }
;
enum_element:
identifier
|
identifier '=' anytext { "$_[1]$_[2]$_[3]" }
;
bitmap_body:
'{' opt_bitmap_elements '}' { $_[2] }
;
opt_bitmap_body:
#empty
|
bitmap_body
;
bitmap:
property_list 'bitmap' optional_identifier opt_bitmap_body
{{
"TYPE" => "BITMAP",
"PROPERTIES" => $_[1],
"NAME" => $_[3],
"ELEMENTS" => $_[4],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
bitmap_elements:
bitmap_element { [ $_[1] ] }
|
bitmap_elements ',' bitmap_element { push(@{$_[1]}, $_[3]); $_[1] }
;
opt_bitmap_elements:
#empty
|
bitmap_elements
;
bitmap_element:
identifier '=' anytext { "$_[1] ( $_[3] )" }
;
struct_body:
'{' element_list1 '}' { $_[2] }
;
opt_struct_body:
#empty
|
struct_body
;
struct:
property_list 'struct' optional_identifier opt_struct_body
{{
"TYPE" => "STRUCT",
"PROPERTIES" => $_[1],
"NAME" => $_[3],
"ELEMENTS" => $_[4],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
empty_element:
property_list ';'
{{
"NAME" => "",
"TYPE" => "EMPTY",
"PROPERTIES" => $_[1],
"POINTERS" => 0,
"ARRAY_LEN" => [],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
base_or_empty:
base_element ';'
|
empty_element;
optional_base_element:
property_list base_or_empty { $_[2]->{PROPERTIES} = FlattenHash([$_[1],$_[2]->{PROPERTIES}]); $_[2] }
;
union_elements:
#empty
|
union_elements optional_base_element { push(@{$_[1]}, $_[2]); $_[1] }
;
union_body:
'{' union_elements '}' { $_[2] }
;
opt_union_body:
#empty
|
union_body
;
union:
property_list 'union' optional_identifier opt_union_body
{{
"TYPE" => "UNION",
"PROPERTIES" => $_[1],
"NAME" => $_[3],
"ELEMENTS" => $_[4],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
base_element:
property_list type pointers identifier array_len
{{
"NAME" => $_[4],
"TYPE" => $_[2],
"PROPERTIES" => $_[1],
"POINTERS" => $_[3],
"ARRAY_LEN" => $_[5],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
pointers:
#empty
{ 0 }
|
pointers '*' { $_[1]+1 }
;
pipe:
property_list 'pipe' type
{{
"TYPE" => "PIPE",
"PROPERTIES" => $_[1],
"NAME" => undef,
"DATA" => {
"TYPE" => "STRUCT",
"PROPERTIES" => $_[1],
"NAME" => undef,
"ELEMENTS" => [{
"NAME" => "count",
"PROPERTIES" => $_[1],
"POINTERS" => 0,
"ARRAY_LEN" => [],
"TYPE" => "uint3264",
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
},{
"NAME" => "array",
"PROPERTIES" => $_[1],
"POINTERS" => 0,
"ARRAY_LEN" => [ "count" ],
"TYPE" => $_[3],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}],
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
},
"FILE" => $_[0]->YYData->{FILE},
"LINE" => $_[0]->YYData->{LINE},
}}
;
element_list1:
#empty
{ [] }
|
element_list1 base_element ';' { push(@{$_[1]}, $_[2]); $_[1] }
;
optional_const:
#empty
|
'const'
;
element_list2:
#empty
|
'void'
|
optional_const base_element { [ $_[2] ] }
|
element_list2 ',' optional_const base_element { push(@{$_[1]}, $_[4]); $_[1] }
;
array_len:
#empty { [] }
|
'[' ']' array_len { push(@{$_[3]}, "*"); $_[3] }
|
'[' anytext ']' array_len { push(@{$_[4]}, "$_[2]"); $_[4] }
;
property_list:
#empty
|
property_list '[' properties ']' { FlattenHash([$_[1],$_[3]]); }
;
properties:
property { $_[1] }
|
properties ',' property { FlattenHash([$_[1], $_[3]]); }
;
property:
identifier {{ "$_[1]" => "1" }}
|
identifier '(' commalisttext ')' {{ "$_[1]" => "$_[3]" }}
;
commalisttext:
anytext
|
commalisttext ',' anytext { "$_[1],$_[3]" }
;
anytext:
#empty
{ "" }
|
identifier
|
constant
|
text
|
anytext '-' anytext { "$_[1]$_[2]$_[3]" }
|
anytext '.' anytext { "$_[1]$_[2]$_[3]" }
|
anytext '*' anytext { "$_[1]$_[2]$_[3]" }
|
anytext '>' anytext { "$_[1]$_[2]$_[3]" }
|
anytext '<' anytext { "$_[1]$_[2]$_[3]" }
|
anytext '|' anytext { "$_[1]$_[2]$_[3]" }
|
anytext '&' anytext { "$_[1]$_[2]$_[3]" }
|
anytext '/' anytext { "$_[1]$_[2]$_[3]" }
|
anytext '?' anytext { "$_[1]$_[2]$_[3]" }
|
anytext ':' anytext { "$_[1]$_[2]$_[3]" }
|
anytext '=' anytext { "$_[1]$_[2]$_[3]" }
|
anytext '+' anytext { "$_[1]$_[2]$_[3]" }
|
anytext '~' anytext { "$_[1]$_[2]$_[3]" }
|
anytext '(' commalisttext ')' anytext { "$_[1]$_[2]$_[3]$_[4]$_[5]" }
|
anytext '{' commalisttext '}' anytext { "$_[1]$_[2]$_[3]$_[4]$_[5]" }
;
identifier:
IDENTIFIER
;
optional_identifier:
#empty { undef }
|
IDENTIFIER
;
constant:
CONSTANT
;
text:
TEXT { "\"$_[1]\"" }
;
optional_semicolon:
#empty
|
';'
;
#####################################
# start code
%%
use Parse::Pidl qw(error);
#####################################################################
# flatten an array of hashes into a single hash
sub FlattenHash($)
{
my $a = shift;
my %b;
for my $d (@{$a}) {
for my $k (keys %{$d}) {
$b{$k} = $d->{$k};
}
}
return \%b;
}
#####################################################################
# traverse a perl data structure removing any empty arrays or
# hashes and any hash elements that map to undef
sub CleanData($)
{
sub CleanData($);
my($v) = shift;
return undef if (not defined($v));
if (ref($v) eq "ARRAY") {
foreach my $i (0 .. $#{$v}) {
CleanData($v->[$i]);
}
# this removes any undefined elements from the array
@{$v} = grep { defined $_ } @{$v};
} elsif (ref($v) eq "HASH") {
foreach my $x (keys %{$v}) {
CleanData($v->{$x});
if (!defined $v->{$x}) {
delete($v->{$x});
next;
}
}
}
return $v;
}
sub _Error {
if (exists $_[0]->YYData->{ERRMSG}) {
error($_[0]->YYData, $_[0]->YYData->{ERRMSG});
delete $_[0]->YYData->{ERRMSG};
return;
}
my $last_token = $_[0]->YYData->{LAST_TOKEN};
error($_[0]->YYData, "Syntax error near '$last_token'");
}
sub _Lexer($)
{
my($parser)=shift;
$parser->YYData->{INPUT} or return('',undef);
again:
$parser->YYData->{INPUT} =~ s/^[ \t]*//;
for ($parser->YYData->{INPUT}) {
if (/^\#/) {
# Linemarker format is described at
# https://gcc.gnu.org/onlinedocs/cpp/Preprocessor-Output.html
if (s/^\# (\d+) \"(.*?)\"(( \d+){1,4}|)//) {
$parser->YYData->{LINE} = $1-1;
$parser->YYData->{FILE} = $2;
goto again;
}
if (s/^\#line (\d+) \"(.*?)\"( \d+|)//) {
$parser->YYData->{LINE} = $1-1;
$parser->YYData->{FILE} = $2;
goto again;
}
if (s/^(\#.*)$//m) {
goto again;
}
}
if (s/^(\n)//) {
$parser->YYData->{LINE}++;
goto again;
}
if (s/^\"(.*?)\"//) {
$parser->YYData->{LAST_TOKEN} = $1;
return('TEXT',$1);
}
if (s/^(\d+)(\W|$)/$2/) {
$parser->YYData->{LAST_TOKEN} = $1;
return('CONSTANT',$1);
}
if (s/^([\w_]+)//) {
$parser->YYData->{LAST_TOKEN} = $1;
if ($1 =~
/^(coclass|interface|import|importlib
|include|cpp_quote|typedef
|union|struct|enum|bitmap|pipe
|void|const|unsigned|signed)$/x) {
return $1;
}
return('IDENTIFIER',$1);
}
if (s/^(.)//s) {
$parser->YYData->{LAST_TOKEN} = $1;
return($1,$1);
}
}
}
sub parse_string
{
my ($data,$filename) = @_;
my $self = new Parse::Pidl::IDL;
$self->YYData->{FILE} = $filename;
$self->YYData->{INPUT} = $data;
$self->YYData->{LINE} = 0;
$self->YYData->{LAST_TOKEN} = "NONE";
my $idl = $self->YYParse( yylex => \&_Lexer, yyerror => \&_Error );
return CleanData($idl);
}
sub parse_file($$)
{
my ($filename,$incdirs) = @_;
my $saved_delim = $/;
undef $/;
my $cpp = $ENV{CPP};
my $options = "";
if (! defined $cpp) {
if (defined $ENV{CC}) {
$cpp = "$ENV{CC}";
$options = "-E";
} else {
$cpp = "cpp";
}
}
my $includes = join('',map { " -I$_" } @$incdirs);
my $data = `$cpp $options -D__PIDL__$includes -xc "$filename"`;
$/ = $saved_delim;
return parse_string($data, $filename);
} |
|
Perl | wireshark/tools/pidl/Makefile.PL | use ExtUtils::MakeMaker;
WriteMakefile(
'NAME' => 'Parse::Pidl',
'VERSION_FROM' => 'lib/Parse/Pidl.pm',
'EXE_FILES' => [ 'pidl' ],
'test' => { 'TESTS' => 'tests/*.pl' }
);
sub MY::postamble {
<<'EOT';
lib/Parse/Pidl/IDL.pm: idl.yp
yapp -m 'Parse::Pidl::IDL' -o lib/Parse/Pidl/IDL.pm idl.yp
lib/Parse/Pidl/Expr.pm: expr.yp
yapp -m 'Parse::Pidl::Expr' -o lib/Parse/Pidl/Expr.pm expr.yp
EOT
} |
wireshark/tools/pidl/MANIFEST | MANIFEST
tests/parse_idl.pl
tests/Util.pm
tests/ndr_refptr.pl
tests/ndr_string.pl
tests/ndr_simple.pl
tests/ndr_align.pl
tests/ndr_alloc.pl
tests/ndr_array.pl
tests/ndr.pl
tests/samba-ndr.pl
tests/util.pl
tests/test_util.pl
tests/ndr_represent.pl
tests/ndr_compat.pl
tests/ndr_fullptr.pl
tests/ndr_tagtype.pl
tests/header.pl
lib/Parse/Pidl/Samba3/ClientNDR.pm
lib/Parse/Pidl/Samba3/ServerNDR.pm
lib/Parse/Pidl/Samba4/NDR/Server.pm
lib/Parse/Pidl/Samba4/NDR/Parser.pm
lib/Parse/Pidl/Samba4/NDR/Client.pm
lib/Parse/Pidl/Samba4/Header.pm
lib/Parse/Pidl/Samba4/TDR.pm
lib/Parse/Pidl/Samba4/Template.pm
lib/Parse/Pidl/Samba4.pm
lib/Parse/Pidl/Wireshark/Conformance.pm
lib/Parse/Pidl/Wireshark/NDR.pm
lib/Parse/Pidl/Typelist.pm
lib/Parse/Pidl/Dump.pm
lib/Parse/Pidl/Compat.pm
lib/Parse/Pidl/Util.pm
lib/Parse/Pidl/NDR.pm
lib/Parse/Pidl.pm
Makefile.PL
idl.yp
TODO
README
pidl
META.yml |
|
YAML | wireshark/tools/pidl/META.yml | name: Parse-Pidl
abstract: Generate parsers / DCE/RPC-clients from IDL
author:
- Andrew Tridgell <[email protected]>
- Jelmer Vernooij <[email protected]>
- Stefan Metzmacher <[email protected]>
- Tim Potter <[email protected]>
license: gplv3
installdirs: site
homepage: http://www.samba.org/
bugtracker: http://bugzilla.samba.org/
requires:
Parse::Yapp: 0
recommends:
Data::Dumper: 0
meta-spec:
version: 1.3
url: http://module-build.sourceforge.net/META-spec-v1.3.html |
wireshark/tools/pidl/pidl | #!/usr/bin/env perl
###################################################
# package to parse IDL files and generate code for
# rpc functions in Samba
# Copyright [email protected] 2000-2003
# Copyright [email protected] 2005-2007
# released under the GNU GPL
=pod
=head1 NAME
pidl - An IDL compiler written in Perl
=head1 SYNOPSIS
pidl --help
pidl [--outputdir[=OUTNAME]] [--includedir DIR...] [--parse-idl-tree] [--dump-idl-tree] [--dump-ndr-tree] [--header[=OUTPUT]] [--python[=OUTPUT]] [--ndr-parser[=OUTPUT]] [--client] [--server] [--warn-compat] [--quiet] [--verbose] [--template] [--ws-parser[=OUTPUT]] [--diff] [--dump-idl] [--tdr-parser[=OUTPUT]] [--samba3-ndr-client[=OUTPUT]] [--samba3-ndr-server[=OUTPUT]] [--typelib=[OUTPUT]] [<idlfile>.idl]...
=head1 DESCRIPTION
pidl is an IDL compiler written in Perl that aims to be somewhat
compatible with the midl compiler. IDL is short for
"Interface Definition Language".
pidl can generate stubs for DCE/RPC server code, DCE/RPC
client code and Wireshark dissectors for DCE/RPC traffic.
IDL compilers like pidl take a description
of an interface as their input and use it to generate C code
(though support for other languages may be added later) that
can use these interfaces, pretty-print data sent
using these interfaces, or even generate Wireshark
dissectors that can parse data sent over the
wire by these interfaces.
pidl takes IDL files in the same format as is used by midl,
converts it to a .pidl file (which contains pidl's internal representation of the interface) and can then generate whatever output you need.
.pidl files should be used for debugging purposes only. Write your
interface definitions in .idl format.
The goal of pidl is to implement an IDL compiler that can be used
while developing the RPC subsystem in Samba (for
both marshalling/unmarshalling and debugging purposes).
=head1 OPTIONS
=over 4
=item I<--help>
Show list of available options.
=item I<--version>
Show pidl version
=item I<--outputdir OUTNAME>
Write output files to the specified directory. Defaults to the current
directory.
=item I<--includedir DIR>
Add DIR to the search path used by the preprocessor. This option can be
specified multiple times.
=item I<--parse-idl-tree>
Read internal tree structure from input files rather
than assuming they contain IDL.
=item I<--dump-idl>
Generate a new IDL file. File will be named OUTNAME.idl.
=item I<--header>
Generate a C header file for the specified interface. Filename defaults to OUTNAME.h.
=item I<--ndr-parser>
Generate a C file and C header containing NDR parsers. The filename for
the parser defaults to ndr_OUTNAME.c. The header filename will be the
parser filename with the extension changed from .c to .h.
=item I<--tdr-parser>
Generate a C file and C header containing TDR parsers. The filename for
the parser defaults to tdr_OUTNAME.c. The header filename will be the
parser filename with the extension changed from .c to .h.
=item I<--typelib>
Write type information to the specified file.
=item I<--server>
Generate boilerplate for the RPC server that implements
the interface. Filename defaults to ndr_OUTNAME_s.c.
=item I<--template>
Generate stubs for a RPC server that implements the interface. Output will
be written to stdout.
=item I<--ws-parser>
Generate a Wireshark dissector (in C) and header file. The dissector filename
defaults to packet-dcerpc-OUTNAME.c while the header filename defaults to
packet-dcerpc-OUTNAME.h.
Pidl will read additional data from a Wireshark conformance file if present.
Such a file should have the same location as the IDL file but with the
extension I<cnf> rather than I<idl>. See L<Parse::Pidl::Wireshark::Conformance>
for details on the format of this file.
=item I<--diff>
Parse an IDL file, generate a new IDL file based on the internal data
structures and see if there are any differences with the original IDL file.
Useful for debugging pidl.
=item I<--dump-idl-tree>
Tell pidl to dump the internal tree representation of an IDL
file to disk. Useful for debugging pidl.
=item I<--dump-ndr-tree>
Tell pidl to dump the internal NDR information tree it generated
from the IDL file to disk. Useful for debugging pidl.
=item I<--samba3-ndr-client>
Generate client calls for Samba3, to be placed in rpc_client/. Instead of
calling out to the code in Samba3's rpc_parse/, this will call out to
Samba4's NDR code instead.
=item I<--samba3-ndr-server>
Generate server calls for Samba3, to be placed in rpc_server/. Instead of
calling out to the code in Samba3's rpc_parse/, this will call out to
Samba4's NDR code instead.
=back
=head1 IDL SYNTAX
IDL files are always preprocessed using the C preprocessor.
Pretty much everything in an interface (the interface itself, functions,
parameters) can have attributes (also called properties).
Attributes always precede the element they apply to and are surrounded
by square brackets ([]). Multiple attributes are separated by commas;
arguments to attributes are specified between parentheses.
See the section COMPATIBILITY for the list of attributes that
pidl supports.
C-style comments can be used.
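As a brief illustration (the interface name, function name and uuid value
here are invented), attributes on an interface, a function and its
parameters might look like this:

[uuid("12345678-1234-abcd-ef00-0123456789ab")]
interface example
{
	long example_fn([in] long level, [out,ref] long *result);
}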
=head2 CONFORMANT ARRAYS
A conformant array is one that ends in [*] or []. The strange
things about conformant arrays are that they can only appear as the last
element of a structure (unless there is a pointer to the conformant array,
of course) and the array size appears before the structure itself on the wire.
So, in this example:
typedef struct {
long abc;
long count;
long foo;
[size_is(count)] long s[*];
} Struct1;
it appears like this:
[size_is] [abc] [count] [foo] [s...]
the first [size_is] field is the allocation size of the array, and
occurs before the array elements and even before the structure
alignment.
Note that size_is() can refer to a constant, but that doesn't change
the wire representation. It does not make the array a fixed array.
midl.exe would write the above array as the following C header:
typedef struct {
long abc;
long count;
long foo;
long s[1];
} Struct1;
pidl takes a different approach, and writes it like this:
typedef struct {
long abc;
long count;
long foo;
long *s;
} Struct1;
=head2 VARYING ARRAYS
A varying array looks like this:
typedef struct {
long abc;
long count;
long foo;
[size_is(count)] long *s;
} Struct1;
This will look like this on the wire:
[abc] [count] [foo] [PTR_s] [count] [s...]
=head2 FIXED ARRAYS
A fixed array looks like this:
typedef struct {
long s[10];
} Struct1;
The NDR representation looks just like 10 separate long
declarations. The array size is not encoded on the wire.
pidl also supports "inline" arrays, which are not part of the IDL/NDR
standard. These are declared like this:
typedef struct {
uint32 foo;
uint32 count;
uint32 bar;
long s[count];
} Struct1;
This appears like this:
[foo] [count] [bar] [s...]
Inline arrays are an extension added to support some of the strange
embedded structures in security descriptors and spoolss.
This section is by no means complete. See the OpenGroup and MSDN
documentation for additional information.
=head1 COMPATIBILITY WITH MIDL
=head2 Missing features in pidl
The following MIDL features are not (yet) implemented in pidl
or are implemented with an incompatible interface:
=over
=item *
Asynchronous communication
=item *
Typelibs (.tlb files)
=item *
Datagram support (ncadg_*)
=back
=head2 Supported attributes and statements
in, out, ref, length_is, switch_is, size_is, uuid, case, default, string,
unique, ptr, pointer_default, v1_enum, object, helpstring, range, local,
call_as, endpoint, switch_type, progid, coclass, iid_is, represent_as,
transmit_as, import, include, cpp_quote.
=head2 PIDL Specific properties
=over 4
=item public
The [public] property on a structure or union is a pidl extension that
forces the generated pull/push functions to be non-static. This allows
you to declare types that can be used between modules. If you don't
specify [public], then pull/push functions for anything other than
top-level functions are declared static.
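A minimal sketch (the type name is invented for illustration):

typedef [public] struct {
	long x;
} example_public;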
=item noprint
The [noprint] property is a pidl extension that allows you to specify
that pidl should not generate an ndr_print_*() function for that
structure or union. This is used when you wish to define your own
print function that prints a structure in a nicer manner. A good
example is the use of [noprint] on dom_sid, which allows the
pretty-printing of SIDs.
=item value
The [value(expression)] property is a pidl extension that allows you
to specify the value of a field when it is put on the wire. This
allows fields that always have a well-known value to be automatically
filled in, thus making the API more programmer friendly. The
expression can be any C expression.
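For example (names invented for illustration; the expression is an
ordinary C expression over the other fields):

typedef struct {
	long count;
	[value(count * 2)] long doubled;
} example_value;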
=item relative
The [relative] property can be supplied on a pointer. When it is used
it declares the pointer as a spoolss style "relative" pointer, which
means it appears on the wire as an offset within the current
encapsulating structure. This is not part of normal IDL/NDR, but it is
a very useful extension as it avoids the manual encoding of many
complex structures.
=item subcontext(length)
Specifies that a size of I<length>
bytes should be read, followed by a blob of that size,
which will be parsed as NDR.
subcontext() is deprecated now, and should not be used in new code.
Instead, use represent_as() or transmit_as().
=item flag
Specify boolean options, mostly used for
low-level NDR options. Several options
can be specified using the | character.
Note that flags are inherited by substructures!
=item nodiscriminant
The [nodiscriminant] property on a union means that the usual uint16
discriminant field at the start of the union on the wire is
omitted. This is not normally allowed in IDL/NDR, but is used for some
spoolss structures.
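A sketch of such a union (names invented for illustration):

typedef [nodiscriminant] union {
	[case(0)] long value0;
	[default] short other;
} example_union;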
=item charset(name)
Specify that the array or string uses the specified
charset. If this attribute is specified, pidl will
take care of converting the character data from this format
to the host format. Commonly used values are UCS2, DOS and UTF8.
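For instance, a fixed-length UCS2 string field might be declared like this
(the struct and field names are invented):

typedef struct {
	[charset(UCS2)] uint16 name[32];
} example_charset;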
=back
=head2 Unsupported MIDL properties or statements
aggregatable, appobject, async_uuid, bindable, control,
defaultbind, defaultcollelem, defaultvalue, defaultvtable, dispinterface,
displaybind, dual, entry, first_is, helpcontext, helpfile, helpstringcontext,
helpstringdll, hidden, idl_module, idl_quote, id, immediatebind, importlib,
includelib, last_is, lcid, licensed, max_is, module,
ms_union, no_injected_text, nonbrowsable, noncreatable, nonextensible, odl,
oleautomation, optional, pragma, propget, propputref, propput, readonly,
requestedit, restricted, retval, source, uidefault,
usesgetlasterror, vararg, vi_progid, wire_marshal.
=head1 EXAMPLES
# Generating an Wireshark parser
$ ./pidl --ws-parser -- atsvc.idl
# Generating a TDR parser and header
$ ./pidl --tdr-parser --header -- regf.idl
# Generating a Samba3 client and server
$ ./pidl --samba3-ndr-client --samba3-ndr-server -- dfs.idl
# Generating a Samba4 NDR parser, client and server
$ ./pidl --ndr-parser --ndr-client --ndr-server -- samr.idl
=head1 SEE ALSO
L<https://msdn.microsoft.com/en-us/library/windows/desktop/aa373864%28v=vs.85%29.aspx>
L<https://gitlab.com/wireshark/wireshark/-/wikis/DCE/RPC>,
L<https://www.samba.org/>,
L<yapp(1)>
=head1 LICENSE
pidl is licensed under the GNU General Public License L<https://www.gnu.org/licenses/gpl.html>.
=head1 AUTHOR
pidl was written by Andrew Tridgell, Stefan Metzmacher, Tim Potter and Jelmer
Vernooij. The current maintainer is Jelmer Vernooij.
This manpage was written by Jelmer Vernooij, partially based on the original
pidl README by Andrew Tridgell.
=cut
use strict;
use FindBin qw($RealBin $Script);
use lib "$RealBin/lib";
use Getopt::Long;
use File::Basename;
use Parse::Pidl qw ( $VERSION );
use Parse::Pidl::Util;
use Parse::Pidl::ODL;
#####################################################################
# save a data structure into a file
sub SaveStructure($$)
{
my($filename,$v) = @_;
FileSave($filename, Parse::Pidl::Util::MyDumper($v));
}
#####################################################################
# load a data structure from a file (as saved with SaveStructure)
sub LoadStructure($)
{
my $f = shift;
my $contents = FileLoad($f);
defined $contents || return undef;
return eval "$contents";
}
#####################################################################
# read a file into a string
sub FileLoad($)
{
my($filename) = shift;
local(*INPUTFILE);
open(INPUTFILE, $filename) || return undef;
my($saved_delim) = $/;
undef $/;
my($data) = <INPUTFILE>;
close(INPUTFILE);
$/ = $saved_delim;
return $data;
}
#####################################################################
# write a string into a file
sub FileSave($$)
{
my($filename) = shift;
my($v) = shift;
local(*FILE);
open(FILE, ">$filename") || die "can't open $filename";
print FILE $v;
close(FILE);
}
my(@opt_incdirs) = ();
my($opt_help) = 0;
my($opt_version) = 0;
my($opt_parse_idl_tree) = 0;
my($opt_dump_idl_tree);
my($opt_dump_ndr_tree);
my($opt_dump_idl) = 0;
my($opt_diff) = 0;
my($opt_header);
my($opt_samba3_header);
my($opt_samba3_parser);
my($opt_samba3_server);
my($opt_samba3_ndr_client);
my($opt_samba3_ndr_server);
my($opt_samba3_template) = 0;
my($opt_template) = 0;
my($opt_client);
my($opt_typelib);
my($opt_server);
my($opt_ndr_parser);
my($opt_tdr_parser);
my($opt_ws_parser);
my($opt_python);
my($opt_quiet) = 0;
my($opt_outputdir) = '.';
my($opt_verbose) = 0;
my($opt_warn_compat) = 0;
my($opt_dcom_proxy);
my($opt_com_header);
#########################################
# display help text
sub ShowHelp()
{
print "perl IDL parser and code generator\n";
ShowVersion();
print"
Copyright (C) Andrew Tridgell <tridge\@samba.org>
Copyright (C) Jelmer Vernooij <jelmer\@samba.org>
Usage: $Script [options] [--] <idlfile> [<idlfile>...]
Generic Options:
--help this help page
--version show pidl version
--outputdir=OUTDIR put output in OUTDIR/ [.]
--warn-compat warn about incompatibility with other compilers
--quiet be quiet
--verbose be verbose
--includedir DIR search DIR for included files
Debugging:
--dump-idl-tree[=FILE] dump internal representation to file [BASENAME.pidl]
--parse-idl-tree read internal representation instead of IDL
--dump-ndr-tree[=FILE] dump internal NDR data tree to file [BASENAME.ndr]
--dump-idl regenerate IDL file
--diff run diff on original IDL and dumped output
--typelib print type information
Samba 4 output:
--header[=OUTFILE] create generic header file [BASENAME.h]
--ndr-parser[=OUTFILE] create a C NDR parser [ndr_BASENAME.c]
--client[=OUTFILE] create a C NDR client [ndr_BASENAME_c.c]
--tdr-parser[=OUTFILE] create a C TDR parser [tdr_BASENAME.c]
--python[=OUTFILE] create python wrapper file [py_BASENAME.c]
--server[=OUTFILE] create server boilerplate [ndr_BASENAME_s.c]
--template print a template for a pipe
--dcom-proxy[=OUTFILE] create DCOM proxy [ndr_BASENAME_p.c]
--com-header[=OUTFILE] create header for COM [com_BASENAME.h]
Samba 3 output:
--samba3-ndr-client[=OUTF] create client calls for Samba3
using Samba4's NDR code [cli_BASENAME.c]
--samba3-ndr-server[=OUTF] create server call wrapper for Samba3
using Samba4's NDR code [srv_BASENAME.c]
--samba3-template print a template for a pipe
Wireshark parsers:
--ws-parser[=OUTFILE] create Wireshark parser and header
\n";
exit(0);
}
#########################################
# Display version
sub ShowVersion()
{
print "perl IDL version $VERSION\n";
}
# main program
my $result = GetOptions (
'help|h|?' => \$opt_help,
'version' => \$opt_version,
'outputdir=s' => \$opt_outputdir,
'dump-idl' => \$opt_dump_idl,
'dump-idl-tree:s' => \$opt_dump_idl_tree,
'parse-idl-tree' => \$opt_parse_idl_tree,
'dump-ndr-tree:s' => \$opt_dump_ndr_tree,
'samba3-ndr-client:s' => \$opt_samba3_ndr_client,
'samba3-ndr-server:s' => \$opt_samba3_ndr_server,
'samba3-template' => \$opt_samba3_template,
'header:s' => \$opt_header,
'server:s' => \$opt_server,
'typelib:s' => \$opt_typelib,
'tdr-parser:s' => \$opt_tdr_parser,
'template' => \$opt_template,
'ndr-parser:s' => \$opt_ndr_parser,
'client:s' => \$opt_client,
'ws-parser:s' => \$opt_ws_parser,
'python' => \$opt_python,
'diff' => \$opt_diff,
'dcom-proxy:s' => \$opt_dcom_proxy,
'com-header:s' => \$opt_com_header,
'quiet' => \$opt_quiet,
'verbose' => \$opt_verbose,
'warn-compat' => \$opt_warn_compat,
'includedir=s@' => \@opt_incdirs
);
if (not $result) {
exit(1);
}
if ($opt_help) {
ShowHelp();
exit(0);
}
if ($opt_version) {
ShowVersion();
exit(0);
}
sub process_file($)
{
my $idl_file = shift;
my $outputdir = $opt_outputdir;
my $pidl;
my $ndr;
my $basename = basename($idl_file, ".idl");
unless ($opt_quiet) { print "Compiling $idl_file\n"; }
if ($opt_parse_idl_tree) {
$pidl = LoadStructure($idl_file);
defined $pidl || die "Failed to load $idl_file";
} else {
require Parse::Pidl::IDL;
$pidl = Parse::Pidl::IDL::parse_file($idl_file, \@opt_incdirs);
defined $pidl || die "Failed to parse $idl_file";
}
require Parse::Pidl::Typelist;
Parse::Pidl::Typelist::LoadIdl($pidl, $basename);
if (defined($opt_dump_idl_tree)) {
my($pidl_file) = ($opt_dump_idl_tree or "$outputdir/$basename.pidl");
SaveStructure($pidl_file, $pidl) or die "Failed to save $pidl_file\n";
}
if ($opt_dump_idl) {
require Parse::Pidl::Dump;
print Parse::Pidl::Dump($pidl);
}
if ($opt_diff) {
my($tempfile) = "$outputdir/$basename.tmp";
FileSave($tempfile, IdlDump::Dump($pidl));
system("diff -wu $idl_file $tempfile");
unlink($tempfile);
}
my $comh_filename = ($opt_com_header or "$outputdir/com_$basename.h");
if (defined($opt_com_header)) {
require Parse::Pidl::Samba4::COM::Header;
my $res = Parse::Pidl::Samba4::COM::Header::Parse($pidl,"$outputdir/ndr_$basename.h");
if ($res) {
FileSave($comh_filename, $res);
}
}
if (defined($opt_dcom_proxy)) {
require Parse::Pidl::Samba4::COM::Proxy;
my $res = Parse::Pidl::Samba4::COM::Proxy::Parse($pidl,$comh_filename);
if ($res) {
my ($client) = ($opt_dcom_proxy or "$outputdir/$basename\_p.c");
FileSave($client, $res);
}
}
if ($opt_warn_compat) {
require Parse::Pidl::Compat;
Parse::Pidl::Compat::Check($pidl);
}
$pidl = Parse::Pidl::ODL::ODL2IDL($pidl, dirname($idl_file), \@opt_incdirs);
if (defined($opt_ws_parser)) {
require Parse::Pidl::Wireshark::NDR;
my $cnffile = $idl_file;
$cnffile =~ s/\.idl$/\.cnf/;
my $generator = new Parse::Pidl::Wireshark::NDR();
$generator->Initialize($cnffile);
}
if (defined($opt_ws_parser) or
defined($opt_client) or
defined($opt_server) or
defined($opt_header) or
defined($opt_ndr_parser) or
defined($opt_python) or
defined($opt_dump_ndr_tree) or
defined($opt_samba3_header) or
defined($opt_samba3_parser) or
defined($opt_samba3_server) or
defined($opt_samba3_ndr_client) or
defined($opt_samba3_ndr_server)) {
require Parse::Pidl::NDR;
$ndr = Parse::Pidl::NDR::Parse($pidl);
}
if (defined($opt_dump_ndr_tree)) {
my($ndr_file) = ($opt_dump_ndr_tree or "$outputdir/$basename.ndr");
SaveStructure($ndr_file, $ndr) or die "Failed to save $ndr_file\n";
}
my $gen_header = ($opt_header or "$outputdir/$basename.h");
if (defined($opt_header)) {
require Parse::Pidl::Samba4::Header;
FileSave($gen_header, Parse::Pidl::Samba4::Header::Parse($ndr));
}
my $h_filename = "$outputdir/ndr_$basename.h";
my $c_header = "$outputdir/ndr_$basename\_c.h";
if (defined($opt_client) or defined($opt_samba3_ndr_client)) {
require Parse::Pidl::Samba4::NDR::Client;
my ($c_client) = ($opt_client or "$outputdir/ndr_$basename\_c.c");
$c_header = $c_client;
$c_header =~ s/\.c$/.h/;
my $generator = new Parse::Pidl::Samba4::NDR::Client();
my ($srcd,$hdrd) = $generator->Parse(
$ndr,$gen_header,$h_filename,$c_header);
FileSave($c_client, $srcd);
FileSave($c_header, $hdrd);
}
if (defined($opt_python)) {
require Parse::Pidl::Samba4::Python;
my $generator = new Parse::Pidl::Samba4::Python();
my ($prsr) = $generator->Parse($basename, $ndr,
"$outputdir/ndr_$basename\_c.h", $h_filename);
FileSave("$outputdir/py_$basename.c", $prsr);
}
if (defined($opt_server)) {
require Parse::Pidl::Samba4::NDR::Server;
FileSave(($opt_server or "$outputdir/ndr_$basename\_s.c"), Parse::Pidl::Samba4::NDR::Server::Parse($ndr,$h_filename));
}
if (defined($opt_ndr_parser)) {
my $parser_fname = ($opt_ndr_parser or "$outputdir/ndr_$basename.c");
require Parse::Pidl::Samba4::NDR::Parser;
my $generator = new Parse::Pidl::Samba4::NDR::Parser();
my ($header,$parser) = $generator->Parse($ndr, $gen_header, $h_filename);
FileSave($parser_fname, $parser);
FileSave($h_filename, $header);
}
if (defined($opt_ws_parser)) {
require Parse::Pidl::Wireshark::NDR;
my($eparser) = ($opt_ws_parser or "$outputdir/packet-dcerpc-$basename.c");
my $eheader = $eparser;
$eheader =~ s/\.c$/\.h/;
my $cnffile = $idl_file;
$cnffile =~ s/\.idl$/\.cnf/;
my $generator = new Parse::Pidl::Wireshark::NDR();
my ($dp, $dh) = $generator->Parse($ndr, $idl_file, $eheader, $cnffile);
FileSave($eparser, $dp) if defined($dp);
FileSave($eheader, $dh) if defined($dh);
}
if (defined($opt_tdr_parser)) {
my $tdr_parser = ($opt_tdr_parser or "$outputdir/tdr_$basename.c");
my $tdr_header = $tdr_parser;
$tdr_header =~ s/\.c$/\.h/;
require Parse::Pidl::Samba4::TDR;
my $generator = new Parse::Pidl::Samba4::TDR();
my ($hdr,$prsr) = $generator->Parser($pidl, $tdr_header, $gen_header);
FileSave($tdr_parser, $prsr);
FileSave($tdr_header, $hdr);
}
if (defined($opt_typelib)) {
my $typelib = ($opt_typelib or "$outputdir/$basename.tlb");
require Parse::Pidl::Typelist;
FileSave($typelib, Parse::Pidl::Typelist::GenerateTypeLib());
}
if ($opt_template) {
require Parse::Pidl::Samba4::Template;
print Parse::Pidl::Samba4::Template::Parse($pidl);
}
if ($opt_samba3_template) {
require Parse::Pidl::Samba3::Template;
print Parse::Pidl::Samba3::Template::Parse($pidl);
}
if (defined($opt_samba3_ndr_client)) {
my $client = ($opt_samba3_ndr_client or "$outputdir/cli_$basename.c");
my $header = $client; $header =~ s/\.c$/\.h/;
require Parse::Pidl::Samba3::ClientNDR;
my $generator = new Parse::Pidl::Samba3::ClientNDR();
my ($c_code,$h_code) = $generator->Parse($ndr, $header, $c_header);
FileSave($client, $c_code);
FileSave($header, $h_code);
}
if (defined($opt_samba3_ndr_server)) {
my $server = ($opt_samba3_ndr_server or "$outputdir/srv_$basename.c");
my $header = $server; $header =~ s/\.c$/\.h/;
require Parse::Pidl::Samba3::ServerNDR;
my ($c_code,$h_code) = Parse::Pidl::Samba3::ServerNDR::Parse($ndr, $header, $h_filename);
FileSave($server, $c_code);
FileSave($header, $h_code);
}
}
if (scalar(@ARGV) == 0) {
print "$Script: no input files\n";
exit(1);
}
process_file($_) foreach (@ARGV); |
|
wireshark/tools/pidl/README | Introduction:
=============
This directory contains the source code of the pidl (Perl IDL)
compiler for Samba 4.
The main sources for pidl are available using Git as part of
the Samba source tree. Use:
git clone git://git.samba.org/samba.git
Pidl works by building a parse tree from a .pidl file (a simple
dump of its internal parse tree) or a .idl file
(a format much like the IDL file format midl uses).
The IDL file parser is in idl.yp (a yacc file converted to
Perl code by yapp).
Standalone installation:
========================
Run Makefile.PL to generate the Makefile.
Then run "make install" (as root) to install.
Internals overview:
===================
After a parse tree is present, pidl will call one of its backends
(which one depends on the options given on the command line). Here is
a list of current backends:
-- Generic --
Parse::Pidl::Dump - Converts the parse tree back to an IDL file
Parse::Pidl::Samba4::Header - Generates header file with data structures defined in IDL file
Parse::Pidl::NDR - Generates intermediate data structures for use by NDR parsers/generators
Parse::Pidl::ODL - Generates IDL structures from ODL structures for use in the NDR parser generator
Parse::Pidl::Test - Utility functions for use in pidl's testsuite
-- Samba NDR --
Parse::Pidl::Samba4::NDR::Client - Generates client call functions in C using the NDR parser
Parse::Pidl::Samba4::NDR::Parser - Generates pull/push functions for parsing NDR
Parse::Pidl::Samba4::NDR::Server - Generates server side implementation in C
Parse::Pidl::Samba4::TDR - Parser generator for the "Trivial Data Representation"
Parse::Pidl::Samba4::Template - Generates stubs in C for server implementation
Parse::Pidl::Samba4::Python - Generates bindings for Python
-- Samba COM / DCOM --
Parse::Pidl::Samba4::COM::Proxy - Generates proxy object for DCOM (client-side)
Parse::Pidl::Samba4::COM::Stub - Generates stub call handler for DCOM (server-side)
Parse::Pidl::Samba4::COM::Header - Generates headers for COM
-- Wireshark --
Parse::Pidl::Wireshark::NDR - Generates a parser for the Wireshark network sniffer
Parse::Pidl::Wireshark::Conformance - Reads conformance files containing additional data for generating Wireshark parsers
-- Utility modules --
Parse::Pidl::Util - Misc utility functions used by *.pm and pidl.pl
Parse::Pidl::Typelist - Utility functions for keeping track of known types and their representation in C
Tips for hacking on pidl:
- Inspect pidl's parse tree by using the --keep option and looking at the
generated .pidl file.
- The various backends have a lot in common, if you don't understand how one
implements something, look at the others.
- See pidl(1) and the documentation on midl
- See 'info bison' and yapp(1) for information on the file format of idl.yp
- Run the tests (all in tests/) |
|
wireshark/tools/pidl/TODO | - warn when union instances don't have a discriminant
- true multiple dimension array / strings in arrays support
- compatibility mode for generating MIDL-readable data:
- strip out pidl-specific properties
- make bitmap an optional attribute on enum
- support nested elements
- support typedefs properly (e.g. allow "typedef void **bla;")
- make typedefs generate real typedefs
- improve represent_as(): allow it to be used for arrays and other complex
types
- --explain-ndr option that dumps out parse tree ?
- separate tables for NDR and DCE/RPC
- maybe no tables for NDR at all? we only need them for ndrdump
and that can use dlsym()
- allow data structures outside of interfaces
- mem_ctx in the interface rather than as struct ndr member.
- real typelibs
- fix [in,out] handling and allocation for samba3:
- add inout
- make NULL to mean "allocate me"
- remove NDR_AUTO_REF_ALLOC flag
- automatic test generator based on IDL pointer types
- support converting structs to tuples in Python rather than objects
- convert structs with a single mattering member to that member directly, e.g.:
struct bar {
int size;
[size_is(size)] uint32 *array;
};
should be converted to an array of uint32's
- python: fill in size members automatically in some places if the struct isn't being returned
(so we don't have to cope with the array growing) |