repo
stringlengths 5
75
| commit
stringlengths 40
40
| message
stringlengths 6
18.2k
| diff
stringlengths 60
262k
|
---|---|---|---|
redhotpenguin/Class-Factory | dca64152c6f5d00c999268dbb9a2283b32415f6b | renamed from ../../Factory.pm | diff --git a/lib/Class/Factory.pm b/lib/Class/Factory.pm
new file mode 100644
index 0000000..93e5b90
--- /dev/null
+++ b/lib/Class/Factory.pm
@@ -0,0 +1,147 @@
+package Class::Factory;
+
+# $Id$
+
+use strict;
+
+$Class::Factory::VERSION = '0.01';
+
+sub get_factory_class {
+ my ( $item, $factory_type ) = @_;
+ my $class = ref $item || $item;
+ my $map = $item->get_factory_map;
+ my $factory_class = ( ref $map eq 'HASH' )
+ ? $map->{ $factory_type }
+ : $item->get_factory_type( $factory_type );
+ unless ( $factory_class ) {
+ die "Factory type [$factory_type] is not defined in [$class]\n";
+ }
+ return $factory_class;
+}
+
+
+sub add_factory_type {
+ my ( $item, $factory_type, $factory_class ) = @_;
+ my $class = ref $item || $item;
+ unless ( $factory_type ) {
+ die "Cannot add factory type to [$class]: no type defined\n";
+ }
+ unless ( $factory_class ) {
+ die "Cannot add factory type [$factory_type] to [$class]: no class defined\n";
+ }
+
+ my $factory_class = $item->get_factory_type( $factory_type );
+ if ( $factory_class ) {
+ warn "Attempt to add type [$factory_type] to [$class] redundant; ",
+ "type already exists with class [$class]\n";
+ return;
+ }
+
+ eval "require $factory_class";
+ if ( $@ ) {
+ die "Cannot add factory type [$factory_type] to class [$class]: ",
+ "factory class [$factory_class] cannot be required [$@]\n";
+ }
+ my $map = $item->get_factory_map;
+ if ( ref $map eq 'HASH' ) {
+ $map->{ $factory_type } = $factory_class;
+ }
+ else {
+ $item->set_factory_type( $factory_type, $factory_class );
+ }
+ return $factory_class;
+}
+
+
+########################################
+# INTERFACE
+
+# We don't die when these are called because the subclass can define
+# either A + B or C
+
+sub get_factory_type { return undef }
+sub set_factory_type { return undef }
+sub get_factory_map { return undef }
+
+1;
+
+__END__
+
+
+=head1 NAME
+
+Class::Factory - Base class for factory classes
+
+=head1 SYNOPSIS
+
+ package My::Factory;
+
+ use base qw( Class::Factory );
+
+ my %TYPES = ();
+
+ sub new {
+ my ( $class, $type, $params ) = @_;
+ my $factory_class = $class->get_factory_class( $type );
+ return bless( $params, $factory_class );
+ }
+
+ # SIMPLE: Let the parent know about our types
+
+ sub get_factory_map { return \%TYPES }
+
+ # FLEXIBLE: Let the parent know about our types
+
+ sub get_factory_type {
+ my ( $class, $type ) = @_;
+ return $TYPES{ $type };
+ }
+
+ sub set_factory_type {
+ my ( $class, $type, $factory_class ) = @_;
+ $TYPES{ $type } = $factory_class;
+ }
+
+ # Add our default types
+
+ My::Factory->add_factory_type( perl => 'My::Factory::Perl' );
+ My::Factory->add_factory_type( blech => 'My::Factory::Blech' );
+
+ 1;
+
+ # Adding a new factory type in code
+
+ My::Factory->add_factory_type( custom => 'Other::Custom::Class' );
+ my $custom_object = My::Factory->new( 'custom', { this => 'that' } );
+
+=head1 DESCRIPTION
+
+This is a simple module that factory classes can use to generate new
+types of objects on the fly. The base class defines two methods for
+subclasses to use: C<get_factory_class()> and
+C<add_factory_type()>. Subclasses must define either
+C<get_factory_map()> or both C<get_factory_type()> and
+C<set_factory_type()>.
+
+=head1 METHODS
+
+B<get_factory_class( $factory_type )>
+
+B<add_factory_type( $factory_type, $factory_class )>
+
+=head1 COPYRIGHT
+
+Copyright (c) 2002 Chris Winters. All rights reserved.
+
+This library is free software; you can redistribute it and/or modify
+it under the same terms as Perl itself.
+
+=head1 SEE ALSO
+
+L<perl>.
+
+=head1 AUTHOR
+
+Chris Winters <[email protected]>
+
+=cut
|
redhotpenguin/Class-Factory | b9baabdd6e1b77e3a1ef02875cc113c031741dec | Initial revision | diff --git a/Changes b/Changes
new file mode 100644
index 0000000..db6a14d
--- /dev/null
+++ b/Changes
@@ -0,0 +1,6 @@
+Revision history for Perl extension Class::Factory.
+
+0.01 Mon Jan 28 08:35:09 2002
+ - original version; created by h2xs 1.21 with options
+ -A -X -n Class::Factory
+
diff --git a/Factory.pm b/Factory.pm
new file mode 100644
index 0000000..8277167
--- /dev/null
+++ b/Factory.pm
@@ -0,0 +1,128 @@
+package Class::Factory;
+
+use strict;
+
+$Class::Factory::VERSION = '0.01';
+
+sub get_factory_class {
+ my ( $item, $factory_type ) = @_;
+ my $class = ref $item || $item;
+ my $factory_class = $item->get_factory_type( $factory_type );
+ unless ( $factory_class ) {
+ die "Factory type [$factory_type] is not defined in [$class]\n";
+ }
+ return $factory_class;
+}
+
+
+sub add_factory_type {
+ my ( $item, $factory_type, $factory_class ) = @_;
+ my $class = ref $item || $item;
+ unless ( $factory_type ) {
+ die "Cannot add factory type to [$class]: no type defined\n";
+ }
+ unless ( $factory_class ) {
+ die "Cannot add factory type [$factory_type] to [$class]: no class defined\n";
+ }
+
+ my $factory_class = $item->get_factory_type( $factory_type );
+ if ( $factory_class ) {
+ warn "Attempt to add type [$factory_type] to [$class] redundant; ",
+ "type already exists with class [$class]\n";
+ return;
+ }
+
+ eval "require $factory_class";
+ if ( $@ ) {
+ die "Cannot add factory type [$factory_type] to class [$class]: ",
+ "factory class [$factory_class] cannot be required [$@]\n";
+ }
+ $item->set_factory_type( $factory_type, $factory_class );
+ return $factory_class;
+}
+
+
+########################################
+# INTERFACE
+
+sub get_factory_type {
+ my ( $item ) = @_;
+ my $class = ref $item || $item;
+ die "Class [$class] must define get_factory_type()";
+}
+
+sub set_factory_type {
+ my ( $item ) = @_;
+ my $class = ref $item || $item;
+ die "Class [$class] must define set_factory_type()";
+}
+
+1;
+
+__END__
+
+
+=head1 NAME
+
+Class::Factory - Base class for factory classes
+
+=head1 SYNOPSIS
+
+ package My::Factory;
+
+ use base qw( Class::Factory );
+
+ my %TYPES = ();
+
+ sub new {
+ my ( $class, $type, $params ) = @_;
+ my $factory_class = $class->get_factory_class( $type );
+ return bless( $params, $factory_class );
+ }
+
+ # Let the parent know about our types
+
+ sub get_factory_type {
+ my ( $class, $type ) = @_;
+ return $TYPES{ $type };
+ }
+
+ sub set_factory_type {
+ my ( $class, $type, $factory_class ) = @_;
+ $TYPES{ $type } = $factory_class;
+ }
+
+ My::Factory->add_factory_type( perl => 'My::Factory::Perl' );
+ My::Factory->add_factory_type( blech => 'My::Factory::Blech' );
+
+ 1;
+
+ # Adding a new factory type in code
+
+ My::Factory->add_factory_type( custom => 'Other::Custom::Class' );
+ my $custom_object = My::Factory->new( 'custom', { this => 'that' } );
+
+=head1 DESCRIPTION
+
+Simple module that factory classes can use to generate new types of
+objects on the fly. The base class defines two methods for subclasses
+to use: C<get_factory_class()> and C<add_factory_type()>. Subclasses
+must define two methods: C<get_factory_type()> and
+C<set_factory_type()>.
+
+=head1 COPYRIGHT
+
+Copyright (c) 2002 Chris Winters. All rights reserved.
+
+This library is free software; you can redistribute it and/or modify
+it under the same terms as Perl itself.
+
+=head1 SEE ALSO
+
+L<perl>.
+
+=head1 AUTHOR
+
+Chris Winters <[email protected]>
+
+=cut
diff --git a/MANIFEST b/MANIFEST
new file mode 100644
index 0000000..70c8250
--- /dev/null
+++ b/MANIFEST
@@ -0,0 +1,6 @@
+Changes
+Factory.pm
+Makefile.PL
+MANIFEST
+README
+test.pl
diff --git a/Makefile.PL b/Makefile.PL
new file mode 100644
index 0000000..08e8a63
--- /dev/null
+++ b/Makefile.PL
@@ -0,0 +1,12 @@
+use ExtUtils::MakeMaker;
+
+# See lib/ExtUtils/MakeMaker.pm for details of how to influence
+# the contents of the Makefile that is written.
+WriteMakefile(
+ 'NAME' => 'Class::Factory',
+ 'VERSION_FROM' => 'Factory.pm', # finds $VERSION
+ 'PREREQ_PM' => {},
+ ($] >= 5.005 ?
+ (ABSTRACT_FROM => 'Factory.pm',
+ AUTHOR => 'Chris Winters <[email protected]') : ()),
+);
diff --git a/README b/README
new file mode 100644
index 0000000..5f577c1
--- /dev/null
+++ b/README
@@ -0,0 +1,35 @@
+Class/Factory version 0.01
+==========================
+
+The README is used to introduce the module and provide instructions on
+how to install the module, any machine dependencies it may have (for
+example C compilers and installed libraries) and any other information
+that should be provided before the module is installed.
+
+A README file is required for CPAN modules since CPAN extracts the
+README file from a module distribution so that people browsing the
+archive can use it get an idea of the modules uses. It is usually a
+good idea to provide version information here so that people can
+decide whether fixes for the module are worth downloading.
+
+INSTALLATION
+
+To install this module type the following:
+
+ perl Makefile.PL
+ make
+ make test
+ make install
+
+DEPENDENCIES
+
+This module requires these other modules and libraries:
+
+ blah blah blah
+
+COPYRIGHT AND LICENCE
+
+Put the correct copyright and licence information here.
+
+Copyright (C) 2002 A. U. Thor blah blah blah
+
|
espra/espra | ab9deca92ecdc45195890222b559d9b93fd7c514 | ai-rules: add AI-assisted development guidelines | diff --git a/.ai-rules.md b/.ai-rules.md
new file mode 100644
index 0000000..2c75bc4
--- /dev/null
+++ b/.ai-rules.md
@@ -0,0 +1,283 @@
+# Guidelines for AI-Assisted Development
+
+## Overview
+
+**Project Name:** Espra.
+
+**Description:** Espra is a new decentralized platform that combines an Onchain
+UI Browser with a Hyperchain and a novel Common Economic Protocol.
+
+## Repo Layout
+
+This is a monorepo for the various aspects of Espra. Key top-level directories
+are:
+
+- `cmd/`
+
+ - Each sub-directory of `cmd/` is a separate Rust binary crate corresponding
+ to a single binary.
+
+ - The package name must be the exact same as the sub-directory name.
+
+- `doc/`
+
+ - Houses any documentation relating to the project.
+
+- `environ/`
+
+ - Houses any scripts used to set up various environments.
+
+- `lib/`
+
+ - Each sub-directory is an internal Rust library crate corresponding to a
+ single library.
+
+ - The package name for the library is prefixed with `espra-`, but the crate is
+ used through the rest of the code base without the prefix, and with hyphens
+ replaced with underscores.
+
+ For example, if there was a `font-parser` sub-directory within `lib/`, then
+ its package name would be `espra-font-parser`, and it will be imported
+ within the rest of the code base as:
+
+ ```rust
+ use font_parser;
+ ```
+
+Do not use plural words in any directory names within the repo.
+
+## Cargo Workspaces
+
+We use Cargo Workspaces to manage the different crates within the monorepo. The
+root `Cargo.toml` file contains all of the dependencies specified in a single
+place.
+
+Each individual crate's `Cargo.toml` should specify the following:
+
+```toml
+[package]
+name = "<package name>"
+description = "<A concise description for the package>"
+version = "<semver defaulting to 0.0.1>"
+```
+
+In addition, it should specify the following so that the corresponding fields
+are inferred from the root `Cargo.toml`:
+
+```toml
+authors.workspace = true
+edition.workspace = true
+exclude.workspace = true
+homepage.workspace = true
+include.workspace = true
+license.workspace = true
+repository.workspace = true
+rust-version.workspace = true
+```
+
+For binary crates, there should be a segment in the package's `Cargo.toml` that
+specifies the `name` and `path` of the package binary, e.g. within
+`cmd/chaos-client/Cargo.toml`, there should be lines stating:
+
+```toml
+[[bin]]
+name = "chaos-client"
+path = "chaos_client.rs"
+```
+
+For library crates, there should be a segment in the package's `Cargo.toml` that
+specifies the `path` of the entry point for the library, e.g. within
+`lib/font-parser/Cargo.toml`, there should be lines stating:
+
+```toml
+[lib]
+path = "font_parser.rs"
+```
+
+The source files for a crate are stored directly within the specific
+sub-directory for the crate. In this particular case, we do not follow the Rust
+convention of storing the source files within an inner `src/` sub-directory.
+
+Follow the above conventions when creating a new crate.
+
+## Dependencies
+
+Minimize the use of external dependencies so as to minimize the surface area for
+supply chain attacks. When adding new external dependencies to the root
+`Cargo.toml` for the Workspace, they should pin the version with an `=`, e.g.
+
+```toml
+syn = "=2.0.101"
+```
+
+## Tech Stack
+
+Espra is primarily written in Rust:
+
+- Unless stated otherwise, please follow idiomatic Rust practices and use the
+ 2024 edition.
+
+- We are using the latest stable toolchain, i.e. Rust version 1.87+
+
+Other key technologies include:
+
+- `tokio` as the primary async runtime.
+
+- `wgpu` for the WebGPU implementation.
+
+- `winit` for windowing and related events.
+
+## Documentation
+
+The main README for the project is at `README.md`.
+
+Each crate within `lib/` must have a `README.md` specific to that crate which
+documents that crate. Please keep the associated `README.md` file updated as the
+corresponding code changes.
+
+When talking through the design and spec for a particular crate, or a set of
+features in a crate, persist it within a `SPEC.md` file within the crate's
+directory.
+
+Keep `SPEC.md` updated, and if asked to break it down into smaller steps with
+prompts for generating code, write the prompts for each step clearly into a
+separate `TASKS.md` file within the same directory.
+
+All other documentation should be kept in the top-level `doc/` directory, and
+kept updated as the code base evolves.
+
+## Coding Guidelines
+
+- Provide clean, production-grade, high quality code.
+- Use `clippy` and `rustfmt` conventions.
+- Prefer `Result<T, E>` over panics for error handling.
+- Use `Option` for nullable values.
+- Document all public functions with Rustdoc comments.
+- Avoid unsafe code unless absolutely necessary.
+- End all comments with a period.
+- Keep comments concise and understandable.
+- Avoid unnecessary comments.
+- Prefer explicit types in function signatures.
+- Configuration is in `Cargo.toml` and `rustfmt.toml`.
+
+## What to Avoid
+
+- Do not suggest code that uses deprecated Rust features.
+
+- Avoid non-idiomatic patterns (e.g., manual memory management).
+
+- Do not generate code for files in `.gitignore`.
+
+## Commits
+
+We use the `git` version control system on a repo that is hosted on GitHub.
+
+Once you've done what has been asked of you, ALWAYS create a git commit of the
+changes that have been made. Commit messages must cover everything that has been
+changed and staged, not just what you have done.
+
+Commit messages must be of the form:
+
+```
+<area>: <one-line-summary>
+
+<expanded-summary>
+```
+
+The `<area>` should reference the main directory in which the changes have been
+made, e.g. `lib/cli`. The referenced directory must never be more than 4
+directories deep, e.g. `lib` is okay, `lib/cli` is okay, but
+`lib/cli/ui/component/spinner` is not okay.
+
+If the main directory for the changes happens to be the root directory, i.e. the
+top directory of the repo, then:
+
+* If only one top-level file has been changed, use the name of the file, without
+ any leading dots or trailing file extensions, e.g. `README` instead of
+ `README.md`, `gitignore` instead of `.gitignore`, etc.
+
+* If multiple files have been changed, then use `all` for `<area>`.
+
+The `<one-line-summary>` should always be in lowercase, except for when
+referencing any variable or type names or external projects. The total length of
+the `<area>` and `<one-line-summary>` together must never exceed 66 bytes.
+
+The first word of the `<one-line-summary>` must always be a verb, and the line
+should convey the core essence of what has been changed in an informative, but
+concise manner. The line should never end with a period or any other punctuation
+mark.
+
+The `<expanded-summary>` should elaborate on what has been changed so that it is
+useful for anyone who might be looking at it to help fix any issues that might
+have arisen.
+
+The summary must be properly punctuated, and reflowed so that no line of the
+`<expanded-summary>` exceeds 73 bytes.
+
+Here are some examples of how commit messages should look:
+
+```
+cmd/trace: handle Sync event at the beginning of the trace
+
+Currently the code assumes that there's no Sync event at the start of
+the trace, but this hasn't been correct for some time. Count Syncs and
+look for at least one instead of looking for zero.
+```
+
+```
+cmd/internal/obj/s390x: fix potential recursive String call
+
+This String method can potentially recurse infinitely, since %#x will
+apparently call String if the method exists. This isn't well documented,
+but cmd/vet will be updated soon to check this (when we update the
+vendored x/tools dependency) so cut off the recursion by converting to
+the underlying type first.
+```
+
+```
+crypto/tls: reject duplicate TLS 1.3 EncryptedExtensions
+
+When a TLS 1.3 client processes the server's encryptedExtensionsMsg it
+should reject instances that contain duplicate extension types.
+
+RFC 8446 §4.2 says:
+ There MUST NOT be more than one extension of the same type in a given
+ extension block.
+
+This update matches enforcement done in the client hello unmarshalling,
+but applied to the TLS 1.3 encrypted extensions message unmarshalling.
+```
+
+Commits MUST NOT include any lines referencing the AI assisted tools that have
+been used, i.e. there should be no lines like `Generated with` or
+`Co-Authored-By`.
+
+## Testing
+
+All of the code should be well tested, so as to maximize testing coverage across
+the code base.
+
+However, do not go overboard in creating lots of unnecessary tests. Tests should
+be comprehensive, but minimal.
+
+Use simple but descriptive names for test function names. Avoid prefixing these
+function names with `test_`.
+
+When importing the crate that is being tested, make sure to reference it by its
+actual package name, with hyphens converted to underscores, but alias it to the
+name that will be used in the rest of the code base.
+
+For example, within `lib/espra-cli/tests/some_test.rs`, import the crate as:
+
+```rust
+use espra_cli as cli;
+```
+
+Tests must be kept within `tests` sub-directories of packages. The names of
+files within this directory must end with a `_test.rs` suffix. All tests must be
+at the top-level of these files, and not within any inner modules.
+
+Similarly, any examples must be within an `examples` sub-directory and
+benchmarks must be within a `benches` sub-directory.
+
+Always test relevant changes after updating any code.
diff --git a/.aiderrules b/.aiderrules
new file mode 120000
index 0000000..821799d
--- /dev/null
+++ b/.aiderrules
@@ -0,0 +1 @@
+.ai-rules.md
\ No newline at end of file
diff --git a/.clinerules b/.clinerules
new file mode 120000
index 0000000..821799d
--- /dev/null
+++ b/.clinerules
@@ -0,0 +1 @@
+.ai-rules.md
\ No newline at end of file
diff --git a/.cursorrules b/.cursorrules
new file mode 120000
index 0000000..821799d
--- /dev/null
+++ b/.cursorrules
@@ -0,0 +1 @@
+.ai-rules.md
\ No newline at end of file
diff --git a/.roorules b/.roorules
new file mode 120000
index 0000000..821799d
--- /dev/null
+++ b/.roorules
@@ -0,0 +1 @@
+.ai-rules.md
\ No newline at end of file
diff --git a/.windsurfrules b/.windsurfrules
new file mode 120000
index 0000000..821799d
--- /dev/null
+++ b/.windsurfrules
@@ -0,0 +1 @@
+.ai-rules.md
\ No newline at end of file
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 120000
index 0000000..821799d
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1 @@
+.ai-rules.md
\ No newline at end of file
|
espra/espra | b83a03c6c12a0c4e49d146090ff77d521781036b | rustfmt: add config for formatting rust code | diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644
index 0000000..5ff3ba5
--- /dev/null
+++ b/rustfmt.toml
@@ -0,0 +1,25 @@
+# Rust formatting configuration
+
+# Use the 2024 edition
+edition = "2024"
+
+# Common settings that align with ecosystem standards
+max_width = 100
+hard_tabs = false
+tab_spaces = 4
+newline_style = "Auto"
+use_small_heuristics = "Default"
+
+# Import and use formatting
+imports_granularity = "Crate"
+group_imports = "StdExternalCrate"
+reorder_imports = true
+reorder_modules = true
+
+# Match default Rust style
+trailing_comma = "Vertical"
+match_block_trailing_comma = false
+
+# Keep code readable
+use_field_init_shorthand = true
+use_try_shorthand = true
|
espra/espra | 0e1cca0f8eb93c3fa8e4e0d92c3896e227542bb8 | gitignore: expand patterns for macOS, Windows, and AI tools | diff --git a/.gitignore b/.gitignore
index 3fa6db4..677d738 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,34 +1,46 @@
-# System-wide excludes
+# macOS
.DS_Store
+# Windows
+
+Desktop.ini
+Thumbs.db
+
# IDEs
*~
+*.swo
+*.swp
.idea/
.vscode/
+# AI tools
+
+.aider.chat.history.md
+
# Generated files
*.gen.go
*.pb.go
coverage.txt
# Go
*.out
cpu.prof
mem.prof
# Rust
debug/
target/
*.pdb
**/*.rs.bk
# Zig
-zig-cache/
+*.o
+.zig-cache/
zig-out/
|
espra/espra | b9aa6288cda82990356520a8b31bfa5c12fb0a9d | Cargo: set up workspace for the monorepo | diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..cc706fc
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,21 @@
+[workspace]
+members = ["cmd/*", "lib/*"]
+resolver = "3"
+
+[workspace.package]
+authors = ["tav <[email protected]>"]
+edition = "2024"
+exclude = []
+homepage = "https://espra.com"
+include = ["README.md", "**/*.rs"]
+repository = "https://github.com/espra/espra"
+license = "Apache-2.0"
+rust-version = "1.87"
+
+[workspace.dependencies]
+# Internal crates
+cli = { package = "espra-cli", path = "lib/cli" }
+log = { package = "espra-log", path = "lib/log" }
+
+# External crates
+syn = "=2.0.101"
|
espra/espra | 5571ccb661ad55e5953102e4e1b7bb13aa69d079 | LICENSE: place the repo under Apache 2.0 | diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
|
espra/espra | fd1e3f0ba014203503d967857ca06c64c6b1f139 | gitignore: add common Rust-related patterns to ignore | diff --git a/.gitignore b/.gitignore
index a9e0ff6..3fa6db4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,26 +1,34 @@
# System-wide excludes
.DS_Store
# IDEs
*~
.idea/
.vscode/
# Generated files
*.gen.go
*.pb.go
coverage.txt
# Go
*.out
cpu.prof
mem.prof
+# Rust
+
+debug/
+target/
+
+*.pdb
+**/*.rs.bk
+
# Zig
zig-cache/
zig-out/
|
espra/espra | 6a92f8133647ae06ca1493f685a0ff21a9c469b7 | pkg/process: add support for generating remove handlers | diff --git a/pkg/process/process.go b/pkg/process/process.go
index d1e6866..2af0c2a 100644
--- a/pkg/process/process.go
+++ b/pkg/process/process.go
@@ -1,212 +1,247 @@
// Public Domain (-) 2010-present, The Espra Core Authors.
// See the Espra Core UNLICENSE file for details.
// Package process provides utilities for managing the current system process.
package process
import (
"context"
"fmt"
"os"
"os/signal"
"path/filepath"
"runtime/debug"
"slices"
"sync"
"syscall"
)
// OSExit is the function used to terminate the current process. It defaults to
// os.Exit, but can be overridden for testing purposes.
var OSExit = os.Exit
var (
exitDisabled bool
exiting bool
- mu sync.RWMutex // protects exitDisabled, exiting, registry
- registry = map[os.Signal][]func(){}
+ handlerID int
+ mu sync.RWMutex // protects exitDisabled, exiting, handlerID, registry
+ registry = map[os.Signal][]entry{}
testMode = false
testSig = make(chan struct{}, 10)
wait = make(chan struct{})
)
+type entry struct {
+ handler func()
+ id int
+}
+
type lockFile struct {
file string
link string
}
func (l *lockFile) release() {
os.Remove(l.file)
os.Remove(l.link)
}
+// RemoveHandler defines a function for removing a registered signal handler.
+type RemoveHandler func()
+
// Crash will terminate the process with a panic that will generate stacktraces
// for all user-generated goroutines.
func Crash() {
debug.SetTraceback("all")
panic("abort")
}
// CreatePIDFile writes the current process ID to a new file at the given path.
// The written file is removed when Exit is called, or when the process receives
// an os.Interrupt or SIGTERM signal.
func CreatePIDFile(path string) error {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0o660)
if err != nil {
return err
}
fmt.Fprintf(f, "%d", os.Getpid())
err = f.Close()
if err == nil {
SetExitHandler(func() {
os.Remove(path)
})
}
return err
}
// DisableAutoExit will prevent the process from automatically exiting after
// processing os.Interrupt or SIGTERM signals. This will not be enforced if Exit
// is called directly.
func DisableAutoExit() {
mu.Lock()
exitDisabled = true
mu.Unlock()
}
// Exit runs the registered exit handlers, as if the os.Interrupt signal had
// been sent, and then terminates the process with the given status code. Exit
// blocks until the process terminates if it has already been called elsewhere.
func Exit(code int) {
mu.Lock()
if exiting {
mu.Unlock()
if testMode {
testSig <- struct{}{}
}
<-wait
return
}
exiting = true
- handlers := slices.Clone(registry[os.Interrupt])
+ entries := slices.Clone(registry[os.Interrupt])
mu.Unlock()
- for _, handler := range handlers {
- handler()
+ for _, entry := range entries {
+ entry.handler()
}
OSExit(code)
}
// Init tries to acquire a process lock and write the PID file for the current
// process.
func Init(directory string, name string) error {
if err := Lock(directory, name); err != nil {
return err
}
return CreatePIDFile(filepath.Join(directory, name+".pid"))
}
// Lock tries to acquire a process lock in the given directory. The acquired
// lock file is released when Exit is called, or when the process receives an
// os.Interrupt or SIGTERM signal.
//
// This function has only been tested for correctness on Unix systems with
// filesystems where link is atomic. It may not work as expected on NFS mounts
// or on platforms like Windows.
func Lock(directory string, name string) error {
file := filepath.Join(directory, fmt.Sprintf("%s-%d.lock", name, os.Getpid()))
f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY, 0o660)
if err != nil {
return err
}
f.Close()
link := filepath.Join(directory, name+".lock")
err = os.Link(file, link)
if err != nil {
// We don't remove the lock file here so that calling Lock multiple
// times from the same process doesn't remove an existing lock.
return err
}
l := &lockFile{
file: file,
link: link,
}
SetExitHandler(l.release)
return nil
}
// ReapOrphans reaps orphaned child processes and returns whether there are any
// unterminated child processes that are still active.
//
// This is currently a no-op on all platforms except Linux.
func ReapOrphans() bool {
return reap()
}
// ResetHandlers drops all currently registered handlers.
func ResetHandlers() {
mu.Lock()
- registry = map[os.Signal][]func(){}
+ registry = map[os.Signal][]entry{}
mu.Unlock()
}
// RunReaper continuously attempts to reap orphaned child processes until the
// given context is cancelled.
//
// On Linux, this will register the current process as a child subreaper, and
// attempt to reap child processes whenever SIGCHLD is received. On all other
// platforms, this is currently a no-op.
func RunReaper(ctx context.Context) {
runReaper(ctx)
}
// SetExitHandler registers the given handler function to run when receiving
// os.Interrupt or SIGTERM signals. Registered handlers are executed in reverse
// order of when they were set.
-func SetExitHandler(handler func()) {
+func SetExitHandler(handler func()) RemoveHandler {
mu.Lock()
- registry[os.Interrupt] = slices.Insert(registry[os.Interrupt], 0, handler)
- registry[syscall.SIGTERM] = slices.Insert(registry[syscall.SIGTERM], 0, handler)
+ e := entry{handler, handlerID}
+ handlerID++
+ registry[os.Interrupt] = slices.Insert(registry[os.Interrupt], 0, e)
+ registry[syscall.SIGTERM] = slices.Insert(registry[syscall.SIGTERM], 0, e)
mu.Unlock()
+ return func() {
+ removeHandler(e.id, os.Interrupt, syscall.SIGTERM)
+ }
}
// SetSignalHandler registers the given handler function to run when receiving
// the specified signal. Registered handlers are executed in reverse order of
// when they were set.
-func SetSignalHandler(signal os.Signal, handler func()) {
+func SetSignalHandler(signal os.Signal, handler func()) RemoveHandler {
mu.Lock()
- registry[signal] = slices.Insert(registry[signal], 0, handler)
+ e := entry{handler, handlerID}
+ handlerID++
+ registry[signal] = slices.Insert(registry[signal], 0, e)
mu.Unlock()
+ return func() {
+ removeHandler(e.id, signal)
+ }
}
func handleSignals() {
notifier := make(chan os.Signal, 100)
signal.Notify(notifier)
go func() {
for sig := range notifier {
mu.Lock()
disabled := exitDisabled
if !disabled {
if sig == syscall.SIGTERM || sig == os.Interrupt {
exiting = true
}
}
- handlers := slices.Clone(registry[sig])
+ entries := slices.Clone(registry[sig])
mu.Unlock()
- for _, handler := range handlers {
- handler()
+ for _, entry := range entries {
+ entry.handler()
}
if !disabled {
if sig == syscall.SIGTERM || sig == os.Interrupt {
OSExit(1)
}
}
if testMode {
testSig <- struct{}{}
}
}
}()
}
+func removeHandler(id int, signals ...os.Signal) {
+ mu.Lock()
+ for _, signal := range signals {
+ entries := registry[signal]
+ idx := -1
+ for i, entry := range entries {
+ if entry.id == id {
+ idx = i
+ break
+ }
+ }
+ registry[signal] = append(entries[:idx], entries[idx+1:]...)
+ }
+ mu.Unlock()
+}
+
func init() {
handleSignals()
}
diff --git a/pkg/process/process_test.go b/pkg/process/process_test.go
index c1f8cd9..d121309 100644
--- a/pkg/process/process_test.go
+++ b/pkg/process/process_test.go
@@ -1,208 +1,230 @@
// Public Domain (-) 2010-present, The Espra Core Authors.
// See the Espra Core UNLICENSE file for details.
package process
import (
"fmt"
"os"
"path/filepath"
"strconv"
"syscall"
"testing"
"espra.dev/pkg/osexit"
)
func TestCrash(t *testing.T) {
defer func() {
err := recover()
if err == nil {
t.Fatalf("Crash didn't generate an abort error")
}
}()
Crash()
}
func TestCreatePIDFile(t *testing.T) {
reset()
dir := mktemp(t)
defer os.RemoveAll(dir)
fpath := filepath.Join(dir, "test.pid")
err := CreatePIDFile(fpath)
if err != nil {
t.Fatalf("Unexpected error creating PID file: %s", err)
}
written, err := os.ReadFile(fpath)
if err != nil {
t.Fatalf("Unexpected error reading PID file: %s", err)
}
expected := os.Getpid()
pid, err := strconv.ParseInt(string(written), 10, 64)
if err != nil {
t.Fatalf("Unexpected error parsing PID file contents as an int: %s", err)
}
if int(pid) != expected {
t.Fatalf("Mismatching PID file contents: got %d, want %d", int(pid), expected)
}
Exit(2)
if !osexit.Called() || osexit.Status() != 2 {
t.Fatalf("Exit call did not behave as expected")
}
_, err = os.Stat(fpath)
if err == nil {
t.Fatalf("Calling Exit did not remove the created PID file as expected")
}
if !os.IsNotExist(err) {
t.Fatalf("Calling Exit did not remove the created PID file as expected, got error: %s", err)
}
fpath = filepath.Join(dir+"-nonexistent-directory", "test.pid")
err = CreatePIDFile(fpath)
if err == nil {
t.Fatalf("Expected an error when creating PID file in a non-existent directory")
}
}
func TestDisableDefaultExit(t *testing.T) {
reset()
called := false
SetExitHandler(func() {
called = true
})
send(syscall.SIGTERM)
if !osexit.Called() {
t.Fatalf("os.Exit was not called on SIGTERM")
}
if !called {
t.Fatalf("Exit handler not run on SIGTERM")
}
DisableAutoExit()
osexit.Reset()
called = false
resetExiting()
send(syscall.SIGTERM)
if osexit.Called() {
t.Fatalf("os.Exit was called on SIGTERM even after DisableAutoExit()")
}
if !called {
t.Fatalf("Exit handler not run on SIGTERM after DisableAutoExit")
}
}
func TestExit(t *testing.T) {
reset()
called := false
- SetExitHandler(func() {
+ remove := SetExitHandler(func() {
called = true
})
Exit(7)
if !osexit.Called() {
t.Fatalf("Exit did not call os.Exit")
}
status := osexit.Status()
if status != 7 {
t.Fatalf("Exit did not set the right status code: got %d, want 7", status)
}
if !called {
t.Fatalf("Exit handler was not run when calling Exit")
}
osexit.Reset()
called = false
go func() {
Exit(8)
}()
<-testSig
wait <- struct{}{}
if osexit.Called() {
t.Fatalf("Second call to Exit called os.Exit")
}
if called {
t.Fatalf("Second call to Exit resulted in Exit handler being run again")
}
+ resetExiting()
+ osexit.Reset()
+ called = false
+ remove()
+ Exit(9)
+ if !osexit.Called() {
+ t.Fatalf("Third call to Exit didn't call os.Exit")
+ }
+ if called {
+ t.Fatalf("Exit handler called after being removed")
+ }
}
func TestInit(t *testing.T) {
dir := mktemp(t)
defer os.RemoveAll(dir)
err := Init(dir, "core")
if err != nil {
t.Fatalf("Unexpected error initialising process: %s", err)
}
err = Init(dir+"-nonexistent-directory", "core")
if err == nil {
t.Fatalf("Expected an error when calling Init in a non-existing directory")
}
}
func TestLock(t *testing.T) {
reset()
dir := mktemp(t)
defer os.RemoveAll(dir)
err := Lock(dir, "core")
if err != nil {
t.Fatalf("Unexpected error acquiring Lock: %s", err)
}
err = Lock(dir, "core")
if err == nil {
t.Fatalf("Expected an error when calling Lock on an already locked path")
}
fpath := filepath.Join(dir, fmt.Sprintf("core-%d.lock", os.Getpid()))
_, err = os.Stat(fpath)
if err != nil {
t.Fatalf("Unexpected error accessing the raw lock file: %s", err)
}
Exit(2)
_, err = os.Stat(fpath)
if err == nil {
t.Fatalf("Calling Exit did not remove the lock file as expected")
}
if !os.IsNotExist(err) {
t.Fatalf("Calling Exit did not remove the lock file as expected, got error: %s", err)
}
err = Lock(dir+"-nonexistent-directory", "core")
if err == nil {
t.Fatalf("Expected an error when calling Lock in a non-existing directory")
}
}
func TestSignalHandler(t *testing.T) {
reset()
called := false
- SetSignalHandler(syscall.SIGHUP, func() {
+ remove := SetSignalHandler(syscall.SIGHUP, func() {
called = true
})
send(syscall.SIGABRT)
if called {
t.Fatalf("Signal handler erroneously called on SIGABRT")
}
send(syscall.SIGHUP)
if !called {
t.Fatalf("Signal handler not called on SIGHUP")
}
+ called = false
+ send(syscall.SIGHUP)
+ if !called {
+ t.Fatalf("Signal handler not called on SIGHUP the second time")
+ }
+ called = false
+ remove()
+ send(syscall.SIGHUP)
+ if called {
+ t.Fatalf("Signal handler called on SIGHUP after being removed")
+ }
}
func mktemp(t *testing.T) string {
dir, err := os.MkdirTemp("", "core-process")
if err != nil {
t.Skipf("Unable to create temporary directory for tests: %s", err)
}
return dir
}
func reset() {
OSExit = osexit.Set()
testMode = true
ResetHandlers()
resetExiting()
}
func resetExiting() {
mu.Lock()
exiting = false
mu.Unlock()
}
func send(sig syscall.Signal) {
syscall.Kill(syscall.Getpid(), sig)
<-testSig
}
|
espra/espra | a3ac093f1af07f192d9390dbfe42ea6ad6ef4650 | pkg/process: add support for managing the current process | diff --git a/pkg/process/process.go b/pkg/process/process.go
new file mode 100644
index 0000000..d1e6866
--- /dev/null
+++ b/pkg/process/process.go
@@ -0,0 +1,212 @@
+// Public Domain (-) 2010-present, The Espra Core Authors.
+// See the Espra Core UNLICENSE file for details.
+
+// Package process provides utilities for managing the current system process.
+package process
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "runtime/debug"
+ "slices"
+ "sync"
+ "syscall"
+)
+
+// OSExit is the function used to terminate the current process. It defaults to
+// os.Exit, but can be overridden for testing purposes.
+var OSExit = os.Exit
+
+var (
+ exitDisabled bool
+ exiting bool
+ mu sync.RWMutex // protects exitDisabled, exiting, registry
+ registry = map[os.Signal][]func(){}
+ testMode = false
+ testSig = make(chan struct{}, 10)
+ wait = make(chan struct{})
+)
+
+type lockFile struct {
+ file string
+ link string
+}
+
+func (l *lockFile) release() {
+ os.Remove(l.file)
+ os.Remove(l.link)
+}
+
+// Crash will terminate the process with a panic that will generate stacktraces
+// for all user-generated goroutines.
+func Crash() {
+ debug.SetTraceback("all")
+ panic("abort")
+}
+
+// CreatePIDFile writes the current process ID to a new file at the given path.
+// The written file is removed when Exit is called, or when the process receives
+// an os.Interrupt or SIGTERM signal.
+func CreatePIDFile(path string) error {
+ f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0o660)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(f, "%d", os.Getpid())
+ err = f.Close()
+ if err == nil {
+ SetExitHandler(func() {
+ os.Remove(path)
+ })
+ }
+ return err
+}
+
+// DisableAutoExit will prevent the process from automatically exiting after
+// processing os.Interrupt or SIGTERM signals. This will not be enforced if Exit
+// is called directly.
+func DisableAutoExit() {
+ mu.Lock()
+ exitDisabled = true
+ mu.Unlock()
+}
+
+// Exit runs the registered exit handlers, as if the os.Interrupt signal had
+// been sent, and then terminates the process with the given status code. Exit
+// blocks until the process terminates if it has already been called elsewhere.
+func Exit(code int) {
+ mu.Lock()
+ if exiting {
+ mu.Unlock()
+ if testMode {
+ testSig <- struct{}{}
+ }
+ <-wait
+ return
+ }
+ exiting = true
+ handlers := slices.Clone(registry[os.Interrupt])
+ mu.Unlock()
+ for _, handler := range handlers {
+ handler()
+ }
+ OSExit(code)
+}
+
+// Init tries to acquire a process lock and write the PID file for the current
+// process.
+func Init(directory string, name string) error {
+ if err := Lock(directory, name); err != nil {
+ return err
+ }
+ return CreatePIDFile(filepath.Join(directory, name+".pid"))
+}
+
+// Lock tries to acquire a process lock in the given directory. The acquired
+// lock file is released when Exit is called, or when the process receives an
+// os.Interrupt or SIGTERM signal.
+//
+// This function has only been tested for correctness on Unix systems with
+// filesystems where link is atomic. It may not work as expected on NFS mounts
+// or on platforms like Windows.
+func Lock(directory string, name string) error {
+ file := filepath.Join(directory, fmt.Sprintf("%s-%d.lock", name, os.Getpid()))
+ f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY, 0o660)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ link := filepath.Join(directory, name+".lock")
+ err = os.Link(file, link)
+ if err != nil {
+ // We don't remove the lock file here so that calling Lock multiple
+ // times from the same process doesn't remove an existing lock.
+ return err
+ }
+ l := &lockFile{
+ file: file,
+ link: link,
+ }
+ SetExitHandler(l.release)
+ return nil
+}
+
+// ReapOrphans reaps orphaned child processes and returns whether there are any
+// unterminated child processes that are still active.
+//
+// This is currently a no-op on all platforms except Linux.
+func ReapOrphans() bool {
+ return reap()
+}
+
+// ResetHandlers drops all currently registered handlers.
+func ResetHandlers() {
+ mu.Lock()
+ registry = map[os.Signal][]func(){}
+ mu.Unlock()
+}
+
+// RunReaper continuously attempts to reap orphaned child processes until the
+// given context is cancelled.
+//
+// On Linux, this will register the current process as a child subreaper, and
+// attempt to reap child processes whenever SIGCHLD is received. On all other
+// platforms, this is currently a no-op.
+func RunReaper(ctx context.Context) {
+ runReaper(ctx)
+}
+
+// SetExitHandler registers the given handler function to run when receiving
+// os.Interrupt or SIGTERM signals. Registered handlers are executed in reverse
+// order of when they were set.
+func SetExitHandler(handler func()) {
+ mu.Lock()
+ registry[os.Interrupt] = slices.Insert(registry[os.Interrupt], 0, handler)
+ registry[syscall.SIGTERM] = slices.Insert(registry[syscall.SIGTERM], 0, handler)
+ mu.Unlock()
+}
+
+// SetSignalHandler registers the given handler function to run when receiving
+// the specified signal. Registered handlers are executed in reverse order of
+// when they were set.
+func SetSignalHandler(signal os.Signal, handler func()) {
+ mu.Lock()
+ registry[signal] = slices.Insert(registry[signal], 0, handler)
+ mu.Unlock()
+}
+
+func handleSignals() {
+ notifier := make(chan os.Signal, 100)
+ signal.Notify(notifier)
+ go func() {
+ for sig := range notifier {
+ mu.Lock()
+ disabled := exitDisabled
+ if !disabled {
+ if sig == syscall.SIGTERM || sig == os.Interrupt {
+ exiting = true
+ }
+ }
+ handlers := slices.Clone(registry[sig])
+ mu.Unlock()
+ for _, handler := range handlers {
+ handler()
+ }
+ if !disabled {
+ if sig == syscall.SIGTERM || sig == os.Interrupt {
+ OSExit(1)
+ }
+ }
+ if testMode {
+ testSig <- struct{}{}
+ }
+ }
+ }()
+}
+
+func init() {
+ handleSignals()
+}
diff --git a/pkg/process/process_test.go b/pkg/process/process_test.go
new file mode 100644
index 0000000..c1f8cd9
--- /dev/null
+++ b/pkg/process/process_test.go
@@ -0,0 +1,208 @@
+// Public Domain (-) 2010-present, The Espra Core Authors.
+// See the Espra Core UNLICENSE file for details.
+
+package process
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "syscall"
+ "testing"
+
+ "espra.dev/pkg/osexit"
+)
+
+func TestCrash(t *testing.T) {
+ defer func() {
+ err := recover()
+ if err == nil {
+ t.Fatalf("Crash didn't generate an abort error")
+ }
+ }()
+ Crash()
+}
+
+func TestCreatePIDFile(t *testing.T) {
+ reset()
+ dir := mktemp(t)
+ defer os.RemoveAll(dir)
+ fpath := filepath.Join(dir, "test.pid")
+ err := CreatePIDFile(fpath)
+ if err != nil {
+ t.Fatalf("Unexpected error creating PID file: %s", err)
+ }
+ written, err := os.ReadFile(fpath)
+ if err != nil {
+ t.Fatalf("Unexpected error reading PID file: %s", err)
+ }
+ expected := os.Getpid()
+ pid, err := strconv.ParseInt(string(written), 10, 64)
+ if err != nil {
+ t.Fatalf("Unexpected error parsing PID file contents as an int: %s", err)
+ }
+ if int(pid) != expected {
+ t.Fatalf("Mismatching PID file contents: got %d, want %d", int(pid), expected)
+ }
+ Exit(2)
+ if !osexit.Called() || osexit.Status() != 2 {
+ t.Fatalf("Exit call did not behave as expected")
+ }
+ _, err = os.Stat(fpath)
+ if err == nil {
+ t.Fatalf("Calling Exit did not remove the created PID file as expected")
+ }
+ if !os.IsNotExist(err) {
+ t.Fatalf("Calling Exit did not remove the created PID file as expected, got error: %s", err)
+ }
+ fpath = filepath.Join(dir+"-nonexistent-directory", "test.pid")
+ err = CreatePIDFile(fpath)
+ if err == nil {
+ t.Fatalf("Expected an error when creating PID file in a non-existent directory")
+ }
+}
+
+func TestDisableDefaultExit(t *testing.T) {
+ reset()
+ called := false
+ SetExitHandler(func() {
+ called = true
+ })
+ send(syscall.SIGTERM)
+ if !osexit.Called() {
+ t.Fatalf("os.Exit was not called on SIGTERM")
+ }
+ if !called {
+ t.Fatalf("Exit handler not run on SIGTERM")
+ }
+ DisableAutoExit()
+ osexit.Reset()
+ called = false
+ resetExiting()
+ send(syscall.SIGTERM)
+ if osexit.Called() {
+ t.Fatalf("os.Exit was called on SIGTERM even after DisableAutoExit()")
+ }
+ if !called {
+ t.Fatalf("Exit handler not run on SIGTERM after DisableAutoExit")
+ }
+}
+
+func TestExit(t *testing.T) {
+ reset()
+ called := false
+ SetExitHandler(func() {
+ called = true
+ })
+ Exit(7)
+ if !osexit.Called() {
+ t.Fatalf("Exit did not call os.Exit")
+ }
+ status := osexit.Status()
+ if status != 7 {
+ t.Fatalf("Exit did not set the right status code: got %d, want 7", status)
+ }
+ if !called {
+ t.Fatalf("Exit handler was not run when calling Exit")
+ }
+ osexit.Reset()
+ called = false
+ go func() {
+ Exit(8)
+ }()
+ <-testSig
+ wait <- struct{}{}
+ if osexit.Called() {
+ t.Fatalf("Second call to Exit called os.Exit")
+ }
+ if called {
+ t.Fatalf("Second call to Exit resulted in Exit handler being run again")
+ }
+}
+
+func TestInit(t *testing.T) {
+ dir := mktemp(t)
+ defer os.RemoveAll(dir)
+ err := Init(dir, "core")
+ if err != nil {
+ t.Fatalf("Unexpected error initialising process: %s", err)
+ }
+ err = Init(dir+"-nonexistent-directory", "core")
+ if err == nil {
+ t.Fatalf("Expected an error when calling Init in a non-existing directory")
+ }
+}
+
+func TestLock(t *testing.T) {
+ reset()
+ dir := mktemp(t)
+ defer os.RemoveAll(dir)
+ err := Lock(dir, "core")
+ if err != nil {
+ t.Fatalf("Unexpected error acquiring Lock: %s", err)
+ }
+ err = Lock(dir, "core")
+ if err == nil {
+ t.Fatalf("Expected an error when calling Lock on an already locked path")
+ }
+ fpath := filepath.Join(dir, fmt.Sprintf("core-%d.lock", os.Getpid()))
+ _, err = os.Stat(fpath)
+ if err != nil {
+ t.Fatalf("Unexpected error accessing the raw lock file: %s", err)
+ }
+ Exit(2)
+ _, err = os.Stat(fpath)
+ if err == nil {
+ t.Fatalf("Calling Exit did not remove the lock file as expected")
+ }
+ if !os.IsNotExist(err) {
+ t.Fatalf("Calling Exit did not remove the lock file as expected, got error: %s", err)
+ }
+ err = Lock(dir+"-nonexistent-directory", "core")
+ if err == nil {
+ t.Fatalf("Expected an error when calling Lock in a non-existing directory")
+ }
+}
+
+func TestSignalHandler(t *testing.T) {
+ reset()
+ called := false
+ SetSignalHandler(syscall.SIGHUP, func() {
+ called = true
+ })
+ send(syscall.SIGABRT)
+ if called {
+ t.Fatalf("Signal handler erroneously called on SIGABRT")
+ }
+ send(syscall.SIGHUP)
+ if !called {
+ t.Fatalf("Signal handler not called on SIGHUP")
+ }
+}
+
+func mktemp(t *testing.T) string {
+ dir, err := os.MkdirTemp("", "core-process")
+ if err != nil {
+ t.Skipf("Unable to create temporary directory for tests: %s", err)
+ }
+ return dir
+}
+
+func reset() {
+ OSExit = osexit.Set()
+ testMode = true
+ ResetHandlers()
+ resetExiting()
+}
+
+func resetExiting() {
+ mu.Lock()
+ exiting = false
+ mu.Unlock()
+}
+
+func send(sig syscall.Signal) {
+ syscall.Kill(syscall.Getpid(), sig)
+ <-testSig
+}
diff --git a/pkg/process/reap.go b/pkg/process/reap.go
new file mode 100644
index 0000000..58312bc
--- /dev/null
+++ b/pkg/process/reap.go
@@ -0,0 +1,17 @@
+// Public Domain (-) 2018-present, The Espra Core Authors.
+// See the Espra Core UNLICENSE file for details.
+
+//go:build !linux
+
+package process
+
+import (
+ "context"
+)
+
+func reap() bool {
+ return false
+}
+
+func runReaper(ctx context.Context) {
+}
diff --git a/pkg/process/reap_linux.go b/pkg/process/reap_linux.go
new file mode 100644
index 0000000..62daf70
--- /dev/null
+++ b/pkg/process/reap_linux.go
@@ -0,0 +1,47 @@
+// Public Domain (-) 2018-present, The Espra Core Authors.
+// See the Espra Core UNLICENSE file for details.
+
+package process
+
+import (
+ "context"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+func reap() bool {
+ status := syscall.WaitStatus(0)
+ for {
+ pid, err := syscall.Wait4(-1, &status, unix.WNOHANG, nil)
+ if pid == 0 {
+ return true
+ }
+ if pid == -1 && err == syscall.ECHILD {
+ return false
+ }
+ }
+}
+
+func runReaper(ctx context.Context) {
+ if os.Getpid() != 1 {
+ unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0)
+ }
+ notifier := make(chan os.Signal, 4096)
+ signal.Notify(notifier, syscall.SIGCHLD)
+outer:
+ for {
+ select {
+ case <-notifier:
+ reap()
+ if testMode {
+ testSig <- struct{}{}
+ }
+ case <-ctx.Done():
+ signal.Stop(notifier)
+ break outer
+ }
+ }
+}
diff --git a/pkg/process/reap_test.go b/pkg/process/reap_test.go
new file mode 100644
index 0000000..9903ed3
--- /dev/null
+++ b/pkg/process/reap_test.go
@@ -0,0 +1,47 @@
+// Public Domain (-) 2018-present, The Espra Core Authors.
+// See the Espra Core UNLICENSE file for details.
+
+package process
+
+import (
+ "context"
+ "os/exec"
+ "runtime"
+ "syscall"
+ "testing"
+)
+
+func TestReapOrphans(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ ReapOrphans()
+ return
+ }
+ testMode = true
+ cmd := exec.Command("sleep", "100")
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("Unexpected error when trying to run `sleep 100`: %s", err)
+ }
+ if more := ReapOrphans(); !more {
+ t.Fatalf("Failed to find unterminated child process when calling ReapOrphans")
+ }
+ syscall.Kill(cmd.Process.Pid, syscall.SIGTERM)
+ if more := ReapOrphans(); more {
+ t.Fatalf("Unterminated child process encountered when calling ReapOrphans")
+ }
+}
+
+func TestRunReaper(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ go RunReaper(ctx)
+ defer cancel()
+ if runtime.GOOS != "linux" {
+ return
+ }
+ testMode = true
+ cmd := exec.Command("sleep", "100")
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("Unexpected error when trying to run `sleep 100`: %s", err)
+ }
+ syscall.Kill(cmd.Process.Pid, syscall.SIGTERM)
+ <-testSig
+}
|
espra/espra | cb554bd2cbfe8ac964d5b5387c5961b5750cadb9 | go: init module | diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..51d236d
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,3 @@
+module espra.dev
+
+go 1.21.5
|
espra/espra | a7a791815ae98a03a4c5a0af806aeb7afdbd0a88 | COPYING: add dummy file to satisfy pkg.go.dev | diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..9dc90ed
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,127 @@
+PLEASE IGNORE THIS FILE. IT'S ONLY HERE TO SATISFY PKG.GO.DEV.
+
+FOR THE ACTUAL LICENSE, PLEASE SEE: UNLICENSE.md
+
+-------------------------------------------------------------------------------
+
+Creative Commons Legal Code
+
+CC0 1.0 Universal
+
+ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
+ LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
+ ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
+ INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
+ REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
+ PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
+ THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
+ HEREUNDER.
+
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer
+exclusive Copyright and Related Rights (defined below) upon the creator
+and subsequent owner(s) (each and all, an "owner") of an original work of
+authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for
+the purpose of contributing to a commons of creative, cultural and
+scientific works ("Commons") that the public can reliably and without fear
+of later claims of infringement build upon, modify, incorporate in other
+works, reuse and redistribute as freely as possible in any form whatsoever
+and for any purposes, including without limitation commercial purposes.
+These owners may contribute to the Commons to promote the ideal of a free
+culture and the further production of creative, cultural and scientific
+works, or to gain reputation or greater distribution for their Work in
+part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any
+expectation of additional consideration or compensation, the person
+associating CC0 with a Work (the "Affirmer"), to the extent that he or she
+is an owner of Copyright and Related Rights in the Work, voluntarily
+elects to apply CC0 to the Work and publicly distribute the Work under its
+terms, with knowledge of his or her Copyright and Related Rights in the
+Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be
+protected by copyright and related or neighboring rights ("Copyright and
+Related Rights"). Copyright and Related Rights include, but are not
+limited to, the following:
+
+ i. the right to reproduce, adapt, distribute, perform, display,
+ communicate, and translate a Work;
+ ii. moral rights retained by the original author(s) and/or performer(s);
+iii. publicity and privacy rights pertaining to a person's image or
+ likeness depicted in a Work;
+ iv. rights protecting against unfair competition in regards to a Work,
+ subject to the limitations in paragraph 4(a), below;
+ v. rights protecting the extraction, dissemination, use and reuse of data
+ in a Work;
+ vi. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation
+ thereof, including any amended or successor version of such
+ directive); and
+vii. other similar, equivalent or corresponding rights throughout the
+ world based on applicable law or treaty, and any national
+ implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention
+of, applicable law, Affirmer hereby overtly, fully, permanently,
+irrevocably and unconditionally waives, abandons, and surrenders all of
+Affirmer's Copyright and Related Rights and associated claims and causes
+of action, whether now known or unknown (including existing as well as
+future claims and causes of action), in the Work (i) in all territories
+worldwide, (ii) for the maximum duration provided by applicable law or
+treaty (including future time extensions), (iii) in any current or future
+medium and for any number of copies, and (iv) for any purpose whatsoever,
+including without limitation commercial, advertising or promotional
+purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
+member of the public at large and to the detriment of Affirmer's heirs and
+successors, fully intending that such Waiver shall not be subject to
+revocation, rescission, cancellation, termination, or any other legal or
+equitable action to disrupt the quiet enjoyment of the Work by the public
+as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason
+be judged legally invalid or ineffective under applicable law, then the
+Waiver shall be preserved to the maximum extent permitted taking into
+account Affirmer's express Statement of Purpose. In addition, to the
+extent the Waiver is so judged Affirmer hereby grants to each affected
+person a royalty-free, non transferable, non sublicensable, non exclusive,
+irrevocable and unconditional license to exercise Affirmer's Copyright and
+Related Rights in the Work (i) in all territories worldwide, (ii) for the
+maximum duration provided by applicable law or treaty (including future
+time extensions), (iii) in any current or future medium and for any number
+of copies, and (iv) for any purpose whatsoever, including without
+limitation commercial, advertising or promotional purposes (the
+"License"). The License shall be deemed effective as of the date CC0 was
+applied by Affirmer to the Work. Should any part of the License for any
+reason be judged legally invalid or ineffective under applicable law, such
+partial invalidity or ineffectiveness shall not invalidate the remainder
+of the License, and in such case Affirmer hereby affirms that he or she
+will not (i) exercise any of his or her remaining Copyright and Related
+Rights in the Work or (ii) assert any associated claims and causes of
+action with respect to the Work, in either case contrary to Affirmer's
+express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+ surrendered, licensed or otherwise affected by this document.
+ b. Affirmer offers the Work as-is and makes no representations or
+ warranties of any kind concerning the Work, express, implied,
+ statutory or otherwise, including without limitation warranties of
+ title, merchantability, fitness for a particular purpose, non
+ infringement, or the absence of latent or other defects, accuracy, or
+ the present or absence of errors, whether or not discoverable, all to
+ the greatest extent permissible under applicable law.
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+ that may apply to the Work or any use thereof, including without
+ limitation any person's Copyright and Related Rights in the Work.
+ Further, Affirmer disclaims responsibility for obtaining any necessary
+ consents, permissions or other rights required for any use of the
+ Work.
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+ party to this document and has no duty or obligation with respect to
+ this CC0 or use of the Work.
|
espra/espra | b0a7193020210b8a63c4bb0bb37aec675206e356 | pkg/osexit: add a mock of os.Exit for testing purposes | diff --git a/pkg/osexit/osexit.go b/pkg/osexit/osexit.go
new file mode 100644
index 0000000..6635b80
--- /dev/null
+++ b/pkg/osexit/osexit.go
@@ -0,0 +1,84 @@
+// Public Domain (-) 2018-present, The Espra Core Authors.
+// See the Espra Core UNLICENSE file for details.
+
+// Package osexit mocks the os.Exit function.
+//
+// To use, first set a package-specific exit function, e.g.
+//
+// var exit = os.Exit
+//
+// Then use it instead of a direct call to os.Exit, e.g.
+//
+// if somethingFatal {
+// exit(1)
+// return
+// }
+//
+// Make sure to return immediately after the call to exit, so that testing code
+// will match real code as closely as possible.
+//
+// You can now use the utility functions provided by this package to override
+// exit for testing purposes, e.g.
+//
+// exit = osexit.Set()
+// invokeCodeCallingExit()
+// if !osexit.Called() {
+// t.Fatalf("os.Exit was not called as expected")
+// }
+package osexit
+
+import (
+ "sync"
+)
+
+var (
+ called bool
+ mu sync.RWMutex // protects called, status
+ status int
+)
+
+// Called returns whether the mock os.Exit function was called.
+func Called() bool {
+ mu.RLock()
+ c := called
+ mu.RUnlock()
+ return c
+}
+
+// Func provides a mock for the os.Exit function. Special care must be taken
+// when testing os.Exit to make sure no code runs after the call to Exit. It's
+// recommended to put a return statement after Exit calls so that the behaviour
+// of the mock matches that of the real function as much as possible.
+func Func(code int) {
+ mu.Lock()
+ if called {
+ mu.Unlock()
+ return
+ }
+ called = true
+ status = code
+ mu.Unlock()
+}
+
+// Reset resets the state of the mock function.
+func Reset() {
+ mu.Lock()
+ called = false
+ status = 0
+ mu.Unlock()
+}
+
+// Set returns the mock os.Exit function after calling Reset.
+func Set() func(int) {
+ Reset()
+ return Func
+}
+
+// Status returns the status code that the mock os.Exit function was called
+// with.
+func Status() int {
+ mu.RLock()
+ s := status
+ mu.RUnlock()
+ return s
+}
diff --git a/pkg/osexit/osexit_test.go b/pkg/osexit/osexit_test.go
new file mode 100644
index 0000000..b188107
--- /dev/null
+++ b/pkg/osexit/osexit_test.go
@@ -0,0 +1,38 @@
+// Public Domain (-) 2018-present, The Espra Core Authors.
+// See the Espra Core UNLICENSE file for details.
+
+package osexit_test
+
+import (
+ "os"
+ "testing"
+
+ "espra.dev/pkg/osexit"
+)
+
+var exit = os.Exit
+
+func TestExit(t *testing.T) {
+ exit = osexit.Set()
+ exit(2)
+ if !osexit.Called() {
+ t.Fatalf("mock exit function was not called")
+ }
+ status := osexit.Status()
+ if status != 2 {
+ t.Fatalf("mock exit function did not set the right status code: got %d, want 2", status)
+ }
+ exit(3)
+ status = osexit.Status()
+ if status != 2 {
+ t.Fatalf("mock exit function overrode the status set by a previous call: got %d, want 2", status)
+ }
+ osexit.Reset()
+ if osexit.Called() {
+ t.Fatalf("the reset mock exit function claims to have been called")
+ }
+ status = osexit.Status()
+ if status != 0 {
+ t.Fatalf("the reset mock exit function returned a non-zero status code: got %d, want 0", status)
+ }
+}
|
espra/espra | aa094bc568e96ed96af7c9dfadf61ed7372965e4 | gitignore: add patterns for go and generated files | diff --git a/.gitignore b/.gitignore
index f9b4f5b..a9e0ff6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,14 +1,26 @@
# System-wide excludes
.DS_Store
# IDEs
*~
.idea/
.vscode/
+# Generated files
+
+*.gen.go
+*.pb.go
+coverage.txt
+
+# Go
+
+*.out
+cpu.prof
+mem.prof
+
# Zig
zig-cache/
zig-out/
|
espra/espra | 5268824982fb76c191583ee9f7c11c6948d9b1fa | gitignore: add base set of ignored patterns | diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..f9b4f5b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,14 @@
+# System-wide excludes
+
+.DS_Store
+
+# IDEs
+
+*~
+.idea/
+.vscode/
+
+# Zig
+
+zig-cache/
+zig-out/
|
espra/espra | 490299db29945b8e7ed0dcb00429587d9ad514cf | AUTHORS: add the initial set of authors | diff --git a/AUTHORS.eon b/AUTHORS.eon
new file mode 100644
index 0000000..5b061b7
--- /dev/null
+++ b/AUTHORS.eon
@@ -0,0 +1,36 @@
+# Espra Core Authors
+# ==================
+#
+# This is the official list of the Espra Core Authors ("The Authors").
+#
+# By adding yourself to this file, you are affirming that all Contributions that
+# you make to Espra Core ("The Work") are:
+#
+# * Created in whole or in part by yourself, and that you have the right to
+# submit it under the terms of the [Espra Core UNLICENSE], and voluntarily
+# elect to place it under those terms; or
+#
+# * Based on Third Party Work that, to the best of your knowledge, is covered
+# under a compatible open source license which gives you the right to submit
+# that work with modifications, where the license is indicated in the contents
+# of the related files or referenced within a top-level `THIRDPARTY.eon` file.
+#
+# You also acknowledge that this is a public project and that all Contributions
+# you make, including the record of those Contributions, along with any personal
+# information that you submit with them, are intended to be maintained
+# indefinitely, and may be redistributed via any medium.
+#
+# Thank you!
+#
+# [Espra Core UNLICENSE]: UNLICENSE.md
+
+tav {
+ email = [email protected]
+ github = tav
+ location {
+ area = London
+ country = GB
+ }
+ name = tav
+ twitter = tav
+}
|
espra/espra | 031464fa3de7017669e12aeae34f2cbac641bd59 | UNLICENSE: place the repo into the public domain | diff --git a/UNLICENSE.md b/UNLICENSE.md
new file mode 100644
index 0000000..49817b2
--- /dev/null
+++ b/UNLICENSE.md
@@ -0,0 +1,183 @@
+# Espra Core UNLICENSE
+
+In the spirit of contributing to the Public Domain, to the full extent possible
+under law, the Espra Core Authors ("The Authors"), as specified in the
+[`AUTHORS.eon`] file, have waived all copyright and related or neighboring
+rights to their Contributions to Espra Core ("The Work").
+
+This does not apply to works authored by third parties ("Third Party Works")
+which come with their own copyright and licensing terms. These terms may be
+defined in explicit files as specified within an optional [`THIRDPARTY.eon`]
+file or specified as part of the contents of licensed files. We recommend you
+read them as their terms may differ from the terms below.
+
+With the exception of Third Party Works, all files in this repository are
+covered by this UNLICENSE. If desired, an informative header like the following
+could be used to explicitly specify that a file is covered by this UNLICENSE:
+
+ // Public Domain (-) 2023-present, The Espra Core Authors.
+ // See the Espra Core UNLICENSE file for details.
+
+All trademarks and registered trademarks mentioned in The Work are the property
+of their respective owners.
+
+## Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer
+exclusive Copyright and Related Rights (defined below) upon the creator and
+subsequent owner(s) (each and all, an "owner") of an original work of authorship
+and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for the
+purpose of contributing to a commons of creative, cultural, and scientific works
+("Commons") that the public can reliably and without fear of later claims of
+infringement build upon, modify, incorporate in other works, reuse, and
+redistribute as freely as possible in any form whatsoever and for any purposes,
+including without limitation commercial purposes.
+
+These owners may contribute to the Commons to promote the ideal of a free
+culture and the further production of creative, cultural, and scientific works,
+or to gain reputation or greater distribution for their Work in part through the
+use and efforts of others.
+
+For these and/or other purposes and motivations, and without any expectation of
+additional consideration or compensation, the Authors, to the extent that they
+are an owner of Copyright and Related Rights in the Work, voluntarily elect to
+apply this UNLICENSE to the Work and publicly distribute the Work under these
+terms, with knowledge of their Copyright and Related Rights in the Work and the
+meaning and intended legal effect of this UNLICENSE on those rights.
+
+## Definitions
+
+The term "distribute" has the same meaning here as under U.S. copyright law. A
+"Contribution" is the original Work, or any additions or changes to it.
+
+A Work made available under this UNLICENSE may be protected by copyright and
+related or neighboring rights ("Copyright and Related Rights"). Copyright and
+Related Rights include, but are not limited to, the following:
+
+1. the right to reproduce, adapt, distribute, perform, display, communicate, and
+ translate a Work;
+
+2. moral rights retained by the original author(s) and/or performer(s);
+
+3. publicity and privacy rights pertaining to a person's image or likeness
+ depicted in a Work;
+
+4. rights protecting against unfair competition in regards to a Work, subject to
+ the Limitations and Disclaimers, below;
+
+5. rights protecting the extraction, dissemination, use, and reuse of data in a
+ Work;
+
+6. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation thereof,
+ including any amended or successor version of such directive); and
+
+7. other similar, equivalent or corresponding rights throughout the world based
+ on applicable law or treaty, and any national implementations thereof.
+
+## Waiver
+
+To the greatest extent permitted by, but not in contravention of, applicable
+law, the Authors hereby overtly, fully, permanently, irrevocably, and
+unconditionally waive, abandon, and surrender all of their Copyright and Related
+Rights and associated claims and causes of action, whether now known or unknown
+(including existing as well as future claims and causes of action), in the Work
+(i) in all territories worldwide, (ii) for the maximum duration provided by
+applicable law or treaty (including future time extensions), (iii) in any
+current or future medium and for any number of copies, and (iv) for any purpose
+whatsoever, including without limitation commercial, advertising, or promotional
+purposes (the "Waiver").
+
+The Authors make the Waiver for the benefit of each member of the public at
+large and to the detriment of their heirs and successors, fully intending that
+such Waiver shall not be subject to revocation, rescission, cancellation,
+termination, or any other legal or equitable action to disrupt the quiet
+enjoyment of the Work by the public as contemplated by the Authors' express
+Statement of Purpose.
+
+## Public License Fallback
+
+Should any part of the Waiver for any reason be judged legally invalid or
+ineffective under applicable law, then the Waiver shall be preserved to the
+maximum extent permitted taking into account the Authors' express Statement of
+Purpose. In addition, to the extent the Waiver is so judged the Authors hereby
+grant to each affected person a royalty-free, non transferable, non
+sublicensable, non exclusive, irrevocable, and unconditional license to exercise
+the Authors' Copyright and Related Rights in the Work (i) in all territories
+worldwide, (ii) for the maximum duration provided by applicable law or treaty
+(including future time extensions), (iii) in any current or future medium, and
+for any number of copies, and (iv) for any purpose whatsoever, including without
+limitation commercial, advertising, or promotional purposes (the "Public
+License").
+
+The Public License shall be deemed effective as of the date this UNLICENSE was
+first applied to the Work. Should any part of the Public License for any reason
+be judged legally invalid or ineffective under applicable law, such partial
+invalidity or ineffectiveness shall not invalidate the remainder of the Public
+License, and in such case the Authors hereby affirm that they will not (i)
+exercise any of their remaining Copyright and Related Rights in the Work or (ii)
+assert any associated claims and causes of action with respect to the Work, in
+either case contrary to their express Statement of Purpose.
+
+## Grant of Patent Rights
+
+The Authors hereby grant to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section) patent
+license to make, have made, use, offer to sell, sell, import, transfer, and
+otherwise run, modify, and propagate the contents of this Work, where such
+license applies only to those patent claims, both currently owned or controlled
+by any of the Authors and acquired in the future, licensable by any of the
+Authors that are necessarily infringed by this Work.
+
+This grant does not include claims that would be infringed only as a consequence
+of further modification of this Work.
+
+If you or your agent or exclusive licensee institute or order or agree to the
+institution of patent litigation against any entity (including a cross-claim or
+counterclaim in a lawsuit) alleging that this Work or any Contribution
+incorporated within this Work constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent rights
+granted to you under this Grant of Patent Rights for the Work shall terminate as
+of the date such litigation is filed.
+
+## Limitations and Disclaimers
+
+1. No trademark rights held by any of the Authors are waived, abandoned,
+ surrendered, licensed, or otherwise affected by this document.
+
+2. The Authors offer the Work as-is and makes no representations or warranties
+ of any kind concerning the Work, express, implied, statutory, or otherwise,
+ including without limitation warranties of title, merchantability, fitness
+ for a particular purpose, non infringement, or the absence of latent or other
+ defects, accuracy, or the present or absence of errors, whether or not
+ discoverable, all to the greatest extent permissible under applicable law.
+
+ In no event shall the Authors be liable for any direct, indirect, incidental,
+ special, exemplary, or consequential damages (including, but not limited to,
+ procurement of substitute goods or services; loss of use, data, or profits;
+ or business interruption) however caused and on any theory of liability,
+ whether in contract, strict liability, or tort (including negligence or
+ otherwise) arising in any way out of the use of the Work, even if advised of
+ the possibility of such damage.
+
+3. The Authors disclaim responsibility for clearing rights of other persons that
+ may apply to the Work or any use thereof, including without limitation any
+ person's Copyright and Related Rights in the Work. Further, the Authors
+ disclaim responsibility for obtaining any necessary consents, permissions or
+ other rights required for any use of the Work.
+
+## Appendix
+
+- The text of this document is derived from [Creative Commons CC0 1.0 Universal]
+ and the [Patent Grant] that ships with Google Go.
+
+- This UNLICENSE is seen as a mere transitional requirement until metanational
+ law adapts to the post intellectual property reality.
+
+[`authors.eon`]: AUTHORS.eon
+[`thirdparty.eon`]: THIRDPARTY.eon
+[creative commons cc0 1.0 universal]: https://creativecommons.org/publicdomain/zero/1.0/legalcode
+[patent grant]: https://github.com/golang/go/blob/master/PATENTS
|
espra/espra | 4964e49615440d82600b12f4506b2bd30009ba25 | pkg/runes: benchmark ToBytes against the builtin functions | diff --git a/pkg/runes/runes_test.go b/pkg/runes/runes_test.go
index aa00ed6..eaefd6b 100644
--- a/pkg/runes/runes_test.go
+++ b/pkg/runes/runes_test.go
@@ -1,26 +1,53 @@
// Public Domain (-) 2021-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
package runes
import (
"bytes"
"testing"
)
+var (
+ saveLen int
+)
+
+var (
+ textLarge = []rune("ä¸é£å®å£ä»åç¹éå¤è½æ¢æ³°ä¸å¹´åºé£ãè¬çæ³äºå±æéæ¥å¾´å¤é½è¼å¤åãéºèå¥è¡æ²é²é¥è¡¨ææ°çç½èç´¢æ®èçµè¦è
äºãå³éå¿
å¾è¿
å颿å¢é¡è»¢åãæ²»çµéªé½è³ªä½æ´èªå¹
é¢åéè¨æ³å½¼ãç®ç¶æ±ºè¸çè¼æ¨å¯¾é é«ç´°è¨éãç´¢ææ¨©è§£æ
ç§å
åç«è¦æå½¹å°é¢è¡å¤ã叿¡å¸åæ¸æ
åç®äºççºå質å¤çºæ¸å¤©é«æå³ãè¦§åæ¨©å¾¹ææ¸è¨´å±å©é両ä¿é²ä»£éè¸å¥³å
¨ã")
+	textSmall = []rune("Hello, 世界")
+)
+
type testrunes struct {
runes []rune
}
+func BenchmarkToBytes(b *testing.B) {
+ l := 0
+ for i := 0; i < b.N; i++ {
+ l += len(ToBytes(textLarge))
+ l += len(ToBytes(textSmall))
+ }
+ saveLen = l
+}
+
+func BenchmarkToBytesSimple(b *testing.B) {
+ l := 0
+ for i := 0; i < b.N; i++ {
+ l += len([]byte(string(textLarge)))
+ l += len([]byte(string(textSmall)))
+ }
+ saveLen = l
+}
+
func TestToBytes(t *testing.T) {
for _, tt := range []testrunes{
		{[]rune("Hello, 世界")},
{[]rune{-1, 2047, 0xd800, 0x0010fff0, 0x0f10ffff}},
} {
got := ToBytes(tt.runes)
want := []byte(string(tt.runes))
if !bytes.Equal(got, want) {
t.Errorf("string(ToBytes([]rune(%q))) = %q", string(tt.runes), string(got))
}
}
}
|
espra/espra | dc64ae799dc27f8f5d1af07969cbd5a596a60ec0 | pkg/runes: add basic functions for dealing with runes | diff --git a/pkg/runes/runes.go b/pkg/runes/runes.go
new file mode 100644
index 0000000..3243afa
--- /dev/null
+++ b/pkg/runes/runes.go
@@ -0,0 +1,52 @@
+// Public Domain (-) 2021-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// Package runes implements simple functions to manipulate Unicode runes.
+package runes
+
+import (
+ "unicode/utf8"
+)
+
+const (
+ max = 0x0010ffff
+ max1 = 1<<7 - 1
+ max2 = 1<<11 - 1
+ max3 = 1<<16 - 1
+ smax = 0xdfff
+ smin = 0xd800
+)
+
+// ByteLen returns the byte length of the rune when encoded into UTF-8 using
+// `utf8.EncodeRune`.
+func ByteLen(r rune) int {
+ switch {
+ case r < 0:
+ return 3
+ case r <= max1:
+ return 1
+ case r <= max2:
+ return 2
+ case smin <= r && r <= smax:
+ return 3
+ case r <= max3:
+ return 3
+ case r <= max:
+ return 4
+ }
+ return 3
+}
+
+// ToBytes converts a rune slice to a UTF-8 encoded byte slice.
+func ToBytes(s []rune) []byte {
+ l := 0
+ for i := 0; i < len(s); i++ {
+ l += ByteLen(s[i])
+ }
+ b := make([]byte, l)
+ l = 0
+ for i := 0; i < len(s); i++ {
+ l += utf8.EncodeRune(b[l:], s[i])
+ }
+ return b
+}
diff --git a/pkg/runes/runes_test.go b/pkg/runes/runes_test.go
new file mode 100644
index 0000000..aa00ed6
--- /dev/null
+++ b/pkg/runes/runes_test.go
@@ -0,0 +1,26 @@
+// Public Domain (-) 2021-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package runes
+
+import (
+ "bytes"
+ "testing"
+)
+
+type testrunes struct {
+ runes []rune
+}
+
+func TestToBytes(t *testing.T) {
+ for _, tt := range []testrunes{
+		{[]rune("Hello, 世界")},
+ {[]rune{-1, 2047, 0xd800, 0x0010fff0, 0x0f10ffff}},
+ } {
+ got := ToBytes(tt.runes)
+ want := []byte(string(tt.runes))
+ if !bytes.Equal(got, want) {
+ t.Errorf("string(ToBytes([]rune(%q))) = %q", string(tt.runes), string(got))
+ }
+ }
+}
|
espra/espra | 116d75154aa8d4ec461f37f201731983c97e4158 | pkg/term: make the cursor Pos a non-pointer | diff --git a/pkg/term/term.go b/pkg/term/term.go
index 1598532..173f035 100644
--- a/pkg/term/term.go
+++ b/pkg/term/term.go
@@ -1,708 +1,725 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
// Package term provides support for interacting with terminals.
package term
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"os"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"web4.cc/pkg/process"
)
// Control characters.
const (
KeyNull InputKey = iota
KeyCtrlA
KeyCtrlB
KeyCtrlC
KeyCtrlD
KeyCtrlE
KeyCtrlF
KeyCtrlG
KeyCtrlH
KeyCtrlI
KeyCtrlJ
KeyCtrlK
KeyCtrlL
KeyCtrlM
KeyCtrlN
KeyCtrlO
KeyCtrlP
KeyCtrlQ
KeyCtrlR
KeyCtrlS
KeyCtrlT
KeyCtrlU
KeyCtrlV
KeyCtrlW
KeyCtrlX
KeyCtrlY
KeyCtrlZ
KeyCtrlLeftBracket
KeyCtrlBackslash
KeyCtrlRightBracket
KeyCtrlCaret
KeyCtrlUnderscore
)
// Common aliases for control characters.
const (
KeyBackspace InputKey = 127
KeyEnter InputKey = '\n'
KeyEscape InputKey = 27
KeyInterrupt InputKey = KeyCtrlC
KeyTab InputKey = '\t'
)
// Arrow keys.
const (
KeyDown InputKey = iota + 128
KeyEnd
KeyHome
KeyLeft
KeyPageDown
KeyPageUp
KeyRight
KeyUp
)
const isWindows = runtime.GOOS == "windows"
// Error values.
var (
- errInvalidResponse = errors.New("term: invalid response from terminal")
+ errInvalidResponse = errors.New("invalid input from terminal")
)
var (
cursorHidden bool
mu sync.Mutex // protects cursorHidden
)
// Device represents a device file that has been "converted" using `MakeRaw`.
//
// Devices must be reset by calling the `Reset` method before the program exits.
// Otherwise, external systems like the user's shell might be left in a broken
// state.
type Device struct {
d *device
}
// Read does a raw read on the device.
func (d *Device) Read(p []byte) (int, error) {
return d.d.Read(p)
}
// Reset resets the device file back to its initial state before it was
// converted.
func (d *Device) Reset() error {
return setDevice(d.d)
}
// Dimensions represents the window dimensions for the terminal.
type Dimensions struct {
Cols int
Rows int
}
// Input represents input received from the terminal. The `Byte` field
// represents non-control characters. When the `Byte` field is `0`, then the
// `Key` field represents a control character or arrow key.
//
// Note that both `\n` and `\r` are mapped to `KeyEnter` for simplicity, and
// that the `\n` and `\t` characters are returned in the `Key` field, while the
// space character `' '` is returned in the `Byte` field.
type Input struct {
Byte byte
Key InputKey
}
// InputKey represents a special input key received from the terminal. This
// encompasses both control characters and arrow keys.
type InputKey int
// Pos represents the cursor position on the terminal.
type Pos struct {
Col int
Row int
}
// RawConfig specifies the configuration for `MakeRaw`. It can be specified
// using one of the `RawOption` functions.
type RawConfig struct {
block bool
canon bool
crnl bool
sig bool
}
// RawOption functions configure `RawConfig` for `MakeRaw` calls.
type RawOption func(*RawConfig)
// Screen provides an interface for building interactive terminal applications.
//
// On UNIX systems, `Screen` reads and writes from `/dev/tty`. On Windows, it
// reads from `CONIN$` and writes to `CONOUT$`.
type Screen struct {
dev *Device
in *os.File
intr bool
pending []byte
out *os.File
}
// Bell tells the terminal to emit a beep/bell.
func (s *Screen) Bell() {
s.out.WriteString("\x07")
}
// ClearLine clears the current line.
func (s *Screen) ClearLine() {
s.out.WriteString("\x1b[2K")
}
// ClearLineToEnd clears everything from the cursor to the end of the current
// line.
func (s *Screen) ClearLineToEnd() {
s.out.WriteString("\x1b[0K")
}
// ClearLineToStart clears everything from the cursor to the start of the
// current line.
func (s *Screen) ClearLineToStart() {
s.out.WriteString("\x1b[1K")
}
// ClearScreen clears the screen and moves the cursor to the top left.
func (s *Screen) ClearScreen() {
s.out.WriteString("\x1b[2J\x1b[H")
}
// ClearToEnd clears everything from the cursor to the end of the screen.
func (s *Screen) ClearToEnd() {
s.out.WriteString("\x1b[0J")
}
// ClearToStart clears everything from the cursor to the start of the screen.
func (s *Screen) ClearToStart() {
s.out.WriteString("\x1b[1J")
}
// CursorDown moves the cursor down by the given amount.
func (s *Screen) CursorDown(n int) {
fmt.Fprintf(s.out, "\x1b[%dB", n)
}
// CursorLeft moves the cursor left by the given amount.
func (s *Screen) CursorLeft(n int) {
fmt.Fprintf(s.out, "\x1b[%dD", n)
}
// CursorPos returns the current position of the cursor.
-func (s *Screen) CursorPos() (*Pos, error) {
- if err := s.makeRaw(); err != nil {
- return nil, err
- }
- defer s.reset()
- // Query the terminal.
- if _, err := s.out.WriteString("\x1b[6n"); err != nil {
- return nil, err
- }
- // Read the response.
- buf := [20]byte{}
- i := 0
- for i < len(buf) {
- n, err := s.Read(buf[i : i+1])
- if n == 1 && buf[i] == 'R' {
- break
- }
- if err != nil {
- return nil, err
- }
- i++
- }
- // Exit on invalid data from the device.
- if !(i >= 5 && buf[0] == '\x1b' && buf[1] == '[') {
- return nil, errInvalidResponse
- }
- // Parse the response to get the position.
- split := bytes.Split(buf[2:i], []byte{';'})
- if len(split) != 2 {
- return nil, errInvalidResponse
- }
- row, err := strconv.ParseUint(string(split[0]), 10, 16)
- if err != nil {
- return nil, errInvalidResponse
- }
- col, err := strconv.ParseUint(string(split[1]), 10, 16)
+func (s *Screen) CursorPos() (Pos, error) {
+ pos, err := s.cursorPos()
if err != nil {
- return nil, errInvalidResponse
+ return pos, fmt.Errorf("term: unable to get cursor position: %s", err)
}
- return &Pos{
- Col: int(col),
- Row: int(row),
- }, nil
+ return pos, nil
}
// CursorRight moves the cursor right by the given amount.
func (s *Screen) CursorRight(n int) {
fmt.Fprintf(s.out, "\x1b[%dC", n)
}
// CursorTo moves the cursor to the given position.
-func (s *Screen) CursorTo(row, column int) {
- fmt.Fprintf(s.out, "\x1b[%d;%dH", row, column)
+func (s *Screen) CursorTo(pos Pos) {
+ fmt.Fprintf(s.out, "\x1b[%d;%dH", pos.Row, pos.Col)
}
// CursorUp moves the cursor up by the given amount.
func (s *Screen) CursorUp(n int) {
fmt.Fprintf(s.out, "\x1b[%dA", n)
}
// HideCursor hides the cursor on the terminal. It also registers a
// `process.Exit` handler to restore the cursor when the process exits.
func (s *Screen) HideCursor() {
mu.Lock()
if !cursorHidden {
process.SetExitHandler(s.ShowCursor)
cursorHidden = true
}
mu.Unlock()
s.out.WriteString("\x1b[?25l")
}
// Interruptible sets whether the reading of input from the terminal can be
// interrupted by certain control characters. If interruptible:
//
// * `^C` exits the process with an exit code of 130.
//
// * `^\` aborts the process with a panic and prints stacktraces.
//
// All `Screen` instances are interruptible by default.
func (s *Screen) Interruptible(state bool) {
s.intr = state
}
// Print formats the operands like `fmt.Print` and writes to the terminal
// output.
func (s *Screen) Print(a ...interface{}) (n int, err error) {
return fmt.Fprint(s, a...)
}
// Printf formats the operands like `fmt.Printf` and writes to the terminal
// output.
func (s *Screen) Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(s, format, a...)
}
// Println formats the operands like `fmt.Println` and writes to the terminal
// output.
func (s *Screen) Println(a ...interface{}) (n int, err error) {
return fmt.Fprintln(s, a...)
}
// Read reads the terminal input.
func (s *Screen) Read(p []byte) (n int, err error) {
if s.dev == nil {
if err := s.makeRaw(); err != nil {
return 0, err
}
defer s.reset()
}
if !s.intr {
return s.in.Read(p)
}
n, err = s.in.Read(p)
if n > 0 {
for _, char := range p[:n] {
switch InputKey(char) {
case KeyCtrlC:
s.reset()
process.Exit(130) // 128 + SIGINT (signal 2)
case KeyCtrlBackslash:
s.reset()
process.Crash()
}
}
}
return n, err
}
// ReadInput reads `Input` from the terminal.
func (s *Screen) ReadInput() (*Input, error) {
if err := s.makeRaw(); err != nil {
return nil, err
}
defer s.reset()
var buf []byte
rem := false
if len(s.pending) > 0 {
buf = s.pending
rem = true
} else {
buf = make([]byte, 1)
_, err := s.Read(buf)
if err != nil {
return nil, err
}
}
defer func() {
if rem {
s.pending = s.pending[1:]
}
}()
switch char := buf[0]; char {
case '\r':
return &Input{
Key: KeyEnter,
}, nil
case 27:
key := InputKey(0)
seq := make([]byte, 1)
_, err := s.in.Read(seq)
if err != nil {
goto escape
}
if seq[0] != '[' {
s.pending = append(s.pending, seq[0])
goto escape
}
_, err = s.in.Read(seq)
if err != nil {
s.pending = append(s.pending, '[')
goto escape
}
switch seq[0] {
case 'A':
key = KeyUp
case 'B':
key = KeyDown
case 'C':
key = KeyRight
case 'D':
key = KeyLeft
case 'F':
key = KeyEnd
case 'H':
key = KeyHome
case '5', '6':
seq2 := make([]byte, 1)
_, err = s.in.Read(seq2)
if err != nil {
s.pending = append(s.pending, '[', seq[0])
goto escape
}
if seq2[0] != '~' {
s.pending = append(s.pending, '[', seq[0], seq2[0])
goto escape
}
switch seq[0] {
case '5':
key = KeyPageUp
case '6':
key = KeyPageDown
}
default:
s.pending = append(s.pending, '[', seq[0])
goto escape
}
return &Input{
Key: key,
}, nil
escape:
return &Input{
Key: KeyEscape,
}, nil
default:
if char < 32 || char == 127 {
return &Input{
Key: InputKey(char),
}, nil
}
return &Input{
Byte: char,
}, nil
}
}
// ReadSecret prompts the user for a secret without echoing. The prompt is
// written to `os.Stderr` as that is more likely to be seen, e.g. if a user has
// redirected stdout.
//
// Unlike the other read-related methods, this method defers to the platform for
// processing input and handling interrupt characters like `^C`. Special
// consideration is only given to the backspace character which overwrites the
// previous byte when one is present.
func (s *Screen) ReadSecret(prompt string) ([]byte, error) {
if prompt != "" {
_, err := os.Stderr.WriteString(prompt)
if err != nil {
return nil, err
}
}
if err := s.makeRaw(Canonical, GenSignals); err != nil {
return nil, err
}
defer s.reset()
return s.readline()
}
// Readline keeps reading from the terminal input until a `\n` (or `\r` on
// Windows) is encountered.
func (s *Screen) Readline() ([]byte, error) {
return nil, nil
}
// ReadlineWithPrompt emits the given `prompt` to the terminal output before
// invoking `Readline`.
func (s *Screen) ReadlineWithPrompt(prompt string) ([]byte, error) {
_, err := s.out.WriteString(prompt)
if err != nil {
return nil, err
}
return s.Readline()
}
// ShowCursor makes the cursor visible.
func (s *Screen) ShowCursor() {
s.out.WriteString("\x1b[?25h")
}
// TrueColor returns whether the terminal supports 24-bit colors.
func (s *Screen) TrueColor() bool {
return s.trueColor(os.Getenv("COLORTERM"))
}
// Write writes to the terminal output.
func (s *Screen) Write(p []byte) (n int, err error) {
return s.out.Write(p)
}
// WriteString is like `Write`, but writes a string instead of a byte slice.
func (s *Screen) WriteString(p string) (n int, err error) {
return s.Write([]byte(p))
}
+func (s *Screen) cursorPos() (Pos, error) {
+ if err := s.makeRaw(); err != nil {
+ return Pos{}, err
+ }
+ defer s.reset()
+ // Query the terminal.
+ if _, err := s.out.WriteString("\x1b[6n"); err != nil {
+ return Pos{}, err
+ }
+ // Read the response after draining any unexpected input.
+ buf := [20]byte{}
+ for {
+ n, err := s.Read(buf[0:1])
+ if n == 1 && buf[0] == '\x1b' {
+ break
+ }
+ if err != nil {
+ return Pos{}, err
+ }
+ }
+ i := 0
+ for i < len(buf) {
+ n, err := s.Read(buf[i : i+1])
+ if n == 1 && buf[i] == 'R' {
+ break
+ }
+ if err != nil {
+ return Pos{}, err
+ }
+ i++
+ }
+ // Exit on invalid data from the device.
+ if !(i >= 4 && buf[0] == '[') {
+ return Pos{}, errInvalidResponse
+ }
+ // Parse the response to get the position.
+ split := bytes.Split(buf[1:i], []byte{';'})
+ if len(split) != 2 {
+ return Pos{}, errInvalidResponse
+ }
+ row, err := strconv.ParseUint(string(split[0]), 10, 16)
+ if err != nil {
+ return Pos{}, errInvalidResponse
+ }
+ col, err := strconv.ParseUint(string(split[1]), 10, 16)
+ if err != nil {
+ return Pos{}, errInvalidResponse
+ }
+ return Pos{
+ Col: int(col),
+ Row: int(row),
+ }, nil
+}
+
func (s *Screen) makeRaw(opts ...RawOption) error {
if s.dev != nil {
return nil
}
opts = append(opts, ConvertLineEndings)
d, err := MakeRaw(s.in, opts...)
if err != nil {
return err
}
s.dev = d
return nil
}
func (s *Screen) readline() ([]byte, error) {
var out []byte
buf := make([]byte, 1)
for {
n, err := s.Read(buf)
if n == 1 {
switch buf[0] {
case '\b', 127:
if len(out) > 0 {
out = out[:len(out)-1]
}
case '\n':
if !isWindows {
return out, nil
}
case '\r':
if isWindows {
return out, nil
}
default:
out = append(out, buf[0])
}
}
if err != nil {
if err == io.EOF {
return out, nil
}
return nil, err
}
}
}
func (s *Screen) reset() error {
if s.dev != nil {
err := s.dev.Reset()
s.dev = nil
return err
}
return nil
}
func (s *Screen) trueColor(env string) bool {
// NOTE(tav): We assume the terminal is not lying if COLORTERM has a valid
// value. However, this may not be set system-wide or forwarded via sudo,
// ssh, etc.
//
// So we fallback by setting a 24-bit value followed by a query to the
// terminal to see if it actually set the color. Unfortunately, some common
// terminals don't support DECRQSS SGR requests.
if env == "truecolor" || env == "24bit" {
return true
}
// Get the current cursor position.
prev, err := s.CursorPos()
if err != nil {
return false
}
if err := s.makeRaw(NonBlocking); err != nil {
return false
}
defer func() {
s.reset()
// If the cursor moved after the DECRQSS request, e.g. due to the
// terminal not parsing the request properly, move the cursor back and
// overwrite the unintended output.
now, err := s.CursorPos()
if err != nil {
return
}
- if now.Col != prev.Col || now.Row != prev.Row {
- s.CursorTo(prev.Row, prev.Col)
+ if now != prev {
+ s.CursorTo(prev)
s.ClearToEnd()
}
}()
// Set an unlikely foreground color, and then send the terminal a DECRQSS
// SGR request to see if it has set it.
_, err = s.out.WriteString("\x1b[38:2::1:2:3m\x1bP$qm\x1b\\")
if err != nil {
return false
}
// Give the terminal some time to respond.
time.Sleep(100 * time.Millisecond)
// Try reading a response.
buf := make([]byte, 32)
n, err := s.Read(buf)
// Make sure to clear the set style after reading the response.
defer s.out.WriteString("\x1b[0m")
// Exit early on invalid data.
if err != nil || n < 13 {
return false
}
resp := string(buf[:n])
if !strings.HasPrefix(resp, "\x1bP1$r") || !strings.HasSuffix(resp, "m\x1b\\") {
return false
}
return strings.Contains(resp, ":1:2:3m")
}
// Canonical provides a `RawOption` to enable line-based canonical/cooked input
// processing.
func Canonical(r *RawConfig) {
r.canon = true
}
// ConvertLineEndings provides a `RawOption` to automatically convert CR to NL
// on input, and NL to CRNL when writing output.
func ConvertLineEndings(r *RawConfig) {
r.crnl = true
}
// GenSignals provides a `RawOption` to turn control characters like `^C` and
// `^Z` into signals instead of passing them through directly as characters.
func GenSignals(r *RawConfig) {
r.sig = true
}
// IsTTY checks whether the given device file is connected to a terminal.
func IsTTY(f *os.File) bool {
return isTTY(int(f.Fd()))
}
// MakeRaw converts the given device file into "raw" mode, i.e. disables
// echoing, disables special processing of certain characters, etc.
func MakeRaw(f *os.File, opts ...RawOption) (*Device, error) {
cfg := &RawConfig{
block: true,
}
for _, opt := range opts {
opt(cfg)
}
d, err := getDevice(int(f.Fd()))
if err != nil {
return nil, err
}
if err := makeRaw(d, *cfg); err != nil {
return nil, err
}
return &Device{
d: d,
}, nil
}
// New instantiates a new `Screen` for interactive terminal applications.
func New() (*Screen, error) {
var (
err error
in *os.File
out *os.File
)
if isWindows {
in, err = os.OpenFile("CONIN$", os.O_RDWR, 0)
if err != nil {
return nil, fmt.Errorf("term: unable to open terminal input: %w", err)
}
out, err = os.OpenFile("CONOUT$", os.O_RDWR, 0)
if err != nil {
return nil, fmt.Errorf("term: unable to open terminal output: %w", err)
}
} else {
in, err = os.OpenFile("/dev/tty", os.O_RDWR|os.O_SYNC|syscall.O_NOCTTY, 0)
if err != nil {
return nil, fmt.Errorf("term: unable to open terminal input/output: %w", err)
}
out = in
}
return &Screen{
in: in,
intr: true,
out: out,
}, nil
}
// NonBlocking provides a `RawOption` that configures the device to simulate
// non-blocking behavior by allowing `Read` calls to return immediately when
// there is no data to read.
//
// This should be used sparingly as it could degrade performance. However, it
// should still be better than changing the blocking mode with `O_NONBLOCK`,
// e.g. changing the mode on stdin could easily break the shell under normal
// circumstances when the program exits.
func NonBlocking(r *RawConfig) {
r.block = false
}
// WatchResize sends updated dimensions whenever the terminal window is resized.
func WatchResize(ctx context.Context, f *os.File) (<-chan Dimensions, error) {
_, err := WindowSize(f)
if err != nil {
return nil, err
}
ch := make(chan Dimensions)
go watchResize(ctx, int(f.Fd()), ch)
return ch, nil
}
// WindowSize returns the dimensions of the terminal.
func WindowSize(f *os.File) (Dimensions, error) {
return windowSize(int(f.Fd()))
}
|
espra/espra | 92dbdada90316e841c6dd92237fd80d92675048d | pkg/term: add support for interacting with terminals | diff --git a/pkg/term/term.go b/pkg/term/term.go
new file mode 100644
index 0000000..1598532
--- /dev/null
+++ b/pkg/term/term.go
@@ -0,0 +1,708 @@
+// Public Domain (-) 2010-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// Package term provides support for interacting with terminals.
+package term
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "web4.cc/pkg/process"
+)
+
+// Control characters.
+const (
+ KeyNull InputKey = iota
+ KeyCtrlA
+ KeyCtrlB
+ KeyCtrlC
+ KeyCtrlD
+ KeyCtrlE
+ KeyCtrlF
+ KeyCtrlG
+ KeyCtrlH
+ KeyCtrlI
+ KeyCtrlJ
+ KeyCtrlK
+ KeyCtrlL
+ KeyCtrlM
+ KeyCtrlN
+ KeyCtrlO
+ KeyCtrlP
+ KeyCtrlQ
+ KeyCtrlR
+ KeyCtrlS
+ KeyCtrlT
+ KeyCtrlU
+ KeyCtrlV
+ KeyCtrlW
+ KeyCtrlX
+ KeyCtrlY
+ KeyCtrlZ
+ KeyCtrlLeftBracket
+ KeyCtrlBackslash
+ KeyCtrlRightBracket
+ KeyCtrlCaret
+ KeyCtrlUnderscore
+)
+
+// Common aliases for control characters.
+const (
+ KeyBackspace InputKey = 127
+ KeyEnter InputKey = '\n'
+ KeyEscape InputKey = 27
+ KeyInterrupt InputKey = KeyCtrlC
+ KeyTab InputKey = '\t'
+)
+
+// Arrow keys.
+const (
+ KeyDown InputKey = iota + 128
+ KeyEnd
+ KeyHome
+ KeyLeft
+ KeyPageDown
+ KeyPageUp
+ KeyRight
+ KeyUp
+)
+
+const isWindows = runtime.GOOS == "windows"
+
+// Error values.
+var (
+ errInvalidResponse = errors.New("term: invalid response from terminal")
+)
+
+var (
+ cursorHidden bool
+ mu sync.Mutex // protects cursorHidden
+)
+
+// Device represents a device file that has been "converted" using `MakeRaw`.
+//
+// Devices must be reset by calling the `Reset` method before the program exits.
+// Otherwise, external systems like the user's shell might be left in a broken
+// state.
+type Device struct {
+ d *device
+}
+
+// Read does a raw read on the device.
+func (d *Device) Read(p []byte) (int, error) {
+ return d.d.Read(p)
+}
+
+// Reset resets the device file back to its initial state before it was
+// converted.
+func (d *Device) Reset() error {
+ return setDevice(d.d)
+}
+
+// Dimensions represents the window dimensions for the terminal.
+type Dimensions struct {
+ Cols int
+ Rows int
+}
+
+// Input represents input received from the terminal. The `Byte` field
+// represents non-control characters. When the `Byte` field is `0`, then the
+// `Key` field represents a control character or arrow key.
+//
+// Note that both `\n` and `\r` are mapped to `KeyEnter` for simplicity, and
+// that the `\n` and `\t` characters are returned in the `Key` field, while the
+// space character `' '` is returned in the `Byte` field.
+type Input struct {
+ Byte byte
+ Key InputKey
+}
+
+// InputKey represents a special input key received from the terminal. This
+// encompasses both control characters and arrow keys.
+type InputKey int
+
+// Pos represents the cursor position on the terminal.
+type Pos struct {
+ Col int
+ Row int
+}
+
+// RawConfig specifies the configuration for `MakeRaw`. It can be specified
+// using one of the `RawOption` functions.
+type RawConfig struct {
+ block bool
+ canon bool
+ crnl bool
+ sig bool
+}
+
+// RawOption functions configure `RawConfig` for `MakeRaw` calls.
+type RawOption func(*RawConfig)
+
+// Screen provides an interface for building interactive terminal applications.
+//
+// On UNIX systems, `Screen` reads and writes from `/dev/tty`. On Windows, it
+// reads from `CONIN$` and writes to `CONOUT$`.
+type Screen struct {
+ dev *Device
+ in *os.File
+ intr bool
+ pending []byte
+ out *os.File
+}
+
+// Bell tells the terminal to emit a beep/bell.
+func (s *Screen) Bell() {
+ s.out.WriteString("\x07")
+}
+
+// ClearLine clears the current line.
+func (s *Screen) ClearLine() {
+ s.out.WriteString("\x1b[2K")
+}
+
+// ClearLineToEnd clears everything from the cursor to the end of the current
+// line.
+func (s *Screen) ClearLineToEnd() {
+ s.out.WriteString("\x1b[0K")
+}
+
+// ClearLineToStart clears everything from the cursor to the start of the
+// current line.
+func (s *Screen) ClearLineToStart() {
+ s.out.WriteString("\x1b[1K")
+}
+
+// ClearScreen clears the screen and moves the cursor to the top left.
+func (s *Screen) ClearScreen() {
+ s.out.WriteString("\x1b[2J\x1b[H")
+}
+
+// ClearToEnd clears everything from the cursor to the end of the screen.
+func (s *Screen) ClearToEnd() {
+ s.out.WriteString("\x1b[0J")
+}
+
+// ClearToStart clears everything from the cursor to the start of the screen.
+func (s *Screen) ClearToStart() {
+ s.out.WriteString("\x1b[1J")
+}
+
+// CursorDown moves the cursor down by the given amount.
+func (s *Screen) CursorDown(n int) {
+ fmt.Fprintf(s.out, "\x1b[%dB", n)
+}
+
+// CursorLeft moves the cursor left by the given amount.
+func (s *Screen) CursorLeft(n int) {
+ fmt.Fprintf(s.out, "\x1b[%dD", n)
+}
+
+// CursorPos returns the current position of the cursor.
+func (s *Screen) CursorPos() (*Pos, error) {
+ if err := s.makeRaw(); err != nil {
+ return nil, err
+ }
+ defer s.reset()
+ // Query the terminal.
+ if _, err := s.out.WriteString("\x1b[6n"); err != nil {
+ return nil, err
+ }
+ // Read the response.
+ buf := [20]byte{}
+ i := 0
+ for i < len(buf) {
+ n, err := s.Read(buf[i : i+1])
+ if n == 1 && buf[i] == 'R' {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ i++
+ }
+ // Exit on invalid data from the device.
+ if !(i >= 5 && buf[0] == '\x1b' && buf[1] == '[') {
+ return nil, errInvalidResponse
+ }
+ // Parse the response to get the position.
+ split := bytes.Split(buf[2:i], []byte{';'})
+ if len(split) != 2 {
+ return nil, errInvalidResponse
+ }
+ row, err := strconv.ParseUint(string(split[0]), 10, 16)
+ if err != nil {
+ return nil, errInvalidResponse
+ }
+ col, err := strconv.ParseUint(string(split[1]), 10, 16)
+ if err != nil {
+ return nil, errInvalidResponse
+ }
+ return &Pos{
+ Col: int(col),
+ Row: int(row),
+ }, nil
+}
+
+// CursorRight moves the cursor right by the given amount.
+func (s *Screen) CursorRight(n int) {
+ fmt.Fprintf(s.out, "\x1b[%dC", n)
+}
+
+// CursorTo moves the cursor to the given position.
+func (s *Screen) CursorTo(row, column int) {
+ fmt.Fprintf(s.out, "\x1b[%d;%dH", row, column)
+}
+
+// CursorUp moves the cursor up by the given amount.
+func (s *Screen) CursorUp(n int) {
+ fmt.Fprintf(s.out, "\x1b[%dA", n)
+}
+
+// HideCursor hides the cursor on the terminal. It also registers a
+// `process.Exit` handler to restore the cursor when the process exits.
+func (s *Screen) HideCursor() {
+ mu.Lock()
+ if !cursorHidden {
+ process.SetExitHandler(s.ShowCursor)
+ cursorHidden = true
+ }
+ mu.Unlock()
+ s.out.WriteString("\x1b[?25l")
+}
+
+// Interruptible sets whether the reading of input from the terminal can be
+// interrupted by certain control characters. If interruptible:
+//
+// * `^C` exits the process with an exit code of 130.
+//
+// * `^\` aborts the process with a panic and prints stacktraces.
+//
+// All `Screen` instances are interruptible by default.
+func (s *Screen) Interruptible(state bool) {
+ s.intr = state
+}
+
+// Print formats the operands like `fmt.Print` and writes to the terminal
+// output.
+func (s *Screen) Print(a ...interface{}) (n int, err error) {
+ return fmt.Fprint(s, a...)
+}
+
+// Printf formats the operands like `fmt.Printf` and writes to the terminal
+// output.
+func (s *Screen) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(s, format, a...)
+}
+
+// Println formats the operands like `fmt.Println` and writes to the terminal
+// output.
+func (s *Screen) Println(a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(s, a...)
+}
+
+// Read reads the terminal input.
+func (s *Screen) Read(p []byte) (n int, err error) {
+ if s.dev == nil {
+ if err := s.makeRaw(); err != nil {
+ return 0, err
+ }
+ defer s.reset()
+ }
+ if !s.intr {
+ return s.in.Read(p)
+ }
+ n, err = s.in.Read(p)
+ if n > 0 {
+ for _, char := range p[:n] {
+ switch InputKey(char) {
+ case KeyCtrlC:
+ s.reset()
+ process.Exit(130) // 128 + SIGINT (signal 2)
+ case KeyCtrlBackslash:
+ s.reset()
+ process.Crash()
+ }
+ }
+ }
+ return n, err
+}
+
+// ReadInput reads `Input` from the terminal.
+func (s *Screen) ReadInput() (*Input, error) {
+ if err := s.makeRaw(); err != nil {
+ return nil, err
+ }
+ defer s.reset()
+ var buf []byte
+ rem := false
+ if len(s.pending) > 0 {
+ buf = s.pending
+ rem = true
+ } else {
+ buf = make([]byte, 1)
+ _, err := s.Read(buf)
+ if err != nil {
+ return nil, err
+ }
+ }
+ defer func() {
+ if rem {
+ s.pending = s.pending[1:]
+ }
+ }()
+ switch char := buf[0]; char {
+ case '\r':
+ return &Input{
+ Key: KeyEnter,
+ }, nil
+ case 27:
+ key := InputKey(0)
+ seq := make([]byte, 1)
+ _, err := s.in.Read(seq)
+ if err != nil {
+ goto escape
+ }
+ if seq[0] != '[' {
+ s.pending = append(s.pending, seq[0])
+ goto escape
+ }
+ _, err = s.in.Read(seq)
+ if err != nil {
+ s.pending = append(s.pending, '[')
+ goto escape
+ }
+ switch seq[0] {
+ case 'A':
+ key = KeyUp
+ case 'B':
+ key = KeyDown
+ case 'C':
+ key = KeyRight
+ case 'D':
+ key = KeyLeft
+ case 'F':
+ key = KeyEnd
+ case 'H':
+ key = KeyHome
+ case '5', '6':
+ seq2 := make([]byte, 1)
+ _, err = s.in.Read(seq2)
+ if err != nil {
+ s.pending = append(s.pending, '[', seq[0])
+ goto escape
+ }
+ if seq2[0] != '~' {
+ s.pending = append(s.pending, '[', seq[0], seq2[0])
+ goto escape
+ }
+ switch seq[0] {
+ case '5':
+ key = KeyPageUp
+ case '6':
+ key = KeyPageDown
+ }
+ default:
+ s.pending = append(s.pending, '[', seq[0])
+ goto escape
+ }
+ return &Input{
+ Key: key,
+ }, nil
+ escape:
+ return &Input{
+ Key: KeyEscape,
+ }, nil
+ default:
+ if char < 32 || char == 127 {
+ return &Input{
+ Key: InputKey(char),
+ }, nil
+ }
+ return &Input{
+ Byte: char,
+ }, nil
+ }
+}
+
+// ReadSecret prompts the user for a secret without echoing. The prompt is
+// written to `os.Stderr` as that is more likely to be seen, e.g. if a user has
+// redirected stdout.
+//
+// Unlike the other read-related methods, this method defers to the platform for
+// processing input and handling interrupt characters like `^C`. Special
+// consideration is only given to the backspace character which overwrites the
+// previous byte when one is present.
+func (s *Screen) ReadSecret(prompt string) ([]byte, error) {
+ if prompt != "" {
+ _, err := os.Stderr.WriteString(prompt)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if err := s.makeRaw(Canonical, GenSignals); err != nil {
+ return nil, err
+ }
+ defer s.reset()
+ return s.readline()
+}
+
+// Readline keeps reading from the terminal input until a `\n` (or `\r` on
+// Windows) is encountered.
+func (s *Screen) Readline() ([]byte, error) {
+ return nil, nil
+}
+
+// ReadlineWithPrompt emits the given `prompt` to the terminal output before
+// invoking `Readline`.
+func (s *Screen) ReadlineWithPrompt(prompt string) ([]byte, error) {
+ _, err := s.out.WriteString(prompt)
+ if err != nil {
+ return nil, err
+ }
+ return s.Readline()
+}
+
+// ShowCursor makes the cursor visible.
+func (s *Screen) ShowCursor() {
+ s.out.WriteString("\x1b[?25h")
+}
+
+// TrueColor returns whether the terminal supports 24-bit colors.
+func (s *Screen) TrueColor() bool {
+ return s.trueColor(os.Getenv("COLORTERM"))
+}
+
+// Write writes to the terminal output.
+func (s *Screen) Write(p []byte) (n int, err error) {
+ return s.out.Write(p)
+}
+
+// WriteString is like `Write`, but writes a string instead of a byte slice.
+func (s *Screen) WriteString(p string) (n int, err error) {
+ return s.Write([]byte(p))
+}
+
+func (s *Screen) makeRaw(opts ...RawOption) error {
+ if s.dev != nil {
+ return nil
+ }
+ opts = append(opts, ConvertLineEndings)
+ d, err := MakeRaw(s.in, opts...)
+ if err != nil {
+ return err
+ }
+ s.dev = d
+ return nil
+}
+
+func (s *Screen) readline() ([]byte, error) {
+ var out []byte
+ buf := make([]byte, 1)
+ for {
+ n, err := s.Read(buf)
+ if n == 1 {
+ switch buf[0] {
+ case '\b', 127:
+ if len(out) > 0 {
+ out = out[:len(out)-1]
+ }
+ case '\n':
+ if !isWindows {
+ return out, nil
+ }
+ case '\r':
+ if isWindows {
+ return out, nil
+ }
+ default:
+ out = append(out, buf[0])
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ return out, nil
+ }
+ return nil, err
+ }
+ }
+}
+
+func (s *Screen) reset() error {
+ if s.dev != nil {
+ err := s.dev.Reset()
+ s.dev = nil
+ return err
+ }
+ return nil
+}
+
+func (s *Screen) trueColor(env string) bool {
+ // NOTE(tav): We assume the terminal is not lying if COLORTERM has a valid
+ // value. However, this may not be set system-wide or forwarded via sudo,
+ // ssh, etc.
+ //
+ // So we fallback by setting a 24-bit value followed by a query to the
+ // terminal to see if it actually set the color. Unfortunately, some common
+ // terminals don't support DECRQSS SGR requests.
+ if env == "truecolor" || env == "24bit" {
+ return true
+ }
+ // Get the current cursor position.
+ prev, err := s.CursorPos()
+ if err != nil {
+ return false
+ }
+ if err := s.makeRaw(NonBlocking); err != nil {
+ return false
+ }
+ defer func() {
+ s.reset()
+ // If the cursor moved after the DECRQSS request, e.g. due to the
+ // terminal not parsing the request properly, move the cursor back and
+ // overwrite the unintended output.
+ now, err := s.CursorPos()
+ if err != nil {
+ return
+ }
+ if now.Col != prev.Col || now.Row != prev.Row {
+ s.CursorTo(prev.Row, prev.Col)
+ s.ClearToEnd()
+ }
+ }()
+ // Set an unlikely foreground color, and then send the terminal a DECRQSS
+ // SGR request to see if it has set it.
+ _, err = s.out.WriteString("\x1b[38:2::1:2:3m\x1bP$qm\x1b\\")
+ if err != nil {
+ return false
+ }
+ // Give the terminal some time to respond.
+ time.Sleep(100 * time.Millisecond)
+ // Try reading a response.
+ buf := make([]byte, 32)
+ n, err := s.Read(buf)
+ // Make sure to clear the set style after reading the response.
+ defer s.out.WriteString("\x1b[0m")
+ // Exit early on invalid data.
+ if err != nil || n < 13 {
+ return false
+ }
+ resp := string(buf[:n])
+ if !strings.HasPrefix(resp, "\x1bP1$r") || !strings.HasSuffix(resp, "m\x1b\\") {
+ return false
+ }
+ return strings.Contains(resp, ":1:2:3m")
+}
+
+// Canonical provides a `RawOption` to enable line-based canonical/cooked input
+// processing.
+func Canonical(r *RawConfig) {
+ r.canon = true
+}
+
+// ConvertLineEndings provides a `RawOption` to automatically convert CR to NL
+// on input, and NL to CRNL when writing output.
+func ConvertLineEndings(r *RawConfig) {
+ r.crnl = true
+}
+
+// GenSignals provides a `RawOption` to turn control characters like `^C` and
+// `^Z` into signals instead of passing them through directly as characters.
+func GenSignals(r *RawConfig) {
+ r.sig = true
+}
+
+// IsTTY checks whether the given device file is connected to a terminal.
+func IsTTY(f *os.File) bool {
+ return isTTY(int(f.Fd()))
+}
+
+// MakeRaw converts the given device file into "raw" mode, i.e. disables
+// echoing, disables special processing of certain characters, etc.
+func MakeRaw(f *os.File, opts ...RawOption) (*Device, error) {
+ cfg := &RawConfig{
+ block: true,
+ }
+ for _, opt := range opts {
+ opt(cfg)
+ }
+ d, err := getDevice(int(f.Fd()))
+ if err != nil {
+ return nil, err
+ }
+ if err := makeRaw(d, *cfg); err != nil {
+ return nil, err
+ }
+ return &Device{
+ d: d,
+ }, nil
+}
+
+// New instantiates a new `Screen` for interactive terminal applications.
+func New() (*Screen, error) {
+ var (
+ err error
+ in *os.File
+ out *os.File
+ )
+ if isWindows {
+ in, err = os.OpenFile("CONIN$", os.O_RDWR, 0)
+ if err != nil {
+ return nil, fmt.Errorf("term: unable to open terminal input: %w", err)
+ }
+ out, err = os.OpenFile("CONOUT$", os.O_RDWR, 0)
+ if err != nil {
+ return nil, fmt.Errorf("term: unable to open terminal output: %w", err)
+ }
+ } else {
+ in, err = os.OpenFile("/dev/tty", os.O_RDWR|os.O_SYNC|syscall.O_NOCTTY, 0)
+ if err != nil {
+ return nil, fmt.Errorf("term: unable to open terminal input/output: %w", err)
+ }
+ out = in
+ }
+ return &Screen{
+ in: in,
+ intr: true,
+ out: out,
+ }, nil
+}
+
+// NonBlocking provides a `RawOption` that configures the device to simulate
+// non-blocking behavior by allowing `Read` calls to return immediately when
+// there is no data to read.
+//
+// This should be used sparingly as it could degrade performance. However, it
+// should still be better than changing the blocking mode with `O_NONBLOCK`,
+// e.g. changing the mode on stdin could easily break the shell under normal
+// circumstances when the program exits.
+func NonBlocking(r *RawConfig) {
+ r.block = false
+}
+
+// WatchResize sends updated dimensions whenever the terminal window is resized.
+func WatchResize(ctx context.Context, f *os.File) (<-chan Dimensions, error) {
+ _, err := WindowSize(f)
+ if err != nil {
+ return nil, err
+ }
+ ch := make(chan Dimensions)
+ go watchResize(ctx, int(f.Fd()), ch)
+ return ch, nil
+}
+
+// WindowSize returns the dimensions of the terminal.
+func WindowSize(f *os.File) (Dimensions, error) {
+ return windowSize(int(f.Fd()))
+}
diff --git a/pkg/term/term_unix.go b/pkg/term/term_unix.go
index ebc124d..c8ad2aa 100644
--- a/pkg/term/term_unix.go
+++ b/pkg/term/term_unix.go
@@ -1,144 +1,142 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
// +build darwin dragonfly freebsd linux netbsd openbsd
package term
import (
"context"
"os"
"os/signal"
"syscall"
"golang.org/x/sys/unix"
)
type device struct {
fd int
termios *unix.Termios
}
func (d *device) Read(p []byte) (int, error) {
return unix.Read(d.fd, p)
}
-func disableEcho(d *device) error {
- t := *d.termios
- t.Iflag |= unix.ICRNL // Enable CR -> NL translation
- t.Lflag &^= unix.ECHO // Disable echoing
- t.Lflag |= 0 |
- unix.ICANON | // Enable canonical/cooked input processing
- unix.ISIG // Enable signal generation for characters like DSUSP, INTR, QUIT, and SUSP
- return setTermios(d.fd, &t)
-}
-
func getDevice(fd int) (*device, error) {
t, err := getTermios(fd)
return &device{fd, t}, err
}
func isTTY(fd int) bool {
_, err := getTermios(fd)
return err == nil
}
// This function behaves like `cfmakeraw` on various platforms:
//
// * FreeBSD:
// https://github.com/freebsd/freebsd-src/blob/master/lib/libc/gen/termios.c
//
// * Linux/glibc:
// https://sourceware.org/git/?p=glibc.git;a=blob;f=termios/cfmakeraw.c
//
// * OpenBSD:
// https://github.com/openbsd/src/blob/master/lib/libc/termios/cfmakeraw.c
//
// As well as like raw mode on other systems, e.g.
//
// * OpenSSH: https://github.com/openssh/openssh-portable/blob/master/sshtty.c
-func makeRaw(d *device, mode *RawMode) error {
+func makeRaw(d *device, cfg RawConfig) error {
// NOTE(tav): Given the historic nature of all of this, you are likely to
// find better documentation from early UNIX systems than from more "modern"
// systems.
t := *d.termios
// Configure input processing.
t.Iflag &^= 0 |
unix.BRKINT | // Ignore break conditions (in conjunction with IGNBRK)
unix.IGNCR | // Process CR
unix.IGNPAR | // Pass through bytes with framing/parity errors
unix.INLCR | // Disable NL -> CR translation
unix.INPCK | // Disable input parity checking
unix.ISTRIP | // Disable stripping of the high bit in 8-bit characters
unix.IXOFF | // Disable use of START and STOP characters for control flow on input
unix.IXON | // Disable use of START and STOP characters for control flow on output
unix.PARMRK // Do not mark framing/parity errors
t.Iflag |= unix.IGNBRK // Ignore break conditions
- if mode != nil && mode.DisableCRNL {
- t.Iflag &^= unix.ICRNL // Disable CR -> NL translation
- t.Oflag &^= unix.OPOST // Disable output post-processing
- } else {
+ if cfg.crnl {
t.Iflag |= unix.ICRNL // Enable CR -> NL translation
t.Oflag = unix.OPOST | // Enable output post-processing
unix.ONLCR // Enable NL -> CRNL translation
+ } else {
+ t.Iflag &^= unix.ICRNL // Disable CR -> NL translation
+ t.Oflag &^= unix.OPOST // Disable output post-processing
}
// Configure local terminal functions.
t.Lflag &^= 0 |
unix.ECHO | // Disable echoing
unix.ECHOE | // Disable echoing erasure of input by the ERASE character
unix.ECHOK | // Disable echoing of NL after the KILL character
unix.ECHONL | // Disable echoing of NL
- unix.ICANON | // Disable canonical/cooked input processing
- unix.IEXTEN | // Disable extended input processing like DISCARD and LNEXT
- unix.ISIG // Disable signal generation for characters like DSUSP, INTR, QUIT, and SUSP
+ unix.IEXTEN // Disable extended input processing like DISCARD and LNEXT
+ if cfg.canon {
+ t.Lflag |= unix.ICANON // Enable line-based canonical/cooked input processing
+ } else {
+ t.Lflag &^= unix.ICANON // Disable line-based canonical/cooked input processing
+ }
+ if cfg.sig {
+ t.Lflag |= unix.ISIG // Enable signal generation for characters like DSUSP, INTR, QUIT, and SUSP
+ } else {
+ t.Lflag &^= unix.ISIG // Disable signal generation for characters like DSUSP, INTR, QUIT, and SUSP
+ }
// Configure control modes.
t.Cflag &^= 0 |
unix.CSIZE | // Clear the current character size mask
unix.PARENB // Disable parity checking
t.Cflag |= 0 |
unix.CREAD | // Enable receiving of characters
unix.CS8 // Specify 8-bit character sizes
// Set the minimum number of bytes for read calls.
- if mode != nil && mode.NonBlocking {
- t.Cc[unix.VMIN] = 0
- } else {
+ if cfg.block {
t.Cc[unix.VMIN] = 1
+ } else {
+ t.Cc[unix.VMIN] = 0
}
// Disable timeouts on data transmissions.
t.Cc[unix.VTIME] = 0
return setTermios(d.fd, &t)
}
func setDevice(d *device) error {
return setTermios(d.fd, d.termios)
}
func watchResize(ctx context.Context, fd int, ch chan Dimensions) {
c := make(chan os.Signal, 100)
signal.Notify(c, syscall.SIGWINCH)
for {
select {
case <-ctx.Done():
signal.Stop(c)
close(ch)
return
case <-c:
dim, err := windowSize(fd)
if err != nil {
continue
}
ch <- dim
}
}
}
func windowSize(fd int) (Dimensions, error) {
w, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
if err != nil {
return Dimensions{}, err
}
return Dimensions{
Cols: int(w.Col),
Rows: int(w.Row),
}, nil
}
|
espra/espra | a17763e13df3c7ac9198833af7f08fa4867344f7 | pkg/term/style: add tests for ForceEnable/Disable | diff --git a/pkg/term/style/style_test.go b/pkg/term/style/style_test.go
index 24ded38..ddbfaaf 100644
--- a/pkg/term/style/style_test.go
+++ b/pkg/term/style/style_test.go
@@ -1,95 +1,103 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
package style
import (
"testing"
)
var tests = []testcase{
{Bold, "\x1b[1m"},
{Bold | Red, "\x1b[1;31m"},
{Bold | Red | WhiteBG, "\x1b[1;31;47m"},
{Bold | Bright | Red, "\x1b[1;91m"},
{Bold | Bright | Red | WhiteBG, "\x1b[1;91;107m"},
{Bold | Bright | Red | WhiteBG | Reset, "\x1b[0m"},
{Bold | Blink, "\x1b[1;5m"},
{Bold | Dim, "\x1b[1m"},
{Bold | Italic | Undercurl, "\x1b[1;3;4:3m"},
{Bold | Italic | Undercurl | Underline, "\x1b[1;3;4:3m"},
{Bold | Italic | Underline, "\x1b[1;3;4m"},
{Bold | Foreground256(100), "\x1b[1;38:5:100m"},
{Bold | Foreground256(100) | Background256(100), "\x1b[1;38:5:100;48:5:100m"},
{Bold | Foreground256(100) | Undercurl256(100), "\x1b[1;38:5:100;4:3;58:5:100m"},
{Bold | Background256(100), "\x1b[1;48:5:100m"},
{Bold | Undercurl256(100), "\x1b[1;4:3;58:5:100m"},
{Bold | ForegroundRGB(100, 90, 80), "\x1b[1;38:2::100:90:80m"},
{Bold | BackgroundRGB(100, 90, 80), "\x1b[1;48:2::100:90:80m"},
{Bold | UndercurlRGB(100, 90, 80), "\x1b[1;4:3;58:2::100:90:80m"},
{Bright, ""},
{Bright | Red, "\x1b[91m"},
{Dim, "\x1b[2m"},
{Invert | Italic | Strikethrough, "\x1b[3;7;9m"},
{Reset, "\x1b[0m"},
{Undercurl, "\x1b[4:3m"},
{Underline, "\x1b[4m"},
}
type testcase struct {
code Code
want string
}
func TestCodes(t *testing.T) {
for idx, tt := range tests {
got := tt.code.EscapeCodes()
if got != tt.want {
t.Errorf("test at idx %d = %q: want %q", idx, got, tt.want)
}
}
}
func TestEnabled(t *testing.T) {
got := isEnabled("0")
if got != false {
t.Errorf(`isEnabled("0") = %v: want false`, got)
}
got = isEnabled("1")
if got != true {
t.Errorf(`isEnabled("1") = %v: want true`, got)
}
got = isEnabled("2")
if got != true {
t.Errorf(`isEnabled("2") = %v: want true`, got)
}
+ ori := enabled
+ ForceEnable()
+ got = Enabled()
+ if got != true {
+ t.Errorf(`ForceEnable(); Enabled() = %v: want %v`, got, true)
+ }
+ ForceDisable()
got = Enabled()
- if got != enabled {
- t.Errorf(`Enabled() = %v: want %v`, got, enabled)
+ if got != false {
+ t.Errorf(`ForceDisable(); Enabled() = %v: want %v`, got, false)
}
+ enabled = ori
}
func TestWrap(t *testing.T) {
ori := enabled
enabled = true
got := Red.String()
want := "\x1b[31m"
if got != want {
t.Errorf(`COLOR=1 Red.String() = %q: want %q`, got, want)
}
got = Wrap("test", Bold|Red)
want = "\x1b[1;31mtest\x1b[0m"
if got != want {
t.Errorf(`COLOR=1 Wrap("test", Bold|Red) = %q: want %q`, got, want)
}
enabled = false
got = Red.String()
if got != "" {
t.Errorf(`COLOR=0 Red.String() = %q: want ""`, got)
}
got = Wrap("test", Bold|Red)
if got != "test" {
t.Errorf(`COLOR=0 Wrap("test", Bold|Red) = %q: want "test"`, got)
}
enabled = ori
}
|
espra/espra | 8552e9e1e399a061406c79cef468f4c134483bf0 | pkg/term/style: add support for controlling styled output | diff --git a/pkg/term/style/style.go b/pkg/term/style/style.go
index b1aa312..fddbc2c 100644
--- a/pkg/term/style/style.go
+++ b/pkg/term/style/style.go
@@ -1,328 +1,369 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
// Package style provides support for styling terminal output.
//
-// See the documentation for the term package's `Style` function to understand
-// the mechanism used to detect whether to emit styled output.
+// See the documentation for the `Enabled` function to understand how styled
+// output support is determined.
package style
import (
+ "os"
"strconv"
"strings"
"web4.cc/pkg/term"
)
// Codes for various text effects.
const (
Blink Code = 1 << iota
Bold
Bright
Dim
Invert
Italic
Reset
Strikethrough
Undercurl
Underline
)
// Codes for the base foreground colors.
const (
Black Code = (iota << 16) | (1 << 10)
Red
Green
Yellow
Blue
Magenta
Cyan
White
)
// Codes for the base background colors.
const (
BlackBG Code = (iota << 40) | (1 << 12)
RedBG
GreenBG
YellowBG
BlueBG
MagentaBG
CyanBG
WhiteBG
)
-var style = term.Style()
+var enabled bool
// Code represents colors and text effects for styling terminal output. You can
// OR multiple codes together with the following exceptions:
//
// * If Reset is set, all other codes will be ignored.
//
// * The Bold and Dim text effects cannot be OR-ed together. If they are, only
// Bold will be applied.
//
// * The Undercurl and Underline text effects cannot be OR-ed together. If they
// are, only Undercurl will be applied.
//
// * The Bright text effect only works with the base foreground and background
// colors.
//
// * At most, only two colors can be OR-ed together: a foreground color and a
-// background color, or a foreground color and a undercurl color. Any other
+// background color, or a foreground color and an undercurl color. Any other
// color combination, e.g. a foreground with another foreground color, will
// result in undefined behavior.
type Code uint64
// NOTE(tav): The 64 bits of the Code value are structured as:
//
// - 10 bits for text effects
// - 1 bit to indicate an 8-bit foreground color
// - 1 bit to indicate a 24-bit foreground color
// - 1 bit to indicate an 8-bit background color
// - 1 bit to indicate a 24-bit background color
// - 1 bit to indicate an 8-bit undercurl color
// - 1 bit to indicate a 24-bit undercurl color
// - 24 bits for the foreground color
// - 24 bits for the background/undercurl color
// EscapeCodes returns the ANSI escape codes for the Code. This function doesn't
-// pay any heed to whether the terminal supports styled output or not.
+// pay any heed to whether styled output is enabled or not.
func (c Code) EscapeCodes() string {
if c == 0 || c&Reset != 0 {
return "\x1b[0m"
}
b := strings.Builder{}
b.WriteString("\x1b[")
bright := false
undercurl := false
written := false
// Handle text effects.
if c&1023 != 0 {
// NOTE(tav): Bold and Dim are exclusive of each other.
if c&Bold != 0 {
b.WriteByte('1')
written = true
} else if c&Dim != 0 {
b.WriteByte('2')
written = true
}
if c&Bright != 0 {
bright = true
}
if c&Italic != 0 {
if written {
b.WriteByte(';')
}
b.WriteByte('3')
written = true
}
// NOTE(tav): Undercurl and Underline are exclusive of each other.
if c&Undercurl != 0 {
if written {
b.WriteByte(';')
}
b.WriteString("4:3")
undercurl = true
written = true
} else if c&Underline != 0 {
if written {
b.WriteByte(';')
}
b.WriteByte('4')
written = true
}
if c&Blink != 0 {
if written {
b.WriteByte(';')
}
b.WriteByte('5')
written = true
}
if c&Invert != 0 {
if written {
b.WriteByte(';')
}
b.WriteByte('7')
written = true
}
if c&Strikethrough != 0 {
if written {
b.WriteByte(';')
}
b.WriteByte('9')
written = true
}
}
// Handle foreground colors.
c >>= 10
if c&3 != 0 {
color := (c >> 6) & 0xffffff
if c&1 != 0 {
if color <= 8 {
if written {
b.WriteByte(';')
}
if bright {
b.WriteByte('9')
b.WriteByte('0' + uint8(color))
} else {
b.WriteByte('3')
b.WriteByte('0' + uint8(color))
}
written = true
} else if color <= 255 {
if written {
b.WriteByte(';')
}
b.WriteString("38:5:")
b.WriteString(strconv.FormatUint(uint64(color), 10))
written = true
}
} else {
if written {
b.WriteByte(';')
}
b.WriteString("38:2::")
b.WriteString(strconv.FormatUint(uint64(color&0xff), 10))
b.WriteByte(':')
b.WriteString(strconv.FormatUint(uint64((color>>8)&0xff), 10))
b.WriteByte(':')
b.WriteString(strconv.FormatUint(uint64((color>>16)&0xff), 10))
written = true
}
}
// Handle background colors.
c >>= 2
if c&3 != 0 {
color := c >> 28
if c&1 != 0 {
if color <= 8 {
if written {
b.WriteByte(';')
}
if bright {
b.WriteByte('1')
b.WriteByte('0')
b.WriteByte('0' + uint8(color))
} else {
b.WriteByte('4')
b.WriteByte('0' + uint8(color))
}
written = true
} else if color <= 255 {
if written {
b.WriteByte(';')
}
b.WriteString("48:5:")
b.WriteString(strconv.FormatUint(uint64(color), 10))
written = true
}
} else {
if written {
b.WriteByte(';')
}
b.WriteString("48:2::")
b.WriteString(strconv.FormatUint(uint64(color&0xff), 10))
b.WriteByte(':')
b.WriteString(strconv.FormatUint(uint64((color>>8)&0xff), 10))
b.WriteByte(':')
b.WriteString(strconv.FormatUint(uint64((color>>16)&0xff), 10))
written = true
}
}
// Handle undercurl colors.
c >>= 2
if c&3 != 0 {
if written {
b.WriteByte(';')
}
if !undercurl {
b.WriteString("4:3;")
}
color := c >> 26
if c&1 != 0 {
b.WriteString("58:5:")
b.WriteString(strconv.FormatUint(uint64(color), 10))
written = true
} else {
b.WriteString("58:2::")
b.WriteString(strconv.FormatUint(uint64(color&0xff), 10))
b.WriteByte(':')
b.WriteString(strconv.FormatUint(uint64((color>>8)&0xff), 10))
b.WriteByte(':')
b.WriteString(strconv.FormatUint(uint64((color>>16)&0xff), 10))
written = true
}
}
if written {
b.WriteByte('m')
return b.String()
}
return ""
}
-// String returns the ANSI escape codes for the Code. If the terminal doesn't
-// support styled output, this function will return an empty string.
+// String returns the ANSI escape codes for the Code. If styled output is not
+// enabled, this function will return an empty string.
func (c Code) String() string {
- if !style {
+ if !enabled {
return ""
}
return c.EscapeCodes()
}
// Background256 returns the background color code representing the given 256
// color value.
func Background256(v uint8) Code {
return (Code(v) << 40) | (1 << 12)
}
// BackgroundRGB returns the background color code representing the given 24-bit
// color.
func BackgroundRGB(r uint8, g uint8, b uint8) Code {
code := Code(r) | (Code(g) << 8) | (Code(b) << 16)
return (code << 40) | (1 << 13)
}
+// Enabled returns whether styled output is enabled. This can be forced using
+// the `ForceEnable` and `ForceDisable` functions, but will default to using the
+// following heuristic:
+//
+// * If the environment variable TERMSTYLE=0, then styled output is disabled.
+//
+// * If TERMSTYLE=2, then styled output is enabled.
+//
+// * Otherwise, styled output is only enabled if stdout is connected to a
+// terminal.
+//
+// This heuristic is run at startup using the stdout value at `os.Stdout`.
+func Enabled() bool {
+ return enabled
+}
+
+// ForceDisable forces styled output to be disabled.
+func ForceDisable() {
+ enabled = false
+}
+
+// ForceEnable forces styled output to be enabled.
+func ForceEnable() {
+ enabled = true
+}
+
// ForceWrap encloses the given text with escape codes to stylize it and then
// resets the output back to normal. Unlike Wrap, this function doesn't pay any
-// heed to whether the terminal supports styled output or not.
+// heed to whether styled output is enabled or not.
func ForceWrap(s string, c Code) string {
- return c.String() + s + "\x1b[0m"
+ return c.EscapeCodes() + s + "\x1b[0m"
}
// Foreground256 returns the foreground color code representing the given 256
// color value.
func Foreground256(v uint8) Code {
return (Code(v) << 16) | (1 << 10)
}
// ForegroundRGB returns the foreground color code representing the given 24-bit
// color.
func ForegroundRGB(r uint8, g uint8, b uint8) Code {
code := Code(r) | (Code(g) << 8) | (Code(b) << 16)
return (code << 16) | (1 << 11)
}
// Undercurl256 returns an undercurl color code representing the given 256 color
// value.
func Undercurl256(v uint8) Code {
return (Code(v) << 40) | (1 << 14)
}
// UndercurlRGB returns an undercurl color code representing the given 24-bit
// color.
func UndercurlRGB(r uint8, g uint8, b uint8) Code {
code := Code(r) | (Code(g) << 8) | (Code(b) << 16)
return (code << 40) | (1 << 15)
}
// Wrap encloses the given text with escape codes to stylize it and then resets
-// the output back to normal. If the terminal doesn't support styled output,
-// this function will return the given text without any changes.
+// the output back to normal. If styled output is not enabled, this function
+// will return the given text without any changes.
func Wrap(s string, c Code) string {
- if !style {
+ if !enabled {
return s
}
return ForceWrap(s, c)
}
+
+func isEnabled(env string) bool {
+ switch env {
+ case "0":
+ return false
+ case "2":
+ return true
+ }
+ return term.IsTTY(os.Stdout)
+}
+
+func init() {
+ enabled = isEnabled(os.Getenv("TERMSTYLE"))
+}
diff --git a/pkg/term/style/style_test.go b/pkg/term/style/style_test.go
index 5d437b8..24ded38 100644
--- a/pkg/term/style/style_test.go
+++ b/pkg/term/style/style_test.go
@@ -1,76 +1,95 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
package style
import (
"testing"
)
var tests = []testcase{
{Bold, "\x1b[1m"},
{Bold | Red, "\x1b[1;31m"},
{Bold | Red | WhiteBG, "\x1b[1;31;47m"},
{Bold | Bright | Red, "\x1b[1;91m"},
{Bold | Bright | Red | WhiteBG, "\x1b[1;91;107m"},
{Bold | Bright | Red | WhiteBG | Reset, "\x1b[0m"},
{Bold | Blink, "\x1b[1;5m"},
{Bold | Dim, "\x1b[1m"},
{Bold | Italic | Undercurl, "\x1b[1;3;4:3m"},
{Bold | Italic | Undercurl | Underline, "\x1b[1;3;4:3m"},
{Bold | Italic | Underline, "\x1b[1;3;4m"},
{Bold | Foreground256(100), "\x1b[1;38:5:100m"},
{Bold | Foreground256(100) | Background256(100), "\x1b[1;38:5:100;48:5:100m"},
{Bold | Foreground256(100) | Undercurl256(100), "\x1b[1;38:5:100;4:3;58:5:100m"},
{Bold | Background256(100), "\x1b[1;48:5:100m"},
{Bold | Undercurl256(100), "\x1b[1;4:3;58:5:100m"},
{Bold | ForegroundRGB(100, 90, 80), "\x1b[1;38:2::100:90:80m"},
{Bold | BackgroundRGB(100, 90, 80), "\x1b[1;48:2::100:90:80m"},
{Bold | UndercurlRGB(100, 90, 80), "\x1b[1;4:3;58:2::100:90:80m"},
{Bright, ""},
{Bright | Red, "\x1b[91m"},
{Dim, "\x1b[2m"},
{Invert | Italic | Strikethrough, "\x1b[3;7;9m"},
{Reset, "\x1b[0m"},
{Undercurl, "\x1b[4:3m"},
{Underline, "\x1b[4m"},
}
type testcase struct {
code Code
want string
}
func TestCodes(t *testing.T) {
for idx, tt := range tests {
got := tt.code.EscapeCodes()
if got != tt.want {
t.Errorf("test at idx %d = %q: want %q", idx, got, tt.want)
}
}
}
+func TestEnabled(t *testing.T) {
+ got := isEnabled("0")
+ if got != false {
+ t.Errorf(`isEnabled("0") = %v: want false`, got)
+ }
+ got = isEnabled("1")
+ if got != true {
+ t.Errorf(`isEnabled("1") = %v: want true`, got)
+ }
+ got = isEnabled("2")
+ if got != true {
+ t.Errorf(`isEnabled("2") = %v: want true`, got)
+ }
+ got = Enabled()
+ if got != enabled {
+ t.Errorf(`Enabled() = %v: want %v`, got, enabled)
+ }
+}
+
func TestWrap(t *testing.T) {
- ori := style
- style = true
+ ori := enabled
+ enabled = true
got := Red.String()
want := "\x1b[31m"
if got != want {
t.Errorf(`COLOR=1 Red.String() = %q: want %q`, got, want)
}
got = Wrap("test", Bold|Red)
want = "\x1b[1;31mtest\x1b[0m"
if got != want {
t.Errorf(`COLOR=1 Wrap("test", Bold|Red) = %q: want %q`, got, want)
}
- style = false
+ enabled = false
got = Red.String()
if got != "" {
t.Errorf(`COLOR=0 Red.String() = %q: want ""`, got)
}
got = Wrap("test", Bold|Red)
if got != "test" {
t.Errorf(`COLOR=0 Wrap("test", Bold|Red) = %q: want "test"`, got)
}
- style = ori
+ enabled = ori
}
|
espra/espra | 5b7a4896bd6b9db7e193382971e9f2e66d7ccf2d | pkg/term: add platform-specific handling for unix systems | diff --git a/pkg/term/term_unix.go b/pkg/term/term_unix.go
new file mode 100644
index 0000000..ebc124d
--- /dev/null
+++ b/pkg/term/term_unix.go
@@ -0,0 +1,144 @@
+// Public Domain (-) 2010-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package term
+
+import (
+ "context"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+type device struct {
+ fd int
+ termios *unix.Termios
+}
+
+func (d *device) Read(p []byte) (int, error) {
+ return unix.Read(d.fd, p)
+}
+
+func disableEcho(d *device) error {
+ t := *d.termios
+ t.Iflag |= unix.ICRNL // Enable CR -> NL translation
+ t.Lflag &^= unix.ECHO // Disable echoing
+ t.Lflag |= 0 |
+ unix.ICANON | // Enable canonical/cooked input processing
+ unix.ISIG // Enable signal generation for characters like DSUSP, INTR, QUIT, and SUSP
+ return setTermios(d.fd, &t)
+}
+
+func getDevice(fd int) (*device, error) {
+ t, err := getTermios(fd)
+ return &device{fd, t}, err
+}
+
+func isTTY(fd int) bool {
+ _, err := getTermios(fd)
+ return err == nil
+}
+
+// This function behaves like `cfmakeraw` on various platforms:
+//
+// * FreeBSD:
+// https://github.com/freebsd/freebsd-src/blob/master/lib/libc/gen/termios.c
+//
+// * Linux/glibc:
+// https://sourceware.org/git/?p=glibc.git;a=blob;f=termios/cfmakeraw.c
+//
+// * OpenBSD:
+// https://github.com/openbsd/src/blob/master/lib/libc/termios/cfmakeraw.c
+//
+// As well as like raw mode on other systems, e.g.
+//
+// * OpenSSH: https://github.com/openssh/openssh-portable/blob/master/sshtty.c
+func makeRaw(d *device, mode *RawMode) error {
+ // NOTE(tav): Given the historic nature of all of this, you are likely to
+ // find better documentation from early UNIX systems than from more "modern"
+ // systems.
+ t := *d.termios
+ // Configure input processing.
+ t.Iflag &^= 0 |
+ unix.BRKINT | // Ignore break conditions (in conjunction with IGNBRK)
+ unix.IGNCR | // Process CR
+ unix.IGNPAR | // Pass through bytes with framing/parity errors
+ unix.INLCR | // Disable NL -> CR translation
+ unix.INPCK | // Disable input parity checking
+ unix.ISTRIP | // Disable stripping of the high bit in 8-bit characters
+ unix.IXOFF | // Disable use of START and STOP characters for control flow on input
+ unix.IXON | // Disable use of START and STOP characters for control flow on output
+ unix.PARMRK // Do not mark framing/parity errors
+ t.Iflag |= unix.IGNBRK // Ignore break conditions
+ if mode != nil && mode.DisableCRNL {
+ t.Iflag &^= unix.ICRNL // Disable CR -> NL translation
+ t.Oflag &^= unix.OPOST // Disable output post-processing
+ } else {
+ t.Iflag |= unix.ICRNL // Enable CR -> NL translation
+ t.Oflag = unix.OPOST | // Enable output post-processing
+ unix.ONLCR // Enable NL -> CRNL translation
+ }
+ // Configure local terminal functions.
+ t.Lflag &^= 0 |
+ unix.ECHO | // Disable echoing
+ unix.ECHOE | // Disable echoing erasure of input by the ERASE character
+ unix.ECHOK | // Disable echoing of NL after the KILL character
+ unix.ECHONL | // Disable echoing of NL
+ unix.ICANON | // Disable canonical/cooked input processing
+ unix.IEXTEN | // Disable extended input processing like DISCARD and LNEXT
+ unix.ISIG // Disable signal generation for characters like DSUSP, INTR, QUIT, and SUSP
+ // Configure control modes.
+ t.Cflag &^= 0 |
+ unix.CSIZE | // Clear the current character size mask
+ unix.PARENB // Disable parity checking
+ t.Cflag |= 0 |
+ unix.CREAD | // Enable receiving of characters
+ unix.CS8 // Specify 8-bit character sizes
+ // Set the minimum number of bytes for read calls.
+ if mode != nil && mode.NonBlocking {
+ t.Cc[unix.VMIN] = 0
+ } else {
+ t.Cc[unix.VMIN] = 1
+ }
+ // Disable timeouts on data transmissions.
+ t.Cc[unix.VTIME] = 0
+ return setTermios(d.fd, &t)
+}
+
+func setDevice(d *device) error {
+ return setTermios(d.fd, d.termios)
+}
+
+func watchResize(ctx context.Context, fd int, ch chan Dimensions) {
+ c := make(chan os.Signal, 100)
+ signal.Notify(c, syscall.SIGWINCH)
+ for {
+ select {
+ case <-ctx.Done():
+ signal.Stop(c)
+ close(ch)
+ return
+ case <-c:
+ dim, err := windowSize(fd)
+ if err != nil {
+ continue
+ }
+ ch <- dim
+ }
+ }
+}
+
+func windowSize(fd int) (Dimensions, error) {
+ w, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
+ if err != nil {
+ return Dimensions{}, err
+ }
+ return Dimensions{
+ Cols: int(w.Col),
+ Rows: int(w.Row),
+ }, nil
+}
|
espra/espra | a811132dd0ded047ce5407aa3877a17476cf4533 | pkg/process: add function to crash with stacktraces | diff --git a/pkg/process/process.go b/pkg/process/process.go
index f800957..643aa72 100644
--- a/pkg/process/process.go
+++ b/pkg/process/process.go
@@ -1,213 +1,221 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
// Package process provides utilities for managing the current system process.
package process
import (
"context"
"fmt"
"os"
"os/signal"
"path/filepath"
+ "runtime/debug"
"sync"
"syscall"
)
// OSExit is the function used to terminate the current process. It defaults to
// os.Exit, but can be overridden for testing purposes.
var OSExit = os.Exit
var (
exitDisabled bool
exiting bool
mu sync.RWMutex // protects exitDisabled, exiting, registry
registry = map[os.Signal][]func(){}
testMode = false
testSig = make(chan struct{}, 10)
wait = make(chan struct{})
)
type lockFile struct {
file string
link string
}
func (l *lockFile) release() {
os.Remove(l.file)
os.Remove(l.link)
}
+// Crash will terminate the process with a panic that will generate stacktraces
+// for all user-generated goroutines.
+func Crash() {
+ debug.SetTraceback("all")
+ panic("abort")
+}
+
// CreatePIDFile writes the current process ID to a new file at the given path.
// The written file is removed when Exit is called, or when the process receives
// an os.Interrupt or SIGTERM signal.
func CreatePIDFile(path string) error {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0o660)
if err != nil {
return err
}
fmt.Fprintf(f, "%d", os.Getpid())
err = f.Close()
if err == nil {
SetExitHandler(func() {
os.Remove(path)
})
}
return err
}
// DisableAutoExit will prevent the process from automatically exiting after
// processing os.Interrupt or SIGTERM signals. This will not be enforced if Exit
// is called directly.
func DisableAutoExit() {
mu.Lock()
exitDisabled = true
mu.Unlock()
}
// Exit runs the registered exit handlers, as if the os.Interrupt signal had
// been sent, and then terminates the process with the given status code. Exit
// blocks until the process terminates if it has already been called elsewhere.
func Exit(code int) {
mu.Lock()
if exiting {
mu.Unlock()
if testMode {
testSig <- struct{}{}
}
<-wait
return
}
exiting = true
handlers := clone(registry[os.Interrupt])
mu.Unlock()
for _, handler := range handlers {
handler()
}
OSExit(code)
}
// Init tries to acquire a process lock and write the PID file for the current
// process.
func Init(directory string, name string) error {
if err := Lock(directory, name); err != nil {
return err
}
return CreatePIDFile(filepath.Join(directory, name+".pid"))
}
// Lock tries to acquire a process lock in the given directory. The acquired
// lock file is released when Exit is called, or when the process receives an
// os.Interrupt or SIGTERM signal.
//
// This function has only been tested for correctness on Unix systems with
// filesystems where link is atomic. It may not work as expected on NFS mounts
// or on platforms like Windows.
func Lock(directory string, name string) error {
file := filepath.Join(directory, fmt.Sprintf("%s-%d.lock", name, os.Getpid()))
f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY, 0o660)
if err != nil {
return err
}
f.Close()
link := filepath.Join(directory, name+".lock")
err = os.Link(file, link)
if err != nil {
// We don't remove the lock file here so that calling Lock multiple
// times from the same process doesn't remove an existing lock.
return err
}
l := &lockFile{
file: file,
link: link,
}
SetExitHandler(l.release)
return nil
}
// ReapOrphans reaps orphaned child processes and returns whether there are any
// unterminated child processes that are still active.
//
// This is currently a no-op on all platforms except Linux.
func ReapOrphans() bool {
return reap()
}
// ResetHandlers drops all currently registered handlers.
func ResetHandlers() {
mu.Lock()
registry = map[os.Signal][]func(){}
mu.Unlock()
}
// RunReaper continuously attempts to reap orphaned child processes until the
// given context is cancelled.
//
// On Linux, this will register the current process as a child subreaper, and
// attempt to reap child processes whenever SIGCHLD is received. On all other
// platforms, this is currently a no-op.
func RunReaper(ctx context.Context) {
runReaper(ctx)
}
// SetExitHandler registers the given handler function to run when receiving
// os.Interrupt or SIGTERM signals. Registered handlers are executed in reverse
// order of when they were set.
func SetExitHandler(handler func()) {
mu.Lock()
registry[os.Interrupt] = prepend(registry[os.Interrupt], handler)
registry[syscall.SIGTERM] = prepend(registry[syscall.SIGTERM], handler)
mu.Unlock()
}
// SetSignalHandler registers the given handler function to run when receiving
// the specified signal. Registered handlers are executed in reverse order of
// when they were set.
func SetSignalHandler(signal os.Signal, handler func()) {
mu.Lock()
registry[signal] = prepend(registry[signal], handler)
mu.Unlock()
}
func clone(xs []func()) []func() {
ys := make([]func(), len(xs))
copy(ys, xs)
return ys
}
func handleSignals() {
notifier := make(chan os.Signal, 100)
signal.Notify(notifier)
go func() {
for sig := range notifier {
mu.Lock()
disabled := exitDisabled
if !disabled {
if sig == syscall.SIGTERM || sig == os.Interrupt {
exiting = true
}
}
handlers := clone(registry[sig])
mu.Unlock()
for _, handler := range handlers {
handler()
}
if !disabled {
if sig == syscall.SIGTERM || sig == os.Interrupt {
OSExit(1)
}
}
if testMode {
testSig <- struct{}{}
}
}
}()
}
func prepend(xs []func(), handler func()) []func() {
return append([]func(){handler}, xs...)
}
func init() {
handleSignals()
}
|
espra/espra | 73ad84531f8b14bcfc7f4572b1ffc077821bb641 | pkg/wsl: add support for detecting WSL | diff --git a/pkg/wsl/wsl.go b/pkg/wsl/wsl.go
new file mode 100644
index 0000000..1bd8891
--- /dev/null
+++ b/pkg/wsl/wsl.go
@@ -0,0 +1,10 @@
+// Public Domain (-) 2021-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// Package wsl provides support for Windows Subsystem for Linux (WSL).
+package wsl
+
+// Detect returns whether the program looks like it's running under WSL.
+func Detect() bool {
+ return detect()
+}
diff --git a/pkg/wsl/wsl_linux.go b/pkg/wsl/wsl_linux.go
new file mode 100644
index 0000000..9eab6b5
--- /dev/null
+++ b/pkg/wsl/wsl_linux.go
@@ -0,0 +1,34 @@
+// Public Domain (-) 2021-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package wsl
+
+import (
+ "bytes"
+ "os"
+)
+
+var readFile = os.ReadFile
+
+func detect() bool {
+ // Supposedly, /proc/sys/kernel/osrelease will be of the form:
+ //
+ // <major>.<minor>.<patch>-microsoft-WSL<version>-<flavour>
+ //
+ // For example: 4.19.112-microsoft-WSL2-standard
+ //
+ // Source:
+ // https://github.com/microsoft/WSL/issues/423#issuecomment-611086412
+ os, err := readFile("/proc/sys/kernel/osrelease")
+ if err != nil {
+ return false
+ }
+ if bytes.Contains(os, []byte("WSL")) {
+ return true
+ }
+ // TODO(tav): Remove this check once the kernel/osrelease change has been
+ // backported, as Microsoft may create a popular non-WSL Linux distro at
+ // some point.
+ return bytes.Contains(bytes.ToLower(os), []byte("microsoft"))
+
+}
diff --git a/pkg/wsl/wsl_other.go b/pkg/wsl/wsl_other.go
new file mode 100644
index 0000000..55f78be
--- /dev/null
+++ b/pkg/wsl/wsl_other.go
@@ -0,0 +1,10 @@
+// Public Domain (-) 2021-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// +build !linux
+
+package wsl
+
+func detect() bool {
+ return false
+}
diff --git a/pkg/wsl/wsl_test.go b/pkg/wsl/wsl_test.go
new file mode 100644
index 0000000..178d153
--- /dev/null
+++ b/pkg/wsl/wsl_test.go
@@ -0,0 +1,40 @@
+// Public Domain (-) 2021-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// +build linux
+
+package wsl
+
+import (
+ "errors"
+ "os"
+ "testing"
+)
+
+func TestDetect(t *testing.T) {
+ var fail error
+ rel := ""
+ readFile = func(path string) ([]byte, error) {
+ return []byte(rel), fail
+ }
+ wsl := Detect()
+ if wsl {
+ t.Error("Detect() = true: want false")
+ }
+ rel = "4.19.112-microsoft-WSL2-standard"
+ wsl = Detect()
+ if !wsl {
+ t.Errorf("Detect() = false: want true for %q", rel)
+ }
+ rel = "4.19.112-microsoft-standard"
+ wsl = Detect()
+ if !wsl {
+ t.Errorf("Detect() = false: want true for %q", rel)
+ }
+ fail = errors.New("fail")
+ wsl = Detect()
+ if wsl {
+ t.Errorf("Detect() = false: want true for read failure")
+ }
+ readFile = os.ReadFile
+}
|
espra/espra | 2c1fb57feeae2a142fe8b7892b3f006e6ac95fb7 | pkg/term: add platform-specific handling for bsds and linux | diff --git a/pkg/term/term_bsd.go b/pkg/term/term_bsd.go
new file mode 100644
index 0000000..d1c955a
--- /dev/null
+++ b/pkg/term/term_bsd.go
@@ -0,0 +1,18 @@
+// Public Domain (-) 2010-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package term
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func getTermios(fd int) (*unix.Termios, error) {
+ return unix.IoctlGetTermios(fd, unix.TIOCGETA)
+}
+
+func setTermios(fd int, termios *unix.Termios) error {
+ return unix.IoctlSetTermios(fd, unix.TIOCSETA, termios)
+}
diff --git a/pkg/term/term_linux.go b/pkg/term/term_linux.go
new file mode 100644
index 0000000..99c90f9
--- /dev/null
+++ b/pkg/term/term_linux.go
@@ -0,0 +1,16 @@
+// Public Domain (-) 2010-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package term
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func getTermios(fd int) (*unix.Termios, error) {
+ return unix.IoctlGetTermios(fd, unix.TCGETS)
+}
+
+func setTermios(fd int, termios *unix.Termios) error {
+ return unix.IoctlSetTermios(fd, unix.TCSETS, termios)
+}
|
espra/espra | 0268c372147d359903a340483cd35e7633720601 | staticcheck: enable all checks | diff --git a/staticcheck.conf b/staticcheck.conf
new file mode 100644
index 0000000..51266e6
--- /dev/null
+++ b/staticcheck.conf
@@ -0,0 +1,4 @@
+# Public Domain (-) 2021-present, The Web4 Authors.
+# See the Web4 UNLICENSE file for details.
+
+checks = ["all"]
|
espra/espra | db2652c468e21d0f11884bfffa8f7b705874bd55 | pkg/term/style: add support for styling terminal output | diff --git a/pkg/term/style/style.go b/pkg/term/style/style.go
new file mode 100644
index 0000000..b1aa312
--- /dev/null
+++ b/pkg/term/style/style.go
@@ -0,0 +1,328 @@
+// Public Domain (-) 2010-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// Package style provides support for styling terminal output.
+//
+// See the documentation for the term package's `Style` function to understand
+// the mechanism used to detect whether to emit styled output.
+package style
+
+import (
+ "strconv"
+ "strings"
+
+ "web4.cc/pkg/term"
+)
+
+// Codes for various text effects.
+const (
+ Blink Code = 1 << iota
+ Bold
+ Bright
+ Dim
+ Invert
+ Italic
+ Reset
+ Strikethrough
+ Undercurl
+ Underline
+)
+
+// Codes for the base foreground colors.
+const (
+ Black Code = (iota << 16) | (1 << 10)
+ Red
+ Green
+ Yellow
+ Blue
+ Magenta
+ Cyan
+ White
+)
+
+// Codes for the base background colors.
+const (
+ BlackBG Code = (iota << 40) | (1 << 12)
+ RedBG
+ GreenBG
+ YellowBG
+ BlueBG
+ MagentaBG
+ CyanBG
+ WhiteBG
+)
+
+var style = term.Style()
+
+// Code represents colors and text effects for styling terminal output. You can
+// OR multiple codes together with the following exceptions:
+//
+// * If Reset is set, all other codes will be ignored.
+//
+// * The Bold and Dim text effects cannot be OR-ed together. If they are, only
+// Bold will be applied.
+//
+// * The Undercurl and Underline text effects cannot be OR-ed together. If they
+// are, only Undercurl will be applied.
+//
+// * The Bright text effect only works with the base foreground and background
+// colors.
+//
+// * At most, only two colors can be OR-ed together: a foreground color and a
+// background color, or a foreground color and a undercurl color. Any other
+// color combination, e.g. a foreground with another foreground color, will
+// result in undefined behavior.
+type Code uint64
+
+// NOTE(tav): The 64 bits of the Code value are structured as:
+//
+// - 10 bits for text effects
+// - 1 bit to indicate an 8-bit foreground color
+// - 1 bit to indicate a 24-bit foreground color
+// - 1 bit to indicate an 8-bit background color
+// - 1 bit to indicate a 24-bit background color
+// - 1 bit to indicate an 8-bit undercurl color
+// - 1 bit to indicate a 24-bit undercurl color
+// - 24 bits for the foreground color
+// - 24 bits for the background/undercurl color
+
+// EscapeCodes returns the ANSI escape codes for the Code. This function doesn't
+// pay any heed to whether the terminal supports styled output or not.
+func (c Code) EscapeCodes() string {
+ if c == 0 || c&Reset != 0 {
+ return "\x1b[0m"
+ }
+ b := strings.Builder{}
+ b.WriteString("\x1b[")
+ bright := false
+ undercurl := false
+ written := false
+ // Handle text effects.
+ if c&1023 != 0 {
+ // NOTE(tav): Bold and Dim are exclusive of each other.
+ if c&Bold != 0 {
+ b.WriteByte('1')
+ written = true
+ } else if c&Dim != 0 {
+ b.WriteByte('2')
+ written = true
+ }
+ if c&Bright != 0 {
+ bright = true
+ }
+ if c&Italic != 0 {
+ if written {
+ b.WriteByte(';')
+ }
+ b.WriteByte('3')
+ written = true
+ }
+ // NOTE(tav): Undercurl and Underline are exclusive of each other.
+ if c&Undercurl != 0 {
+ if written {
+ b.WriteByte(';')
+ }
+ b.WriteString("4:3")
+ undercurl = true
+ written = true
+ } else if c&Underline != 0 {
+ if written {
+ b.WriteByte(';')
+ }
+ b.WriteByte('4')
+ written = true
+ }
+ if c&Blink != 0 {
+ if written {
+ b.WriteByte(';')
+ }
+ b.WriteByte('5')
+ written = true
+ }
+ if c&Invert != 0 {
+ if written {
+ b.WriteByte(';')
+ }
+ b.WriteByte('7')
+ written = true
+ }
+ if c&Strikethrough != 0 {
+ if written {
+ b.WriteByte(';')
+ }
+ b.WriteByte('9')
+ written = true
+ }
+ }
+ // Handle foreground colors.
+ c >>= 10
+ if c&3 != 0 {
+ color := (c >> 6) & 0xffffff
+ if c&1 != 0 {
+ if color <= 8 {
+ if written {
+ b.WriteByte(';')
+ }
+ if bright {
+ b.WriteByte('9')
+ b.WriteByte('0' + uint8(color))
+ } else {
+ b.WriteByte('3')
+ b.WriteByte('0' + uint8(color))
+ }
+ written = true
+ } else if color <= 255 {
+ if written {
+ b.WriteByte(';')
+ }
+ b.WriteString("38:5:")
+ b.WriteString(strconv.FormatUint(uint64(color), 10))
+ written = true
+ }
+ } else {
+ if written {
+ b.WriteByte(';')
+ }
+ b.WriteString("38:2::")
+ b.WriteString(strconv.FormatUint(uint64(color&0xff), 10))
+ b.WriteByte(':')
+ b.WriteString(strconv.FormatUint(uint64((color>>8)&0xff), 10))
+ b.WriteByte(':')
+ b.WriteString(strconv.FormatUint(uint64((color>>16)&0xff), 10))
+ written = true
+ }
+ }
+ // Handle background colors.
+ c >>= 2
+ if c&3 != 0 {
+ color := c >> 28
+ if c&1 != 0 {
+ if color <= 8 {
+ if written {
+ b.WriteByte(';')
+ }
+ if bright {
+ b.WriteByte('1')
+ b.WriteByte('0')
+ b.WriteByte('0' + uint8(color))
+ } else {
+ b.WriteByte('4')
+ b.WriteByte('0' + uint8(color))
+ }
+ written = true
+ } else if color <= 255 {
+ if written {
+ b.WriteByte(';')
+ }
+ b.WriteString("48:5:")
+ b.WriteString(strconv.FormatUint(uint64(color), 10))
+ written = true
+ }
+ } else {
+ if written {
+ b.WriteByte(';')
+ }
+ b.WriteString("48:2::")
+ b.WriteString(strconv.FormatUint(uint64(color&0xff), 10))
+ b.WriteByte(':')
+ b.WriteString(strconv.FormatUint(uint64((color>>8)&0xff), 10))
+ b.WriteByte(':')
+ b.WriteString(strconv.FormatUint(uint64((color>>16)&0xff), 10))
+ written = true
+ }
+ }
+ // Handle undercurl colors.
+ c >>= 2
+ if c&3 != 0 {
+ if written {
+ b.WriteByte(';')
+ }
+ if !undercurl {
+ b.WriteString("4:3;")
+ }
+ color := c >> 26
+ if c&1 != 0 {
+ b.WriteString("58:5:")
+ b.WriteString(strconv.FormatUint(uint64(color), 10))
+ written = true
+ } else {
+ b.WriteString("58:2::")
+ b.WriteString(strconv.FormatUint(uint64(color&0xff), 10))
+ b.WriteByte(':')
+ b.WriteString(strconv.FormatUint(uint64((color>>8)&0xff), 10))
+ b.WriteByte(':')
+ b.WriteString(strconv.FormatUint(uint64((color>>16)&0xff), 10))
+ written = true
+ }
+ }
+ if written {
+ b.WriteByte('m')
+ return b.String()
+ }
+ return ""
+}
+
+// String returns the ANSI escape codes for the Code. If the terminal doesn't
+// support styled output, this function will return an empty string.
+func (c Code) String() string {
+ if !style {
+ return ""
+ }
+ return c.EscapeCodes()
+}
+
+// Background256 returns the background color code representing the given 256
+// color value.
+func Background256(v uint8) Code {
+ return (Code(v) << 40) | (1 << 12)
+}
+
+// BackgroundRGB returns the background color code representing the given 24-bit
+// color.
+func BackgroundRGB(r uint8, g uint8, b uint8) Code {
+ code := Code(r) | (Code(g) << 8) | (Code(b) << 16)
+ return (code << 40) | (1 << 13)
+}
+
+// ForceWrap encloses the given text with escape codes to stylize it and then
+// resets the output back to normal. Unlike Wrap, this function doesn't pay any
+// heed to whether the terminal supports styled output or not.
+func ForceWrap(s string, c Code) string {
+ return c.String() + s + "\x1b[0m"
+}
+
+// Foreground256 returns the foreground color code representing the given 256
+// color value.
+func Foreground256(v uint8) Code {
+ return (Code(v) << 16) | (1 << 10)
+}
+
+// ForegroundRGB returns the foreground color code representing the given 24-bit
+// color.
+func ForegroundRGB(r uint8, g uint8, b uint8) Code {
+ code := Code(r) | (Code(g) << 8) | (Code(b) << 16)
+ return (code << 16) | (1 << 11)
+}
+
+// Undercurl256 returns an undercurl color code representing the given 256 color
+// value.
+func Undercurl256(v uint8) Code {
+ return (Code(v) << 40) | (1 << 14)
+}
+
+// UndercurlRGB returns an undercurl color code representing the given 24-bit
+// color.
+func UndercurlRGB(r uint8, g uint8, b uint8) Code {
+ code := Code(r) | (Code(g) << 8) | (Code(b) << 16)
+ return (code << 40) | (1 << 15)
+}
+
+// Wrap encloses the given text with escape codes to stylize it and then resets
+// the output back to normal. If the terminal doesn't support styled output,
+// this function will return the given text without any changes.
+func Wrap(s string, c Code) string {
+ if !style {
+ return s
+ }
+ return ForceWrap(s, c)
+}
diff --git a/pkg/term/style/style_test.go b/pkg/term/style/style_test.go
new file mode 100644
index 0000000..5d437b8
--- /dev/null
+++ b/pkg/term/style/style_test.go
@@ -0,0 +1,76 @@
+// Public Domain (-) 2010-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package style
+
+import (
+ "testing"
+)
+
+var tests = []testcase{
+ {Bold, "\x1b[1m"},
+ {Bold | Red, "\x1b[1;31m"},
+ {Bold | Red | WhiteBG, "\x1b[1;31;47m"},
+ {Bold | Bright | Red, "\x1b[1;91m"},
+ {Bold | Bright | Red | WhiteBG, "\x1b[1;91;107m"},
+ {Bold | Bright | Red | WhiteBG | Reset, "\x1b[0m"},
+ {Bold | Blink, "\x1b[1;5m"},
+ {Bold | Dim, "\x1b[1m"},
+ {Bold | Italic | Undercurl, "\x1b[1;3;4:3m"},
+ {Bold | Italic | Undercurl | Underline, "\x1b[1;3;4:3m"},
+ {Bold | Italic | Underline, "\x1b[1;3;4m"},
+ {Bold | Foreground256(100), "\x1b[1;38:5:100m"},
+ {Bold | Foreground256(100) | Background256(100), "\x1b[1;38:5:100;48:5:100m"},
+ {Bold | Foreground256(100) | Undercurl256(100), "\x1b[1;38:5:100;4:3;58:5:100m"},
+ {Bold | Background256(100), "\x1b[1;48:5:100m"},
+ {Bold | Undercurl256(100), "\x1b[1;4:3;58:5:100m"},
+ {Bold | ForegroundRGB(100, 90, 80), "\x1b[1;38:2::100:90:80m"},
+ {Bold | BackgroundRGB(100, 90, 80), "\x1b[1;48:2::100:90:80m"},
+ {Bold | UndercurlRGB(100, 90, 80), "\x1b[1;4:3;58:2::100:90:80m"},
+ {Bright, ""},
+ {Bright | Red, "\x1b[91m"},
+ {Dim, "\x1b[2m"},
+ {Invert | Italic | Strikethrough, "\x1b[3;7;9m"},
+ {Reset, "\x1b[0m"},
+ {Undercurl, "\x1b[4:3m"},
+ {Underline, "\x1b[4m"},
+}
+
+type testcase struct {
+ code Code
+ want string
+}
+
+func TestCodes(t *testing.T) {
+ for idx, tt := range tests {
+ got := tt.code.EscapeCodes()
+ if got != tt.want {
+ t.Errorf("test at idx %d = %q: want %q", idx, got, tt.want)
+ }
+ }
+}
+
+func TestWrap(t *testing.T) {
+ ori := style
+ style = true
+ got := Red.String()
+ want := "\x1b[31m"
+ if got != want {
+ t.Errorf(`COLOR=1 Red.String() = %q: want %q`, got, want)
+ }
+ got = Wrap("test", Bold|Red)
+ want = "\x1b[1;31mtest\x1b[0m"
+ if got != want {
+ t.Errorf(`COLOR=1 Wrap("test", Bold|Red) = %q: want %q`, got, want)
+ }
+ style = false
+ got = Red.String()
+ if got != "" {
+ t.Errorf(`COLOR=0 Red.String() = %q: want ""`, got)
+ }
+ got = Wrap("test", Bold|Red)
+ if got != "test" {
+ t.Errorf(`COLOR=0 Wrap("test", Bold|Red) = %q: want "test"`, got)
+ }
+ style = ori
+}
|
espra/espra | 8090dbf168aad0d6a4717d8c254e1f0edcdf4889 | pkg/ident: handle numbers in identifiers + expand initialisms | diff --git a/pkg/ident/ident.go b/pkg/ident/ident.go
index 7e368ba..47751bc 100644
--- a/pkg/ident/ident.go
+++ b/pkg/ident/ident.go
@@ -1,258 +1,258 @@
// Public Domain (-) 2018-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
// Package ident provides support for converting identifiers between different
// naming conventions.
package ident
import (
"bytes"
"fmt"
)
// Parts represents the normalized elements of an identifier.
type Parts [][]byte
func (p Parts) String() string {
return string(bytes.Join(p, []byte{','}))
}
// ToCamel converts the identifier into a camelCased string.
func (p Parts) ToCamel() string {
var out []byte
for idx, part := range p {
if idx == 0 {
out = append(out, bytes.ToLower(part)...)
} else {
out = append(out, part...)
}
}
return string(out)
}
// ToKebab converts the identifier into a kebab-cased string.
func (p Parts) ToKebab() string {
var out []byte
for idx, part := range p {
if idx != 0 {
out = append(out, '-')
}
out = append(out, bytes.ToLower(part)...)
}
return string(out)
}
// ToPascal converts the identifier into a PascalCased string.
func (p Parts) ToPascal() string {
var out []byte
for _, part := range p {
out = append(out, part...)
}
return string(out)
}
// ToScreamingSnake converts the identifier into a SCREAMING_SNAKE_CASED string.
func (p Parts) ToScreamingSnake() string {
var out []byte
for idx, part := range p {
if idx != 0 {
out = append(out, '_')
}
out = append(out, bytes.ToUpper(part)...)
}
return string(out)
}
// ToSnake converts the identifier into a snake_cased string.
func (p Parts) ToSnake() string {
var out []byte
for idx, part := range p {
if idx != 0 {
out = append(out, '_')
}
out = append(out, bytes.ToLower(part)...)
}
return string(out)
}
// add appends parts from the given element. It looks for runs of initialisms
// like "HTTPAPIs" and adds them as separate parts, i.e. "HTTP" and "APIs". Once
// all initialisms are detected, the remaining element is added as a single
// part.
func (p Parts) add(elem []byte) Parts {
// Try to match an initialism exactly.
if special, ok := mapping[string(bytes.ToUpper(elem))]; ok {
return append(p, []byte(special))
}
// Try to find the longest initialism matches from the start.
for len(elem) > 0 {
match := ""
pos := -1
for i := 0; i <= len(elem); i++ {
if special, ok := mapping[string(bytes.ToUpper(elem[:i]))]; ok {
match = special
pos = i
}
}
if pos == -1 {
p = append(p, elem)
break
}
p = append(p, []byte(match))
elem = elem[pos:]
}
return p
}
// tryAdd attempts to add parts from the given element. If any initialisms are
// found, they are added in canonical form.
func (p Parts) tryAdd(elem []byte) (Parts, []byte) {
var nelem []byte
// Try to match an initialism exactly.
if special, ok := mapping[string(bytes.ToUpper(elem))]; ok {
return append(p, []byte(special)), nil
}
// Try to match an initialism from the end for the longest identifier with a
// non-uppercase suffix.
last := ""
pos := -1
for i := len(elem) - 1; i >= 0; i-- {
if special, ok := mapping[string(bytes.ToUpper(elem[i:]))]; ok {
last = special
pos = i
}
}
if pos == -1 {
// NOTE(tav): The given elem must be at least 2 characters long. The
// code in FromPascal currently ensures this to be the case.
nelem = elem[len(elem)-2:]
elem = elem[:len(elem)-2]
} else {
elem = elem[:pos]
}
p = p.add(elem)
if len(last) > 0 {
p = append(p, []byte(last))
}
return p, nelem
}
// FromCamel parses the given camelCased identifier into its parts.
func FromCamel(ident string) Parts {
var parts Parts
i := 0
for ; i < len(ident); i++ {
char := ident[i]
if char >= 'A' && char <= 'Z' {
break
}
}
parts = append(parts, normalize([]byte(ident[:i])))
// NOTE(tav): The error must be nil, as ident must be empty or start on an
// uppercase character, per the break clause above.
elems, _ := FromPascal(ident[i:])
return append(parts, elems...)
}
// FromKebab parses the given kebab-cased identifier into its parts.
func FromKebab(ident string) Parts {
var (
elem []byte
parts Parts
)
for i := 0; i < len(ident); i++ {
char := ident[i]
if char == '-' {
if len(elem) == 0 {
continue
}
parts = append(parts, normalize(bytes.ToLower(elem)))
elem = []byte{}
} else {
elem = append(elem, char)
}
}
if len(elem) > 0 {
parts = append(parts, normalize(bytes.ToLower(elem)))
}
return parts
}
// FromPascal parses the given PascalCased identifier into its parts.
func FromPascal(ident string) (Parts, error) {
var (
elem []byte
parts Parts
)
// Ensure the first character is upper case.
if len(ident) > 0 {
char := ident[0]
if char < 'A' || char > 'Z' {
return nil, fmt.Errorf("ident: invalid PascalCased identifier: %q", ident)
}
elem = append(elem, char)
}
caps := true
for i := 1; i < len(ident); i++ {
char := ident[i]
- if char >= 'A' && char <= 'Z' {
+ if (char >= 'A' && char <= 'Z') || (char >= '0' && char <= '9') {
if caps {
elem = append(elem, char)
} else {
caps = true
parts = parts.add(elem)
elem = []byte{char}
}
} else if caps {
caps = false
elem = append(elem, char)
parts, elem = parts.tryAdd(elem)
} else {
elem = append(elem, char)
}
}
if len(elem) > 0 {
parts = parts.add(elem)
}
return parts, nil
}
// FromScreamingSnake parses the given SCREAMING_SNAKE_CASED identifier into its
// parts.
func FromScreamingSnake(ident string) Parts {
return FromSnake(ident)
}
// FromSnake parses the given snake_cased identifier into its parts.
func FromSnake(ident string) Parts {
var (
elem []byte
parts Parts
)
for i := 0; i < len(ident); i++ {
char := ident[i]
if char == '_' {
if len(elem) == 0 {
continue
}
parts = append(parts, normalize(bytes.ToLower(elem)))
elem = []byte{}
} else {
elem = append(elem, char)
}
}
if len(elem) > 0 {
parts = append(parts, normalize(bytes.ToLower(elem)))
}
return parts
}
func normalize(elem []byte) []byte {
if special, ok := mapping[string(bytes.ToUpper(elem))]; ok {
return []byte(special)
}
if len(elem) > 0 && 'a' <= elem[0] && elem[0] <= 'z' {
elem[0] -= 32
}
return elem
}
diff --git a/pkg/ident/ident_test.go b/pkg/ident/ident_test.go
index 9bc1786..71f3839 100644
--- a/pkg/ident/ident_test.go
+++ b/pkg/ident/ident_test.go
@@ -1,222 +1,230 @@
// Public Domain (-) 2020-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
package ident
import (
"strings"
"testing"
)
var spec = map[string]*definition{
"HTTPSServer": {
camel: "httpsServer",
kebab: "https-server",
},
"I": {
camel: "i",
kebab: "i",
},
"IDSet": {
camel: "idSet",
kebab: "id-set",
},
"IDs": {
camel: "ids",
kebab: "ids",
},
"IDsMap": {
camel: "idsMap",
kebab: "ids-map",
},
"NetworkCIDR": {
camel: "networkCIDR",
kebab: "network-cidr",
},
"PCRTestKit": {
camel: "pcrTestKit",
kebab: "pcr-test-kit",
},
"PeerAPIOp": {
camel: "peerAPIOp",
kebab: "peer-api-op",
},
"PeerIDs": {
camel: "peerIDs",
kebab: "peer-ids",
},
+ "SHA256Hash": {
+ camel: "sha256Hash",
+ kebab: "sha256-hash",
+ },
"ServiceAPIKey": {
camel: "serviceAPIKey",
kebab: "service-api-key",
},
"ServiceKey": {
camel: "serviceKey",
kebab: "service-key",
},
"UserACLIDs": {
camel: "userACLIDs",
kebab: "user-acl-ids",
},
"Username": {
camel: "username",
kebab: "username",
},
"XMLHTTP": {
camel: "xmlHTTP",
kebab: "xml-http",
},
"XMLHTTPRequest": {
camel: "xmlHTTPRequest",
kebab: "xml-http-request",
},
}
var tests = []testcase{
{"https-server", spec["HTTPSServer"]},
{"https-server-", spec["HTTPSServer"]},
{"-https-server", spec["HTTPSServer"]},
{"--https-server-", spec["HTTPSServer"]},
{"ids", spec["IDs"]},
{"ids-", spec["IDs"]},
{"-ids", spec["IDs"]},
{"--ids-", spec["IDs"]},
{"ids-map", spec["IDsMap"]},
{"ids-map-", spec["IDsMap"]},
{"-ids-map", spec["IDsMap"]},
{"--ids-map-", spec["IDsMap"]},
{"network-cidr", spec["NetworkCIDR"]},
{"network-cidr-", spec["NetworkCIDR"]},
{"-network-cidr", spec["NetworkCIDR"]},
{"--network-cidr-", spec["NetworkCIDR"]},
{"peer-api-op", spec["PeerAPIOp"]},
{"peer-api-op-", spec["PeerAPIOp"]},
{"-peer-api-op", spec["PeerAPIOp"]},
{"--peer-api-op-", spec["PeerAPIOp"]},
{"peer-ids", spec["PeerIDs"]},
{"peer-ids-", spec["PeerIDs"]},
{"-peer-ids", spec["PeerIDs"]},
{"--peer-ids-", spec["PeerIDs"]},
{"service-api-key", spec["ServiceAPIKey"]},
{"service-api-key-", spec["ServiceAPIKey"]},
{"-service-api-key", spec["ServiceAPIKey"]},
{"--service-api-key-", spec["ServiceAPIKey"]},
{"service-key", spec["ServiceKey"]},
{"service-key-", spec["ServiceKey"]},
{"-service-key", spec["ServiceKey"]},
{"--service-key-", spec["ServiceKey"]},
+ {"sha256-hash", spec["SHA256Hash"]},
+ {"sha256-hash-", spec["SHA256Hash"]},
+ {"-sha256-hash", spec["SHA256Hash"]},
+ {"--sha256-hash-", spec["SHA256Hash"]},
{"user-acl-ids", spec["UserACLIDs"]},
{"user-acl-ids-", spec["UserACLIDs"]},
{"-user-acl-ids", spec["UserACLIDs"]},
{"--user-acl-ids-", spec["UserACLIDs"]},
{"username", spec["Username"]},
{"username-", spec["Username"]},
{"-username", spec["Username"]},
{"--username-", spec["Username"]},
{"xml-http", spec["XMLHTTP"]},
{"xml-http-", spec["XMLHTTP"]},
{"-xml-http", spec["XMLHTTP"]},
{"--xml-http-", spec["XMLHTTP"]},
{"xml-http-request", spec["XMLHTTPRequest"]},
{"xml-http-request-", spec["XMLHTTPRequest"]},
{"-xml-http-request", spec["XMLHTTPRequest"]},
{"--xml-http-request-", spec["XMLHTTPRequest"]},
}
type definition struct {
camel string
kebab string
pascal string
screaming string
snake string
}
type testcase struct {
ident string
want *definition
}
func TestCamel(t *testing.T) {
for _, tt := range spec {
testConversion(t, "Camel", FromCamel, tt.camel, tt)
}
}
func TestKebab(t *testing.T) {
for _, tt := range tests {
testConversion(t, "Kebab", FromKebab, tt.ident, tt.want)
}
}
func TestPascal(t *testing.T) {
MustPascal := func(ident string) Parts {
parts, err := FromPascal(ident)
if err != nil {
t.Fatalf("FromPascal(%q) returned an unexpected error: %s", ident, err)
}
return parts
}
_, err := FromPascal("invalid")
if err == nil {
t.Errorf("FromPascal(%q) failed to return an error", "invalid")
}
for _, tt := range spec {
testConversion(t, "Pascal", MustPascal, tt.pascal, tt)
}
}
func TestScreamingSnake(t *testing.T) {
for _, tt := range tests {
ident := strings.ToUpper(strings.ReplaceAll(tt.ident, "-", "_"))
testConversion(t, "ScreamingSnake", FromScreamingSnake, ident, tt.want)
}
}
func TestSnake(t *testing.T) {
for _, tt := range tests {
ident := strings.ReplaceAll(tt.ident, "-", "_")
testConversion(t, "Snake", FromSnake, ident, tt.want)
}
}
func TestString(t *testing.T) {
ident := "HTTPAPIs"
parts, _ := FromPascal(ident)
got := parts.String()
want := "HTTP,APIs"
if got != want {
t.Errorf("FromPascal(%q).String() = %q: want %q", ident, got, want)
}
}
func testConversion(t *testing.T, typ string, conv func(string) Parts, ident string, want *definition) {
id := conv(ident)
got := id.ToCamel()
if got != want.camel {
t.Errorf("From%s(%q).ToCamel() = %q: want %q", typ, ident, got, want.camel)
}
got = id.ToKebab()
if got != want.kebab {
t.Errorf("From%s(%q).ToKebab() = %q: want %q", typ, ident, got, want.kebab)
}
got = id.ToPascal()
if got != want.pascal {
t.Errorf("From%s(%q).ToPascal() = %q: want %q", typ, ident, got, want.pascal)
}
got = id.ToScreamingSnake()
if got != want.screaming {
t.Errorf("From%s(%q).ToScreamingSnake() = %q: want %q", typ, ident, got, want.screaming)
}
got = id.ToSnake()
if got != want.snake {
t.Errorf("From%s(%q).ToSnake() = %q: want %q", typ, ident, got, want.snake)
}
}
func init() {
for pascal, definition := range spec {
definition.pascal = pascal
definition.snake = strings.ReplaceAll(definition.kebab, "-", "_")
definition.screaming = strings.ToUpper(definition.snake)
}
AddInitialism("PCR")
}
diff --git a/pkg/ident/initialism.go b/pkg/ident/initialism.go
index f2e484b..6c6e17d 100644
--- a/pkg/ident/initialism.go
+++ b/pkg/ident/initialism.go
@@ -1,141 +1,144 @@
// Public Domain (-) 2018-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
package ident
import (
"strings"
)
var mapping = map[string]string{}
// This list helps us satisfy the recommended naming style of variables in Go:
// https://github.com/golang/go/wiki/CodeReviewComments#initialisms
//
// The list is always going to be incomplete, so please add to it as we come
// across new initialisms.
var initialisms = []string{
"ACK",
"ACL",
"ACLs",
"AES",
"ANSI",
"API",
"APIs",
"ARP",
"ASCII",
"ASN1",
"ATM",
"BGP",
"BIOS",
"BLAKE",
"BLAKE3",
"BSS",
"CA",
"CIDR",
"CLI",
"CLUI",
"CPU",
"CPUs",
"CRC",
"CSRF",
"CSS",
"CSV",
"DB",
"DBs",
"DHCP",
"DNS",
"DRM",
"EOF",
"EON",
"FTP",
"GRPC",
"GUID",
"GUIDs",
"HCL",
"HTML",
"HTTP",
"HTTPS",
"IANA",
"ICMP",
"ID",
"IDs",
"IEEE",
"IMAP",
"IP",
"IPs",
"IRC",
"ISO",
"ISP",
"JSON",
"LAN",
"LHS",
"MAC",
"MD5",
"MTU",
"NATO",
"NIC",
"NVRAM",
"OSI",
"PEM",
"POP3",
+ "PTY",
"QPS",
"QUIC",
"RAM",
"RFC",
"RFCs",
"RHS",
"RPC",
"SFTP",
"SHA",
"SHA1",
"SHA256",
"SHA512",
"SLA",
"SMTP",
"SQL",
"SRAM",
"SSH",
"SSID",
"SSL",
"SYN",
"TCP",
"TLS",
"TOML",
"TPS",
"TTL",
+ "TTY",
"UDP",
"UI",
"UID",
"UIDs",
"URI",
"URL",
"USB",
"UTF8",
"UUID",
"UUIDs",
+ "VCS",
"VLAN",
"VM",
"VPN",
"W3C",
"WPA",
"XML",
"XMPP",
"XON",
"XSRF",
"XSS",
"YAML",
}
// AddInitialism adds the given identifier to the set of initialisms. The given
// identifier should be in the PascalCase form and have at most one lower-cased
// letter which must be at the very end.
func AddInitialism(ident string) {
mapping[strings.ToUpper(ident)] = ident
}
func init() {
for _, s := range initialisms {
mapping[strings.ToUpper(s)] = s
}
}
|
espra/espra | f7679cd9e6b2e03c939b34a013f1d5b9afd821ea | pkg/cli: add basic argument parsing and improve validation | diff --git a/pkg/cli/cli.go b/pkg/cli/cli.go
index 75b2926..bbae7d4 100644
--- a/pkg/cli/cli.go
+++ b/pkg/cli/cli.go
@@ -1,365 +1,543 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
// Package cli provides an easy way to build command line applications.
//
// If the value for a subcommand is nil, it is treated as if the command didn't
// even exist. This is useful for disabling the builtin subcommands like
// completion and help.
package cli
import (
+ "errors"
"fmt"
"os"
- "sort"
"strings"
"web4.cc/pkg/process"
)
+// Invalid argument types.
+const (
+ UnspecifiedInvalidArg InvalidArgType = iota
+ InvalidEnv
+ InvalidFlag
+ InvalidValue
+ MissingFlag
+ MissingValue
+ RepeatedFlag
+ UnknownSubcommand
+)
+
+// ErrInvalidArg indicates that there was an invalid command line argument. It
+// can be used as the target to errors.Is to test if the returned error from Run
+// calls was as a result of mistyped command line arguments.
+var ErrInvalidArg = errors.New("cli: invalid command line argument")
+
var (
_ cmdrunner = (*Version)(nil)
_ cmdrunner = (*plain)(nil)
)
// Command specifies the basic interface that a command needs to implement. For
-// more fine-grained control, commands can also implement the Completer, Helper,
-// and Runner interfaces.
+// more fine-grained control, commands can also implement any of the Completer,
+// Helper, InvalidArgHelper, and Runner interfaces.
type Command interface {
- Info() *Info
+ About() *Info
}
// Completer defines the interface that a command should implement if it wants
// to provide custom autocompletion on command line arguments.
type Completer interface {
Complete(c *Context) Completion
}
type Completion struct {
}
// Context provides a way to access processed command line info at specific
// points within the command hierarchy.
type Context struct {
args []string
cmd Command
flags []*Flag
name string
opts *optspec
parent *Context
- sub Subcommands
+ subs Subcommands
}
// Args returns the command line arguments for the current context.
func (c *Context) Args() []string {
return clone(c.args)
}
+// ChildContext tries to create a child Context for a subcommand.
+func (c *Context) ChildContext(subcommand string) (*Context, error) {
+ cmd := c.subs[subcommand]
+ if cmd == nil {
+ return nil, c.InvalidArg(UnknownSubcommand, subcommand, nil, nil)
+ }
+ sub := &Context{
+ cmd: cmd,
+ name: subcommand,
+ parent: c,
+ }
+ if err := sub.init(); err != nil {
+ return nil, err
+ }
+ return sub, nil
+}
+
// Command returns the Command associated with the current context. By doing a
// type assertion on the returned value, this can be used to access field values
// of the parent or root context.
func (c *Context) Command() Command {
return c.cmd
}
// Flags returns the command line flags for the current context.
func (c *Context) Flags() []*Flag {
flags := make([]*Flag, len(c.flags))
copy(flags, c.flags)
return flags
}
// FullName returns the space separated sequence of command names, all the way
// from the root to the current context.
func (c *Context) FullName() string {
path := []string{c.name}
for c.parent != nil {
c = c.parent
- path = append(path, c.name)
+ path = append([]string{c.name}, path...)
}
- sort.Sort(sort.Reverse(sort.StringSlice(path)))
return strings.Join(path, " ")
}
// Help returns the help text for a command. Commands wishing to override the
// auto-generated help text, must implement the Helper interface.
func (c *Context) Help() string {
return c.help()
}
+func (c *Context) InvalidArg(typ InvalidArgType, arg string, flag *Flag, err error) error {
+ ia := &InvalidArg{
+ Arg: arg,
+ Context: c,
+ Err: err,
+ Flag: flag,
+ Type: typ,
+ }
+ impl, ok := c.cmd.(InvalidArgHelper)
+ if ok {
+ printHelp(impl.InvalidArg(ia))
+ return ia
+ }
+ x := c
+ for x.parent != nil {
+ x = x.parent
+ impl, ok := x.cmd.(InvalidArgHelper)
+ if ok {
+ printHelp(impl.InvalidArg(ia))
+ return ia
+ }
+ }
+ printErrorf(ia.Details())
+ help := c.contextualHelp(ia)
+ if help != "" {
+ fmt.Println("")
+ printHelp(help)
+ }
+ return ia
+}
+
// Name returns the command name for the current context.
func (c *Context) Name() string {
return c.name
}
// Parent returns the parent of the current context.
func (c *Context) Parent() *Context {
return c.parent
}
// PrintHelp outputs the command's help text to stdout.
func (c *Context) PrintHelp() {
- fmt.Print(c.help())
+ printHelp(c.help())
}
// Program returns the program name, i.e. the command name for the root context.
func (c *Context) Program() string {
root := c.Root()
if root == nil {
return c.name
}
return root.name
}
// Root returns the root context.
func (c *Context) Root() *Context {
for c.parent != nil {
c = c.parent
}
return c
}
+// Default makes it super easy to create tools with subcommands. Just
+// instantiate the struct, with the relevant Info, Subcommands, and pass it to
+// RunThenExit.
+type Default struct {
+ Info *Info `cli:"-"`
+ Subcommands Subcommands
+}
+
+func (d *Default) About() *Info {
+ return d.Info
+}
+
// Flag defines a command line flag derived from a Command struct.
type Flag struct {
cmpl int
env []string
field int
help string
hide bool
- inherit bool
label string
long []string
multi bool
req bool
+ setEnv bool
+ setFlag bool
short []string
typ string
}
// Env returns the environment variables associated with the flag.
func (f *Flag) Env() []string {
return clone(f.env)
}
// Help returns the help info for the flag.
func (f *Flag) Help() string {
return f.help
}
// Hidden returns whether the flag should be hidden from help output.
func (f *Flag) Hidden() bool {
return f.hide
}
-// Inherited returns whether the flag will be inherited by any subcommands.
-func (f *Flag) Inherited() bool {
- return f.inherit
-}
-
// Label returns the descriptive label for the flag option. This is primarily
// used to generate the help text, e.g.
//
// --input-file path
//
// Boolean flags will always result in an empty string as the label. For all
// other types, the following sources are used in priority order:
//
// - Any non-empty value set using the "label" struct tag on the field.
//
// - Any labels that can be extracted from the help info by looking for the
// first non-whitespace separated set of characters enclosed within {braces}
// within the "help" struct tag on the field.
//
// - The field type, e.g. string, int, duration, etc. For non-builtin types,
// this will simply state "value".
func (f *Flag) Label() string {
return f.label
}
// LongFlags returns the associated long flags.
func (f *Flag) LongFlags() []string {
return clone(f.long)
}
// Multi returns whether the flag can be set multiple times.
func (f *Flag) Multi() bool {
return f.multi
}
// Required returns whether the flag has been marked as required.
func (f *Flag) Required() bool {
return f.req
}
// ShortFlags returns the associated short flags.
func (f *Flag) ShortFlags() []string {
return clone(f.short)
}
// Helper defines the interface that a command should implement if it wants
// fine-grained control over the help text. Otherwise, the text is
-// auto-generated from the command name, Info() output, and struct fields.
+// auto-generated from the command name, About() output, and struct fields.
type Helper interface {
Help(c *Context) string
}
// Info
type Info struct {
Short string
}
+// InvalidArg
+type InvalidArg struct {
+ Arg string
+ Context *Context
+ Err error
+ Flag *Flag
+ Type InvalidArgType
+}
+
+func (i *InvalidArg) Details() string {
+ // root := i.Context.parent == nil
+ name := i.Context.FullName()
+ // if !root {
+ // name = "subcommand " + name
+ // }
+ switch i.Type {
+ case InvalidFlag:
+ return fmt.Sprintf("%s: invalid flag %q", name, i.Arg)
+ case MissingFlag:
+ flag := i.Flag
+ if len(flag.long) > 0 {
+ return fmt.Sprintf("%s: missing required flag --%s", name, flag.long[0])
+ }
+ if len(flag.short) > 0 {
+ return fmt.Sprintf("%s: missing required flag -%s", name, flag.short[0])
+ }
+ return fmt.Sprintf("%s: missing required env %s", name, flag.env[0])
+ case UnknownSubcommand:
+ return fmt.Sprintf("%s: unknown command %q", name, i.Arg)
+ default:
+ return fmt.Sprintf("%#v\nType: %s", i, i.Type)
+ }
+ return "boom"
+}
+
+func (i *InvalidArg) Error() string {
+ return fmt.Sprintf("cli: invalid command line argument: %s", i.Details())
+}
+
+func (i *InvalidArg) Is(target error) bool {
+ return target == ErrInvalidArg
+}
+
+// InvalidArgHelper defines the interface that a command should implement to
+// control the error output when an invalid command line argument is
+// encountered.
+//
+// The returned string is assumed to be contextual help based on the InvalidArg,
+// and will be emitted to stdout. Non-empty strings will have a newline appended
+// to them if they don't already include one.
+//
+// If this interface isn't implemented, commands will default to printing an
+// error about the invalid argument to stderr, followed by auto-generated
+// contextual help text.
+type InvalidArgHelper interface {
+ InvalidArg(ia *InvalidArg) string
+}
+
+type InvalidArgType int
+
+func (i InvalidArgType) String() string {
+ switch i {
+ case UnspecifiedInvalidArg:
+ return "UnspecifiedInvalidArg"
+ case InvalidEnv:
+ return "InvalidEnv"
+ case InvalidFlag:
+ return "InvalidFlag"
+ case InvalidValue:
+ return "InvalidValue"
+ case MissingFlag:
+ return "MissingFlag"
+ case MissingValue:
+ return "MissingValue"
+ case RepeatedFlag:
+ return "RepeatedFlag"
+ case UnknownSubcommand:
+ return "UnknownSubcommand"
+ default:
+ return "UnknownInvalidArg"
+ }
+}
+
// Option configures the root context.
type Option func(c *Context)
// Runner defines the interface that a command should implement to handle
// command line arguments.
type Runner interface {
Run(c *Context) error
}
// Subcommands defines the field type for defining subcommands on a struct.
type Subcommands map[string]Command
// Version provides a default implementation to use as a subcommand to output
// version info.
type Version string
-func (v Version) Info() *Info {
+func (v Version) About() *Info {
return &Info{
- Short: "Show the #{Program} version info",
+ Short: "Show the {Program} version info",
}
}
func (v Version) Run(c *Context) error {
fmt.Println(v)
return nil
}
type cmdrunner interface {
Command
Runner
}
type plain struct {
info *Info
run func(c *Context) error
}
-func (p plain) Info() *Info {
+func (p plain) About() *Info {
return p.info
}
func (p plain) Run(c *Context) error {
return p.run(c)
}
type optspec struct {
autoenv bool
envprefix string
showenv bool
validate bool
}
-// EnvPrefix overrides the default prefix of the program name when automatically
-// deriving environment variables. Use an empty string if the environment
-// variables should be unprefixed.
+// AutoEnv enables the automatic derivation of environment variable names from
+// the exported field names of Command structs. By default, the program name
+// will be converted to SCREAMING_CASE with a trailing underscore, and used as a
+// prefix for all generated environment variables. This can be controlled using
+// the EnvPrefix Option.
+func AutoEnv(c *Context) {
+ c.opts.autoenv = true
+}
+
+// EnvPrefix enables AutoEnv and overrides the default prefix of the program
+// name when automatically deriving environment variables. Use an empty string
+// if the environment variables should be unprefixed.
//
-// This function will panic if the given prefix is not empty or made up of
-// uppercase letters and underscores. Non-empty values must not have a trailing
-// underscore. One will be appended automatically.
+// This function will panic if the given prefix is not empty or is invalid, i.e.
+// not made up of uppercase letters and underscores. Non-empty values must not
+// have a trailing underscore. One will be appended automatically.
func EnvPrefix(s string) func(*Context) {
if !isEnv(s) {
panic(fmt.Errorf("cli: invalid env prefix: %q", s))
}
if s != "" {
s += "_"
}
return func(c *Context) {
+ c.opts.autoenv = true
c.opts.envprefix = s
}
}
// FromFunc will define a new Command from the given run function and short info
// string. It's useful for defining commands where there's no need to handle any
// command line flags.
func FromFunc(run func(c *Context) error, info string) Command {
- return plain{
+ return &plain{
info: &Info{Short: info},
run: run,
}
}
-// NoAutoEnv disables the automatic derivation of environment variable names
-// from the exported field names of Command structs.
-func NoAutoEnv(c *Context) {
- c.opts.autoenv = false
-}
-
// NoValidate disables the automatic validation of all commands and subcommands.
// Validation adds to the startup time, and can be instead done by calling the
-// Validate function directly from within tests.
+// Validate function from within tests.
func NoValidate(c *Context) {
c.opts.validate = false
}
-// ShowEnvHelp emits the associated environment variable names when
-// auto-generating help text.
-func ShowEnvHelp(c *Context) {
- c.opts.showenv = true
-}
-
// Run processes the command line arguments in the context of the given Command.
// The given program name will be used to auto-generate help text and error
// messages.
func Run(name string, cmd Command, args []string, opts ...Option) error {
if len(args) < 1 {
return fmt.Errorf("cli: missing executable path in the given args slice")
}
c, err := newRoot(name, cmd, args[1:], opts...)
if err != nil {
return err
}
if c.opts.validate {
if err := validate(c); err != nil {
return err
}
}
return c.run()
}
-// RunThenExit provides a utility function for the common case of calling Run
-// with os.Args, printing the error on failure, and exiting with a status code
-// of 1 on failure, and 0 on success.
+// RunThenExit provides a utility function that:
//
-// The function will use process.Exit instead of os.Exit so that registered exit
-// handlers will run.
+// * Calls Run with os.Args.
+//
+// * If Run returns an error, prints the error as long as it's not InvalidArg
+// related.
+//
+// * Exits with a status code of 0 on success, 2 on InvalidArg, and 1 otherwise.
+//
+// The function will use process.Exit instead of os.Exit so that any registered
+// exit handlers will run.
func RunThenExit(name string, cmd Command, opts ...Option) {
err := Run(name, cmd, os.Args, opts...)
if err != nil {
- printErrorf("%s failed: %s", name, err)
+ if errors.Is(err, ErrInvalidArg) {
+ process.Exit(2)
+ }
+ printErrorf("%s: %s", name, err)
process.Exit(1)
}
process.Exit(0)
}
+// ShowEnvHelp emits the associated environment variable names when
+// auto-generating help text.
+func ShowEnvHelp(c *Context) {
+ c.opts.showenv = true
+}
+
// Validate ensures that the given Command and all descendants have compliant
// struct tags and command names. Without this, validation only happens for the
// specific commands when they are executed on the command line.
func Validate(name string, cmd Command, opts ...Option) error {
c, err := newRoot(name, cmd, nil, opts...)
if err != nil {
return err
}
return validate(c)
}
// NOTE(tav): We return copies of slices to callers so that they don't
// accidentally mutate them.
func clone(xs []string) []string {
ys := make([]string, len(xs))
copy(ys, xs)
return ys
}
func printErrorf(format string, args ...interface{}) {
- fmt.Fprintf(os.Stderr, format+"\n", args...)
+ fmt.Fprintf(os.Stderr, "ERROR\nERROR\t"+format+"\nERROR\n", args...)
+}
+
+func printHelp(help string) {
+ if help == "" {
+ return
+ }
+ if help[len(help)-1] != '\n' {
+ fmt.Print(help + "\n")
+ } else {
+ fmt.Print(help)
+ }
}
diff --git a/pkg/cli/context.go b/pkg/cli/context.go
index dc6dfe4..345855c 100644
--- a/pkg/cli/context.go
+++ b/pkg/cli/context.go
@@ -1,492 +1,729 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
package cli
import (
"encoding"
"fmt"
+ "os"
"reflect"
"strings"
"time"
"web4.cc/pkg/ident"
)
var (
typeCompletion = reflect.TypeOf(&Completion{})
typeContext = reflect.TypeOf(&Context{})
typeDuration = reflect.TypeOf(time.Duration(0))
typeSubcommands = reflect.TypeOf(Subcommands{})
typeTextUnmarshaler = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
typeTime = reflect.TypeOf(time.Time{})
)
+func (c *Context) contextualHelp(ia *InvalidArg) string {
+ b := strings.Builder{}
+ b.WriteString("Contextual Usage: \n")
+ return b.String()
+}
+
+func (c *Context) defaultHelp() string {
+ b := strings.Builder{}
+ b.WriteString("Default Usage: ")
+ b.WriteString(c.FullName())
+ b.WriteByte('\n')
+ return b.String()
+}
+
func (c *Context) help() string {
impl, ok := c.cmd.(Helper)
if ok {
return impl.Help(c)
}
- b := strings.Builder{}
- b.WriteByte('\n')
- return b.String()
+ x := c
+ for x.parent != nil {
+ x = x.parent
+ impl, ok := x.cmd.(Helper)
+ if ok {
+ return impl.Help(c)
+ }
+ }
+ return c.defaultHelp()
}
func (c *Context) init() error {
ptr := false
rv := reflect.ValueOf(c.cmd)
oriType := rv.Type()
if rv.Kind() == reflect.Ptr {
ptr = true
rv = rv.Elem()
}
- // Extract the subcommands mapping if a field with the right name and type
- // exists on a struct.
+ // If it's a struct, ensure the original type is a pointer, and extract the
+ // subcommands mapping if a field with the right name and type exists.
rt := rv.Type()
if rv.Kind() == reflect.Struct {
+ if !ptr {
+ if c.parent == nil {
+ return fmt.Errorf(
+ "cli: invalid Command for %q: Command structs must be pointers, not %s",
+ c.name, oriType,
+ )
+ }
+ return fmt.Errorf(
+ "cli: invalid Command for the %q subcommand: Command structs must be pointers, not %s",
+ c.FullName(), oriType,
+ )
+ }
field, ok := rt.FieldByName("Subcommands")
- if ok && field.Type == typeSubcommands {
- c.sub = rv.FieldByName("Subcommands").Interface().(Subcommands)
+ if ok {
+ if field.Type != typeSubcommands {
+ return fmt.Errorf(
+ "cli: the Subcommands field on %s is not cli.Subcommands",
+ oriType,
+ )
+ }
+ subs := rv.FieldByName("Subcommands").Interface().(Subcommands)
+ if c.parent == nil {
+ for name, sub := range subs {
+ c.subs[name] = sub
+ }
+ } else {
+ c.subs = subs
+ }
+ }
+ // Check for potential typo.
+ _, ok = rt.FieldByName("SubCommands")
+ if ok {
+ return fmt.Errorf(
+ "cli: invalid field SubCommands on %s: did you mean Subcommands?",
+ oriType,
+ )
}
} else {
ptr = false
}
// Skip processing of flags if the command isn't a struct pointer.
if !ptr {
return nil
}
// Process command line flags from the struct definition.
seen := map[string]string{}
flen := rt.NumField()
outer:
for i := 0; i < flen; i++ {
field := rt.Field(i)
tag := field.Tag
- // Skip invalid fields.
+ // Skip invalid fields and special fields.
if field.PkgPath != "" || field.Anonymous || tag == "" {
continue
}
+ if field.Name == "Subcommands" {
+ continue
+ }
// Process the field name.
name, err := ident.FromPascal(field.Name)
if err != nil {
return fmt.Errorf(
"cli: could not convert field name %s on %s: %s",
field.Name, oriType, err,
)
}
// Set defaults.
flag := &Flag{
cmpl: -1,
field: i,
help: strings.TrimSpace(tag.Get("help")),
label: strings.TrimSpace(tag.Get("label")),
}
lflag := name.ToKebab()
if prev, ok := seen[lflag]; ok {
return fmt.Errorf(
"cli: the derived --%s flag for field %s conflicts with %s on %s",
lflag, field.Name, prev, oriType,
)
}
flag.long = append(flag.long, lflag)
seen[lflag] = field.Name
optspec := c.Root().opts
if optspec.autoenv {
env := optspec.envprefix + name.ToScreamingSnake()
if prev, ok := seen[env]; ok {
return fmt.Errorf(
"cli: the derived environment variable %s for field %s conflicts with %s on %s",
env, field.Name, prev, oriType,
)
}
flag.env = []string{env}
seen[env] = field.Name
}
// If no label has been specified, see if the help text has an embedded
// label.
if flag.label == "" && flag.help != "" {
flag.help, flag.label = extractLabel(flag.help)
}
// Process the cli tag.
+ skipenv := false
for _, opt := range strings.Split(tag.Get("cli"), " ") {
opt = strings.TrimSpace(opt)
if opt == "" {
continue
}
if opt == "-" {
continue outer
}
if opt == "!autoenv" {
- flag.env = flag.env[1:]
+ if optspec.autoenv {
+ flag.env = flag.env[1:]
+ skipenv = true
+ }
continue
}
if opt == "!autoflag" {
flag.long = flag.long[1:]
continue
}
if opt == "hidden" {
flag.hide = true
continue
}
- if opt == "inherited" {
- flag.inherit = true
- continue
- }
if opt == "required" {
flag.req = true
continue
}
if strings.HasPrefix(opt, "-") {
if strings.ToLower(opt) != opt {
goto invalid
}
if len(opt) == 2 && isShortFlag(opt[1]) {
sflag := opt[1:]
if prev, ok := seen[sflag]; ok {
return fmt.Errorf(
"cli: the -%s flag from field %s conflicts with %s on %s",
sflag, field.Name, prev, oriType,
)
}
flag.short = append(flag.short, sflag)
seen[sflag] = field.Name
continue
}
if strings.HasPrefix(opt, "--") && len(opt) >= 4 {
lflag := opt[2:]
if !isLongFlag(lflag) {
goto invalid
}
if prev, ok := seen[lflag]; ok {
return fmt.Errorf(
"cli: the --%s flag from field %s conflicts with %s on %s",
lflag, field.Name, prev, oriType,
)
}
flag.long = append(flag.long, lflag)
seen[lflag] = field.Name
continue
}
invalid:
return fmt.Errorf(
"cli: invalid flag value %q found for field %s on %s",
opt, field.Name, oriType,
)
}
if opt == strings.ToUpper(opt) {
if !isEnv(opt) {
return fmt.Errorf(
"cli: invalid environment variable %q found for field %s on %s",
opt, field.Name, oriType,
)
}
if prev, ok := seen[opt]; ok {
return fmt.Errorf(
"cli: the environment variable %s for field %s conflicts with %s on %s",
opt, field.Name, prev, oriType,
)
}
flag.env = append(flag.env, opt)
seen[opt] = field.Name
continue
}
if strings.HasPrefix(opt, "Complete") {
meth, ok := oriType.MethodByName(opt)
if !ok {
return fmt.Errorf(
"cli: completer method %s not found for field %s on %s",
opt, field.Name, oriType,
)
}
if errmsg := isCompleter(meth.Type); errmsg != "" {
return fmt.Errorf(
"cli: invalid completer method %s for field %s on %s: %s",
opt, field.Name, oriType, errmsg,
)
}
if flag.cmpl != -1 {
return fmt.Errorf(
"cli: completer already set for field %s on %s",
field.Name, oriType,
)
}
flag.cmpl = meth.Index
} else {
return fmt.Errorf(
"cli: invalid cli tag value %q for field %s on %s",
opt, field.Name, oriType,
)
}
}
// Figure out the flag type.
flag.typ = getFlagType(field.Type, false)
if flag.typ == "" {
return fmt.Errorf(
"cli: unsupported flag type %s for field %s on %s",
field.Type, field.Name, oriType,
)
}
if strings.HasPrefix(flag.typ, "[]") {
flag.multi = true
+ max := 0
+ if optspec.autoenv {
+ max++
+ }
+ if skipenv {
+ max--
+ }
+ if len(flag.env) > max {
+ return fmt.Errorf(
+ "cli: environment variables are not supported for slice types, as used for field %s on %s",
+ field.Name, oriType,
+ )
+ }
+ flag.env = nil
}
if flag.typ == "bool" {
flag.label = ""
} else if flag.label == "" {
flag.label = flag.typ
}
+ // Error on missing env/flags.
+ if len(flag.long) == 0 && len(flag.short) == 0 && len(flag.env) == 0 {
+ if flag.multi {
+ return fmt.Errorf(
+ "cli: missing flags for field %s on %s", field.Name, oriType,
+ )
+ }
+ return fmt.Errorf(
+ "cli: missing flags or environment variables for field %s on %s",
+ field.Name, oriType,
+ )
+ }
c.flags = append(c.flags, flag)
}
return nil
}
-func (c *Context) run() error {
- if c.parent != nil || !c.opts.validate {
+func (c *Context) run() (err error) {
+ // Initialize the Context.
+ root := c.parent == nil
+ if !root || !c.opts.validate {
if err := c.init(); err != nil {
return err
}
}
- cmd, ok := c.cmd.(Runner)
- if !ok {
+ // Process the environment variables.
+ for _, flag := range c.flags {
+ for _, env := range flag.env {
+ val := os.Getenv(env)
+ if val == "" {
+ continue
+ }
+ if err := c.setEnv(flag, val); err != nil {
+ return c.InvalidArg(InvalidEnv, val, nil, err)
+ }
+ }
+ }
+ // Process the command line arguments.
+ var (
+ help bool
+ fList []string
+ fName string
+ lArgs []string
+ long bool
+ pArg string
+ pFlag *Flag
+ rArgs []string
+ )
+outer:
+ for i := 0; i < len(c.args); i++ {
+ arg := c.args[i]
+ // Handle any pending flag value.
+ if pFlag != nil {
+ if len(arg) > 0 && arg[0] == '-' {
+ return c.InvalidArg(MissingValue, pArg, nil, nil)
+ }
+ if err := c.setFlag(pFlag, arg); err != nil {
+ return err
+ }
+ pFlag = nil
+ continue outer
+ }
+ // Skip flag processing if we see a double-dash.
+ if arg == "--" {
+ i++
+ for ; i < len(c.args); i++ {
+ rArgs = append(rArgs, c.args[i])
+ }
+ break outer
+ }
+ // Handle new flags.
+ if strings.HasPrefix(arg, "-") {
+ if strings.HasPrefix(arg, "--") {
+ if len(arg) == 3 {
+ return c.InvalidArg(InvalidFlag, arg, nil, nil)
+ }
+ if arg == "--help" {
+ help = true
+ continue outer
+ }
+ fName = arg[2:]
+ long = true
+ } else if len(arg) == 1 {
+ rArgs = append(rArgs, "-")
+ continue outer
+ } else {
+ if len(arg) != 2 {
+ return c.InvalidArg(InvalidFlag, arg, nil, nil)
+ }
+ fName = arg[1:]
+ long = false
+ }
+ for _, flag := range c.flags {
+ if long {
+ fList = flag.long
+ } else {
+ fList = flag.short
+ }
+ for _, name := range fList {
+ if name != fName {
+ continue
+ }
+ if flag.typ == "bool" {
+ if err := c.setFlag(flag, "1"); err != nil {
+ return err
+ }
+ continue outer
+ }
+ pArg = arg
+ pFlag = flag
+ continue outer
+ }
+ }
+ return c.InvalidArg(InvalidFlag, arg, nil, nil)
+ }
+ // Accumulate all arguments after the first non-flag/value argument.
+ for ; i < len(c.args); i++ {
+ lArgs = append(lArgs, c.args[i])
+ }
+ break outer
+ }
+ // Check all required flags have been set.
+ for _, flag := range c.flags {
+ if flag.req && !(flag.setEnv || flag.setFlag) {
+ return c.InvalidArg(MissingFlag, "", flag, nil)
+ }
+ }
+ // Run the subcommand if there's a match.
+ if len(lArgs) > 0 {
+ name := lArgs[0]
+ for sub, cmd := range c.subs {
+ if name != sub {
+ continue
+ }
+ if cmd == nil {
+ break
+ }
+ if help {
+ lArgs[0] = "--help"
+ lArgs = append(lArgs, rArgs...)
+ } else {
+ lArgs = append(lArgs[1:], rArgs...)
+ }
+ csub, err := newContext(name, cmd, lArgs, c)
+ if err != nil {
+ return err
+ }
+ return csub.run()
+ }
+ }
+ // Print the help text if --help was specified.
+ if help {
+ c.PrintHelp()
return nil
}
+ cmd, runner := c.cmd.(Runner)
+ // Handle non-Runners.
+ if !runner {
+ if len(lArgs) == 0 {
+ c.PrintHelp()
+ return nil
+ }
+ return c.InvalidArg(UnknownSubcommand, lArgs[0], nil, nil)
+ }
+ c.args = append(lArgs, rArgs...)
return cmd.Run(c)
}
+func (c *Context) setEnv(flag *Flag, val string) error {
+ flag.setEnv = true
+ return nil
+}
+
+func (c *Context) setFlag(flag *Flag, val string) error {
+ // if seen[name] && !flag.multi {
+ // return c.InvalidArg(RepeatedFlag, arg, nil, nil)
+ // }
+ flag.setFlag = true
+ return nil
+}
+
func extractLabel(help string) (string, string) {
end := len(help)
for i := 0; i < end; i++ {
if help[i] == '{' {
for j := i + 1; j < end; j++ {
char := help[j]
if char == ' ' {
break
}
if char == '}' {
if j-i == 1 {
break
}
label := help[i+1 : j]
return help[:i] + label + help[j+1:], label
}
}
}
}
return help, ""
}
func getFlagType(rt reflect.Type, slice bool) string {
switch kind := rt.Kind(); kind {
case reflect.Bool:
if slice {
return ""
}
return "bool"
case reflect.Float32:
return "float32"
case reflect.Float64:
return "float64"
case reflect.Int:
return "int"
case reflect.Int8:
return "int8"
case reflect.Int16:
return "int16"
case reflect.Int32:
return "int32"
case reflect.Int64:
switch rt {
case typeDuration:
return "duration"
default:
return "int64"
}
case reflect.Interface, reflect.Ptr, reflect.Struct:
if rt == typeTime {
return "rfc3339"
}
switch kind {
case reflect.Ptr:
if rt.Elem() == typeTime {
return "rfc3339"
}
case reflect.Struct:
rt = reflect.PtrTo(rt)
}
if rt.Implements(typeTextUnmarshaler) {
return "value"
}
return ""
case reflect.Slice:
if slice {
// Only byte slices are supported as a potential slice type within a
// slice.
if rt.Elem().Kind() == reflect.Uint8 {
return "string"
}
return ""
}
if rt.Elem().Kind() == reflect.Uint8 {
return "string"
}
elem := getFlagType(rt.Elem(), true)
if elem == "" {
return elem
}
return "[]" + elem
case reflect.String:
return "string"
case reflect.Uint:
return "int"
case reflect.Uint8:
return "uint8"
case reflect.Uint16:
return "uint16"
case reflect.Uint32:
return "uint32"
case reflect.Uint64:
return "uint64"
default:
return ""
}
}
// NOTE(tav): These checks need to be kept in sync with any changes to the
// Completer interface.
func isCompleter(rt reflect.Type) string {
if n := rt.NumIn(); n != 2 {
return fmt.Sprintf("method must have 1 argument, not %d", n-1)
}
if in := rt.In(1); in != typeContext {
return fmt.Sprintf("method's argument must be a *cli.Context, not %s", in)
}
if rt.NumOut() != 1 {
return "method must have only one return value"
}
if out := rt.Out(0); out != typeCompletion {
return fmt.Sprintf("method's return value must be a *cli.Completion, not %s", out)
}
return ""
}
func isEnv(env string) bool {
last := len(env) - 1
for i := 0; i < len(env); i++ {
char := env[i]
if i == 0 {
if char < 'A' || char > 'Z' {
return false
}
continue
}
if char == '_' {
if i == last {
return false
}
continue
}
if (char >= 'A' && char <= 'Z') || (char >= '0' && char <= '9') {
continue
}
return false
}
return true
}
func isLongFlag(flag string) bool {
last := len(flag) - 1
for i := 0; i < len(flag); i++ {
char := flag[i]
if i == 0 {
if char < 'a' || char > 'z' {
return false
}
continue
}
if char == '-' {
if i == last {
return false
}
continue
}
if (char >= 'a' && char <= 'z') || (char >= '0' && char <= '9') {
continue
}
return false
}
return true
}
func isShortFlag(char byte) bool {
return (char >= 'a' && char <= 'z') || (char >= 'A' && char <= 'Z') ||
(char >= '0' && char <= '9')
}
func isValidName(name string) bool {
for i := 0; i < len(name); i++ {
char := name[i]
if char == '-' || (char >= 'a' && char <= 'z') {
continue
}
return false
}
return true
}
func newContext(name string, cmd Command, args []string, parent *Context) (*Context, error) {
if !isValidName(name) {
if parent == nil {
return nil, fmt.Errorf("cli: invalid program name: %q", name)
}
fname := parent.FullName()
- return nil, fmt.Errorf("cli: invalid name %q for %q subcommand", name, fname)
+ return nil, fmt.Errorf("cli: invalid name %q for the %q subcommand", name, fname)
}
c := &Context{
args: args,
cmd: cmd,
name: name,
}
if parent == nil {
c.opts = &optspec{
- autoenv: true,
validate: true,
}
} else {
c.parent = parent
}
return c, nil
}
func newRoot(name string, cmd Command, args []string, opts ...Option) (*Context, error) {
if cmd == nil {
return nil, fmt.Errorf("cli: the Command instance for %q is nil", name)
}
c, err := newContext(name, cmd, args, nil)
if err != nil {
return nil, err
}
upper := strings.ToUpper(name)
c.opts.envprefix = strings.ReplaceAll(upper, "-", "_") + "_"
for _, opt := range opts {
opt(c)
}
+ c.subs = Subcommands{
+ "completion": builtinCompletion,
+ "help": builtinHelp,
+ }
return c, nil
}
func validate(c *Context) error {
if err := c.init(); err != nil {
return err
}
- for name, cmd := range c.sub {
+ for name, cmd := range c.subs {
if cmd == nil {
continue
}
sub, err := newContext(name, cmd, nil, c)
if err != nil {
return err
}
if err := validate(sub); err != nil {
return err
}
}
return nil
}
|
espra/espra | e1bd33217c00729a105dcea48e5d36349dfb5225 | pkg/cli: improve flag and env name validation | diff --git a/pkg/cli/cli.go b/pkg/cli/cli.go
index da1636e..75b2926 100644
--- a/pkg/cli/cli.go
+++ b/pkg/cli/cli.go
@@ -1,361 +1,365 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
// Package cli provides an easy way to build command line applications.
//
// If the value for a subcommand is nil, it is treated as if the command didn't
// even exist. This is useful for disabling the builtin subcommands like
// completion and help.
package cli
import (
"fmt"
"os"
"sort"
"strings"
"web4.cc/pkg/process"
)
-var _ Command = (*Version)(nil)
+var (
+ _ cmdrunner = (*Version)(nil)
+ _ cmdrunner = (*plain)(nil)
+)
// Command specifies the basic interface that a command needs to implement. For
// more fine-grained control, commands can also implement the Completer, Helper,
// and Runner interfaces.
type Command interface {
Info() *Info
}
// Completer defines the interface that a command should implement if it wants
// to provide custom autocompletion on command line arguments.
type Completer interface {
Complete(c *Context) Completion
}
type Completion struct {
}
// Context provides a way to access processed command line info at specific
// points within the command hierarchy.
type Context struct {
args []string
cmd Command
flags []*Flag
name string
opts *optspec
parent *Context
sub Subcommands
}
// Args returns the command line arguments for the current context.
func (c *Context) Args() []string {
return clone(c.args)
}
// Command returns the Command associated with the current context. By doing a
// type assertion on the returned value, this can be used to access field values
// of the parent or root context.
func (c *Context) Command() Command {
return c.cmd
}
+// Flags returns the command line flags for the current context.
+func (c *Context) Flags() []*Flag {
+ flags := make([]*Flag, len(c.flags))
+ copy(flags, c.flags)
+ return flags
+}
+
// FullName returns the space separated sequence of command names, all the way
// from the root to the current context.
func (c *Context) FullName() string {
path := []string{c.name}
for c.parent != nil {
c = c.parent
path = append(path, c.name)
}
sort.Sort(sort.Reverse(sort.StringSlice(path)))
return strings.Join(path, " ")
}
// Help returns the help text for a command. Commands wishing to override the
// auto-generated help text, must implement the Helper interface.
func (c *Context) Help() string {
return c.help()
}
// Name returns the command name for the current context.
func (c *Context) Name() string {
return c.name
}
-// Flags returns the command line flags for the current context.
-func (c *Context) Flags() []*Flag {
- flags := make([]*Flag, len(c.flags))
- copy(flags, c.flags)
- return flags
-}
-
// Parent returns the parent of the current context.
func (c *Context) Parent() *Context {
return c.parent
}
// PrintHelp outputs the command's help text to stdout.
func (c *Context) PrintHelp() {
fmt.Print(c.help())
}
// Program returns the program name, i.e. the command name for the root context.
func (c *Context) Program() string {
root := c.Root()
if root == nil {
return c.name
}
return root.name
}
// Root returns the root context.
func (c *Context) Root() *Context {
for c.parent != nil {
c = c.parent
}
return c
}
// Flag defines a command line flag derived from a Command struct.
type Flag struct {
cmpl int
env []string
field int
help string
hide bool
inherit bool
label string
long []string
multi bool
req bool
short []string
typ string
}
// Env returns the environment variables associated with the flag.
func (f *Flag) Env() []string {
return clone(f.env)
}
// Help returns the help info for the flag.
func (f *Flag) Help() string {
return f.help
}
// Hidden returns whether the flag should be hidden from help output.
func (f *Flag) Hidden() bool {
return f.hide
}
// Inherited returns whether the flag will be inherited by any subcommands.
func (f *Flag) Inherited() bool {
return f.inherit
}
// Label returns the descriptive label for the flag option. This is primarily
// used to generate the help text, e.g.
//
// --input-file path
//
// Boolean flags will always result in an empty string as the label. For all
// other types, the following sources are used in priority order:
//
// - Any non-empty value set using the "label" struct tag on the field.
//
// - Any labels that can be extracted from the help info by looking for the
// first non-whitespace separated set of characters enclosed within {braces}
// within the "help" struct tag on the field.
//
// - The field type, e.g. string, int, duration, etc. For non-builtin types,
// this will simply state "value".
func (f *Flag) Label() string {
return f.label
}
// LongFlags returns the associated long flags.
func (f *Flag) LongFlags() []string {
return clone(f.long)
}
// Multi returns whether the flag can be set multiple times.
func (f *Flag) Multi() bool {
return f.multi
}
// Required returns whether the flag has been marked as required.
func (f *Flag) Required() bool {
return f.req
}
// ShortFlags returns the associated short flags.
func (f *Flag) ShortFlags() []string {
return clone(f.short)
}
// Helper defines the interface that a command should implement if it wants
// fine-grained control over the help text. Otherwise, the text is
// auto-generated from the command name, Info() output, and struct fields.
type Helper interface {
Help(c *Context) string
}
// Info
type Info struct {
Short string
}
// Option configures the root context.
type Option func(c *Context)
// Runner defines the interface that a command should implement to handle
// command line arguments.
type Runner interface {
Run(c *Context) error
}
// Subcommands defines the field type for defining subcommands on a struct.
type Subcommands map[string]Command
// Version provides a default implementation to use as a subcommand to output
// version info.
type Version string
func (v Version) Info() *Info {
return &Info{
Short: "Show the #{Program} version info",
}
}
func (v Version) Run(c *Context) error {
fmt.Println(v)
return nil
}
+type cmdrunner interface {
+ Command
+ Runner
+}
+
type plain struct {
info *Info
run func(c *Context) error
}
func (p plain) Info() *Info {
return p.info
}
func (p plain) Run(c *Context) error {
return p.run(c)
}
type optspec struct {
autoenv bool
envprefix string
showenv bool
validate bool
}
// EnvPrefix overrides the default prefix of the program name when automatically
-// deriving environment variables.
-//
-// Use an empty string if the environment variables should be unprefixed. For
-// non-empty values, if the given prefix doesn't end in an underscore, one will
-// be appended automatically.
+// deriving environment variables. Use an empty string if the environment
+// variables should be unprefixed.
//
-// This function will panic if the given prefix is not made up of uppercase
-// letters and underscores.
+// This function will panic if the given prefix is not empty or made up of
+// uppercase letters and underscores. Non-empty values must not have a trailing
+// underscore. One will be appended automatically.
func EnvPrefix(s string) func(*Context) {
- for i := 0; i < len(s); i++ {
- if !isEnvChar(s[i]) {
- panic(fmt.Errorf("cli: invalid env prefix: %q", s))
- }
+ if !isEnv(s) {
+ panic(fmt.Errorf("cli: invalid env prefix: %q", s))
}
if s != "" {
- if s[len(s)-1] != '_' {
- s += "_"
- }
+ s += "_"
}
return func(c *Context) {
c.opts.envprefix = s
}
}
// FromFunc will define a new Command from the given run function and short info
// string. It's useful for defining commands where there's no need to handle any
// command line flags.
func FromFunc(run func(c *Context) error, info string) Command {
return plain{
info: &Info{Short: info},
run: run,
}
}
// NoAutoEnv disables the automatic derivation of environment variable names
// from the exported field names of Command structs.
func NoAutoEnv(c *Context) {
c.opts.autoenv = false
}
// NoValidate disables the automatic validation of all commands and subcommands.
// Validation adds to the startup time, and can be instead done by calling the
// Validate function directly from within tests.
func NoValidate(c *Context) {
c.opts.validate = false
}
// ShowEnvHelp emits the associated environment variable names when
// auto-generating help text.
func ShowEnvHelp(c *Context) {
c.opts.showenv = true
}
// Run processes the command line arguments in the context of the given Command.
// The given program name will be used to auto-generate help text and error
// messages.
func Run(name string, cmd Command, args []string, opts ...Option) error {
if len(args) < 1 {
return fmt.Errorf("cli: missing executable path in the given args slice")
}
c, err := newRoot(name, cmd, args[1:], opts...)
if err != nil {
return err
}
if c.opts.validate {
if err := validate(c); err != nil {
return err
}
}
return c.run()
}
// RunThenExit provides a utility function for the common case of calling Run
// with os.Args, printing the error on failure, and exiting with a status code
// of 1 on failure, and 0 on success.
//
// The function will use process.Exit instead of os.Exit so that registered exit
// handlers will run.
func RunThenExit(name string, cmd Command, opts ...Option) {
err := Run(name, cmd, os.Args, opts...)
if err != nil {
printErrorf("%s failed: %s", name, err)
process.Exit(1)
}
process.Exit(0)
}
// Validate ensures that the given Command and all descendants have compliant
// struct tags and command names. Without this, validation only happens for the
// specific commands when they are executed on the command line.
func Validate(name string, cmd Command, opts ...Option) error {
c, err := newRoot(name, cmd, nil, opts...)
if err != nil {
return err
}
return validate(c)
}
+// NOTE(tav): We return copies of slices to callers so that they don't
+// accidentally mutate them.
func clone(xs []string) []string {
ys := make([]string, len(xs))
copy(ys, xs)
return ys
}
func printErrorf(format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, format+"\n", args...)
}
diff --git a/pkg/cli/context.go b/pkg/cli/context.go
index cd1e517..dc6dfe4 100644
--- a/pkg/cli/context.go
+++ b/pkg/cli/context.go
@@ -1,452 +1,492 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
package cli
import (
"encoding"
"fmt"
"reflect"
"strings"
"time"
"web4.cc/pkg/ident"
)
var (
typeCompletion = reflect.TypeOf(&Completion{})
typeContext = reflect.TypeOf(&Context{})
typeDuration = reflect.TypeOf(time.Duration(0))
typeSubcommands = reflect.TypeOf(Subcommands{})
typeTextUnmarshaler = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
typeTime = reflect.TypeOf(time.Time{})
)
func (c *Context) help() string {
impl, ok := c.cmd.(Helper)
if ok {
return impl.Help(c)
}
b := strings.Builder{}
b.WriteByte('\n')
return b.String()
}
func (c *Context) init() error {
ptr := false
rv := reflect.ValueOf(c.cmd)
oriType := rv.Type()
if rv.Kind() == reflect.Ptr {
ptr = true
rv = rv.Elem()
}
// Extract the subcommands mapping if a field with the right name and type
// exists on a struct.
rt := rv.Type()
if rv.Kind() == reflect.Struct {
field, ok := rt.FieldByName("Subcommands")
if ok && field.Type == typeSubcommands {
c.sub = rv.FieldByName("Subcommands").Interface().(Subcommands)
}
} else {
ptr = false
}
// Skip processing of flags if the command isn't a struct pointer.
if !ptr {
return nil
}
// Process command line flags from the struct definition.
seen := map[string]string{}
flen := rt.NumField()
outer:
for i := 0; i < flen; i++ {
field := rt.Field(i)
tag := field.Tag
// Skip invalid fields.
if field.PkgPath != "" || field.Anonymous || tag == "" {
continue
}
// Process the field name.
name, err := ident.FromPascal(field.Name)
if err != nil {
return fmt.Errorf(
"cli: could not convert field name %s on %s: %s",
field.Name, oriType, err,
)
}
// Set defaults.
flag := &Flag{
cmpl: -1,
field: i,
help: strings.TrimSpace(tag.Get("help")),
label: strings.TrimSpace(tag.Get("label")),
}
lflag := name.ToKebab()
if prev, ok := seen[lflag]; ok {
return fmt.Errorf(
"cli: the derived --%s flag for field %s conflicts with %s on %s",
lflag, field.Name, prev, oriType,
)
}
flag.long = append(flag.long, lflag)
seen[lflag] = field.Name
optspec := c.Root().opts
if optspec.autoenv {
env := optspec.envprefix + name.ToScreamingSnake()
if prev, ok := seen[env]; ok {
return fmt.Errorf(
"cli: the derived environment variable %s for field %s conflicts with %s on %s",
env, field.Name, prev, oriType,
)
}
flag.env = []string{env}
seen[env] = field.Name
}
// If no label has been specified, see if the help text has an embedded
// label.
if flag.label == "" && flag.help != "" {
flag.help, flag.label = extractLabel(flag.help)
}
// Process the cli tag.
for _, opt := range strings.Split(tag.Get("cli"), " ") {
opt = strings.TrimSpace(opt)
if opt == "" {
continue
}
if opt == "-" {
continue outer
}
if opt == "!autoenv" {
flag.env = flag.env[1:]
continue
}
if opt == "!autoflag" {
flag.long = flag.long[1:]
continue
}
- if opt == "hide" {
+ if opt == "hidden" {
flag.hide = true
continue
}
- if opt == "inherit" {
+ if opt == "inherited" {
flag.inherit = true
continue
}
- if opt == "require" {
+ if opt == "required" {
flag.req = true
continue
}
if strings.HasPrefix(opt, "-") {
if strings.ToLower(opt) != opt {
goto invalid
}
- if len(opt) == 2 && isFlagChar(opt[1]) {
+ if len(opt) == 2 && isShortFlag(opt[1]) {
sflag := opt[1:]
if prev, ok := seen[sflag]; ok {
return fmt.Errorf(
"cli: the -%s flag from field %s conflicts with %s on %s",
sflag, field.Name, prev, oriType,
)
}
flag.short = append(flag.short, sflag)
seen[sflag] = field.Name
continue
}
if strings.HasPrefix(opt, "--") && len(opt) >= 4 {
lflag := opt[2:]
- for j := 0; j < len(lflag); j++ {
- if !isFlagChar(lflag[j]) {
- goto invalid
- }
+ if !isLongFlag(lflag) {
+ goto invalid
}
if prev, ok := seen[lflag]; ok {
return fmt.Errorf(
"cli: the --%s flag from field %s conflicts with %s on %s",
lflag, field.Name, prev, oriType,
)
}
flag.long = append(flag.long, lflag)
seen[lflag] = field.Name
continue
}
invalid:
return fmt.Errorf(
"cli: invalid flag value %q found for field %s on %s",
opt, field.Name, oriType,
)
}
if opt == strings.ToUpper(opt) {
- isEnv := true
- for j := 0; j < len(opt); j++ {
- if !isEnvChar(opt[i]) {
- isEnv = false
- break
- }
+ if !isEnv(opt) {
+ return fmt.Errorf(
+ "cli: invalid environment variable %q found for field %s on %s",
+ opt, field.Name, oriType,
+ )
}
- if isEnv {
- if prev, ok := seen[opt]; ok {
- return fmt.Errorf(
- "cli: the environment variable %s for field %s conflicts with %s on %s",
- opt, field.Name, prev, oriType,
- )
- }
- flag.env = append(flag.env, opt)
- seen[opt] = field.Name
- continue
+ if prev, ok := seen[opt]; ok {
+ return fmt.Errorf(
+ "cli: the environment variable %s for field %s conflicts with %s on %s",
+ opt, field.Name, prev, oriType,
+ )
}
+ flag.env = append(flag.env, opt)
+ seen[opt] = field.Name
+ continue
}
if strings.HasPrefix(opt, "Complete") {
meth, ok := oriType.MethodByName(opt)
if !ok {
return fmt.Errorf(
"cli: completer method %s not found for field %s on %s",
opt, field.Name, oriType,
)
}
if errmsg := isCompleter(meth.Type); errmsg != "" {
return fmt.Errorf(
"cli: invalid completer method %s for field %s on %s: %s",
opt, field.Name, oriType, errmsg,
)
}
if flag.cmpl != -1 {
return fmt.Errorf(
"cli: completer already set for field %s on %s",
field.Name, oriType,
)
}
flag.cmpl = meth.Index
} else {
return fmt.Errorf(
"cli: invalid cli tag value %q for field %s on %s",
opt, field.Name, oriType,
)
}
}
// Figure out the flag type.
flag.typ = getFlagType(field.Type, false)
if flag.typ == "" {
return fmt.Errorf(
"cli: unsupported flag type %s for field %s on %s",
field.Type, field.Name, oriType,
)
}
if strings.HasPrefix(flag.typ, "[]") {
flag.multi = true
}
if flag.typ == "bool" {
flag.label = ""
} else if flag.label == "" {
flag.label = flag.typ
}
c.flags = append(c.flags, flag)
}
return nil
}
func (c *Context) run() error {
if c.parent != nil || !c.opts.validate {
if err := c.init(); err != nil {
return err
}
}
cmd, ok := c.cmd.(Runner)
if !ok {
return nil
}
return cmd.Run(c)
}
func extractLabel(help string) (string, string) {
end := len(help)
for i := 0; i < end; i++ {
if help[i] == '{' {
for j := i + 1; j < end; j++ {
char := help[j]
if char == ' ' {
break
}
if char == '}' {
if j-i == 1 {
break
}
label := help[i+1 : j]
return help[:i] + label + help[j+1:], label
}
}
}
}
return help, ""
}
func getFlagType(rt reflect.Type, slice bool) string {
switch kind := rt.Kind(); kind {
case reflect.Bool:
if slice {
return ""
}
return "bool"
case reflect.Float32:
return "float32"
case reflect.Float64:
return "float64"
case reflect.Int:
return "int"
case reflect.Int8:
return "int8"
case reflect.Int16:
return "int16"
case reflect.Int32:
return "int32"
case reflect.Int64:
switch rt {
case typeDuration:
return "duration"
default:
return "int64"
}
case reflect.Interface, reflect.Ptr, reflect.Struct:
if rt == typeTime {
return "rfc3339"
}
switch kind {
case reflect.Ptr:
if rt.Elem() == typeTime {
return "rfc3339"
}
case reflect.Struct:
rt = reflect.PtrTo(rt)
}
if rt.Implements(typeTextUnmarshaler) {
return "value"
}
return ""
case reflect.Slice:
if slice {
// Only byte slices are supported as a potential slice type within a
// slice.
if rt.Elem().Kind() == reflect.Uint8 {
return "string"
}
return ""
}
if rt.Elem().Kind() == reflect.Uint8 {
return "string"
}
elem := getFlagType(rt.Elem(), true)
if elem == "" {
return elem
}
return "[]" + elem
case reflect.String:
return "string"
case reflect.Uint:
return "int"
case reflect.Uint8:
return "uint8"
case reflect.Uint16:
return "uint16"
case reflect.Uint32:
return "uint32"
case reflect.Uint64:
return "uint64"
default:
return ""
}
}
// NOTE(tav): These checks need to be kept in sync with any changes to the
// Completer interface.
func isCompleter(rt reflect.Type) string {
if n := rt.NumIn(); n != 2 {
return fmt.Sprintf("method must have 1 argument, not %d", n-1)
}
if in := rt.In(1); in != typeContext {
return fmt.Sprintf("method's argument must be a *cli.Context, not %s", in)
}
if rt.NumOut() != 1 {
return "method must have only one return value"
}
if out := rt.Out(0); out != typeCompletion {
return fmt.Sprintf("method's return value must be a *cli.Completion, not %s", out)
}
return ""
}
-func isEnvChar(char byte) bool {
- return char == '_' || (char >= 'A' && char <= 'Z') || (char >= '0' && char <= '9')
+func isEnv(env string) bool {
+ last := len(env) - 1
+ for i := 0; i < len(env); i++ {
+ char := env[i]
+ if i == 0 {
+ if char < 'A' || char > 'Z' {
+ return false
+ }
+ continue
+ }
+ if char == '_' {
+ if i == last {
+ return false
+ }
+ continue
+ }
+ if (char >= 'A' && char <= 'Z') || (char >= '0' && char <= '9') {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+func isLongFlag(flag string) bool {
+ last := len(flag) - 1
+ for i := 0; i < len(flag); i++ {
+ char := flag[i]
+ if i == 0 {
+ if char < 'a' || char > 'z' {
+ return false
+ }
+ continue
+ }
+ if char == '-' {
+ if i == last {
+ return false
+ }
+ continue
+ }
+ if (char >= 'a' && char <= 'z') || (char >= '0' && char <= '9') {
+ continue
+ }
+ return false
+ }
+ return true
}
-func isFlagChar(char byte) bool {
- return char == '-' || (char >= 'a' && char <= 'z') || (char >= '0' && char <= '9')
+func isShortFlag(char byte) bool {
+ return (char >= 'a' && char <= 'z') || (char >= 'A' && char <= 'Z') ||
+ (char >= '0' && char <= '9')
}
func isValidName(name string) bool {
for i := 0; i < len(name); i++ {
char := name[i]
if char == '-' || (char >= 'a' && char <= 'z') {
continue
}
return false
}
return true
}
func newContext(name string, cmd Command, args []string, parent *Context) (*Context, error) {
if !isValidName(name) {
if parent == nil {
return nil, fmt.Errorf("cli: invalid program name: %q", name)
}
fname := parent.FullName()
return nil, fmt.Errorf("cli: invalid name %q for %q subcommand", name, fname)
}
c := &Context{
args: args,
cmd: cmd,
name: name,
}
if parent == nil {
c.opts = &optspec{
autoenv: true,
validate: true,
}
} else {
c.parent = parent
}
return c, nil
}
func newRoot(name string, cmd Command, args []string, opts ...Option) (*Context, error) {
if cmd == nil {
return nil, fmt.Errorf("cli: the Command instance for %q is nil", name)
}
c, err := newContext(name, cmd, args, nil)
if err != nil {
return nil, err
}
upper := strings.ToUpper(name)
c.opts.envprefix = strings.ReplaceAll(upper, "-", "_") + "_"
for _, opt := range opts {
opt(c)
}
return c, nil
}
func validate(c *Context) error {
if err := c.init(); err != nil {
return err
}
for name, cmd := range c.sub {
if cmd == nil {
continue
}
sub, err := newContext(name, cmd, nil, c)
if err != nil {
return err
}
if err := validate(sub); err != nil {
return err
}
}
return nil
}
|
espra/espra | 71717c1529b6c99164ca853afa4eadb6e5fce301 | pkg/cli: add validation, improve interfaces and mem usage | diff --git a/pkg/cli/cli.go b/pkg/cli/cli.go
index 3bb7c34..da1636e 100644
--- a/pkg/cli/cli.go
+++ b/pkg/cli/cli.go
@@ -1,326 +1,361 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
// Package cli provides an easy way to build command line applications.
+//
+// If the value for a subcommand is nil, it is treated as if the command didn't
+// even exist. This is useful for disabling the builtin subcommands like
+// completion and help.
package cli
import (
"fmt"
"os"
"sort"
"strings"
"web4.cc/pkg/process"
)
var _ Command = (*Version)(nil)
-// Command specifies the minimal set of methods that a Command needs to
-// implement. Commands wishing to have more fine-grained control, can also
-// implement the Completer and Usage interfaces.
+// Command specifies the basic interface that a command needs to implement. For
+// more fine-grained control, commands can also implement the Completer, Helper,
+// and Runner interfaces.
type Command interface {
Info() *Info
- Run(c *Context) error
}
-// Completer defines the interface that a Command should implement if it wants
+// Completer defines the interface that a command should implement if it wants
// to provide custom autocompletion on command line arguments.
type Completer interface {
- Complete()
+ Complete(c *Context) Completion
+}
+
+type Completion struct {
}
// Context provides a way to access processed command line info at specific
// points within the command hierarchy.
type Context struct {
- args []string
- cmd Command
- envprefix string
- flags []*Flag
- name string
- parent *Context
- root *Context
- showenv bool
- skipenv bool
- sub Subcommands
+ args []string
+ cmd Command
+ flags []*Flag
+ name string
+ opts *optspec
+ parent *Context
+ sub Subcommands
}
// Args returns the command line arguments for the current context.
func (c *Context) Args() []string {
return clone(c.args)
}
// Command returns the Command associated with the current context. By doing a
// type assertion on the returned value, this can be used to access field values
// of the parent or root context.
func (c *Context) Command() Command {
return c.cmd
}
// FullName returns the space separated sequence of command names, all the way
// from the root to the current context.
func (c *Context) FullName() string {
path := []string{c.name}
for c.parent != nil {
c = c.parent
path = append(path, c.name)
}
sort.Sort(sort.Reverse(sort.StringSlice(path)))
return strings.Join(path, " ")
}
+// Help returns the help text for a command. Commands wishing to override the
+// auto-generated help text, must implement the Helper interface.
+func (c *Context) Help() string {
+ return c.help()
+}
+
// Name returns the command name for the current context.
func (c *Context) Name() string {
return c.name
}
// Flags returns the command line flags for the current context.
func (c *Context) Flags() []*Flag {
flags := make([]*Flag, len(c.flags))
copy(flags, c.flags)
return flags
}
// Parent returns the parent of the current context.
func (c *Context) Parent() *Context {
return c.parent
}
-// PrintUsage outputs the command's help text to stdout.
-func (c *Context) PrintUsage() {
- fmt.Print(c.usage())
+// PrintHelp outputs the command's help text to stdout.
+func (c *Context) PrintHelp() {
+ fmt.Print(c.help())
}
// Program returns the program name, i.e. the command name for the root context.
func (c *Context) Program() string {
- if c.root == nil {
+ root := c.Root()
+ if root == nil {
return c.name
}
- return c.root.name
+ return root.name
}
// Root returns the root context.
func (c *Context) Root() *Context {
- if c.root == nil {
- return c
+ for c.parent != nil {
+ c = c.parent
}
- return c.root
-}
-
-// Usage returns the help text for a command. Commands wishing to override the
-// auto-generated help text, must implement the Usage interface.
-func (c *Context) Usage() string {
- return c.usage()
+ return c
}
// Flag defines a command line flag derived from a Command struct.
type Flag struct {
cmpl int
env []string
field int
help string
hide bool
inherit bool
label string
long []string
multi bool
req bool
short []string
typ string
}
// Env returns the environment variables associated with the flag.
func (f *Flag) Env() []string {
return clone(f.env)
}
// Help returns the help info for the flag.
func (f *Flag) Help() string {
return f.help
}
// Hidden returns whether the flag should be hidden from help output.
func (f *Flag) Hidden() bool {
return f.hide
}
// Inherited returns whether the flag will be inherited by any subcommands.
func (f *Flag) Inherited() bool {
return f.inherit
}
// Label returns the descriptive label for the flag option. This is primarily
-// used to generate the usage help text, e.g.
+// used to generate the help text, e.g.
//
// --input-file path
//
// Boolean flags will always result in an empty string as the label. For all
// other types, the following sources are used in priority order:
//
// - Any non-empty value set using the "label" struct tag on the field.
//
// - Any labels that can be extracted from the help info by looking for the
// first non-whitespace separated set of characters enclosed within {braces}
// within the "help" struct tag on the field.
//
// - The field type, e.g. string, int, duration, etc. For non-builtin types,
// this will simply state "value".
func (f *Flag) Label() string {
return f.label
}
// LongFlags returns the associated long flags.
func (f *Flag) LongFlags() []string {
return clone(f.long)
}
// Multi returns whether the flag can be set multiple times.
func (f *Flag) Multi() bool {
return f.multi
}
// Required returns whether the flag has been marked as required.
func (f *Flag) Required() bool {
return f.req
}
// ShortFlags returns the associated short flags.
func (f *Flag) ShortFlags() []string {
return clone(f.short)
}
+// Helper defines the interface that a command should implement if it wants
+// fine-grained control over the help text. Otherwise, the text is
+// auto-generated from the command name, Info() output, and struct fields.
+type Helper interface {
+ Help(c *Context) string
+}
+
// Info
type Info struct {
Short string
}
// Option configures the root context.
type Option func(c *Context)
+// Runner defines the interface that a command should implement to handle
+// command line arguments.
+type Runner interface {
+ Run(c *Context) error
+}
+
// Subcommands defines the field type for defining subcommands on a struct.
type Subcommands map[string]Command
-// Usage defines the interface that a Command should implement if it wants
-// fine-grained control over the usage output. Otherwise, the usage is
-// auto-generated from the command name, Info() output, and struct fields.
-type Usage interface {
- Usage(c *Context) string
-}
-
// Version provides a default implementation to use as a subcommand to output
// version info.
type Version string
func (v Version) Info() *Info {
return &Info{
Short: "Show the #{Program} version info",
}
}
func (v Version) Run(c *Context) error {
fmt.Println(v)
return nil
}
type plain struct {
info *Info
run func(c *Context) error
}
func (p plain) Info() *Info {
return p.info
}
func (p plain) Run(c *Context) error {
return p.run(c)
}
+type optspec struct {
+ autoenv bool
+ envprefix string
+ showenv bool
+ validate bool
+}
+
// EnvPrefix overrides the default prefix of the program name when automatically
// deriving environment variables.
//
// Use an empty string if the environment variables should be unprefixed. For
// non-empty values, if the given prefix doesn't end in an underscore, one will
// be appended automatically.
//
// This function will panic if the given prefix is not made up of uppercase
// letters and underscores.
func EnvPrefix(s string) func(*Context) {
for i := 0; i < len(s); i++ {
if !isEnvChar(s[i]) {
panic(fmt.Errorf("cli: invalid env prefix: %q", s))
}
}
if s != "" {
if s[len(s)-1] != '_' {
s += "_"
}
}
return func(c *Context) {
- c.envprefix = s
+ c.opts.envprefix = s
}
}
// FromFunc will define a new Command from the given run function and short info
// string. It's useful for defining commands where there's no need to handle any
// command line flags.
func FromFunc(run func(c *Context) error, info string) Command {
return plain{
info: &Info{Short: info},
run: run,
}
}
-// SkipEnv disables the automatic derivation of environment variable names from
-// the exported field names of Command structs.
-func SkipEnv(c *Context) {
- c.skipenv = true
+// NoAutoEnv disables the automatic derivation of environment variable names
+// from the exported field names of Command structs.
+func NoAutoEnv(c *Context) {
+ c.opts.autoenv = false
}
-// ShowEnv emits the associated environment variable names when auto-generating
-// usage text.
-func ShowEnv(c *Context) {
- c.showenv = true
+// NoValidate disables the automatic validation of all commands and subcommands.
+// Validation adds to the startup time, and can be instead done by calling the
+// Validate function directly from within tests.
+func NoValidate(c *Context) {
+ c.opts.validate = false
+}
+
+// ShowEnvHelp emits the associated environment variable names when
+// auto-generating help text.
+func ShowEnvHelp(c *Context) {
+ c.opts.showenv = true
}
// Run processes the command line arguments in the context of the given Command.
-// The given program name will be used to auto-generate usage text and error
+// The given program name will be used to auto-generate help text and error
// messages.
func Run(name string, cmd Command, args []string, opts ...Option) error {
if len(args) < 1 {
- return fmt.Errorf("cli: missing program name in the given args slice")
+ return fmt.Errorf("cli: missing executable path in the given args slice")
}
- c, err := newContext(name, cmd, args[1:], nil)
+ c, err := newRoot(name, cmd, args[1:], opts...)
if err != nil {
return err
}
- upper := strings.ToUpper(name)
- c.envprefix = strings.ReplaceAll(upper, "-", "_") + "_"
- for _, opt := range opts {
- opt(c)
+ if c.opts.validate {
+ if err := validate(c); err != nil {
+ return err
+ }
}
return c.run()
}
// RunThenExit provides a utility function for the common case of calling Run
// with os.Args, printing the error on failure, and exiting with a status code
// of 1 on failure, and 0 on success.
//
// The function will use process.Exit instead of os.Exit so that registered exit
// handlers will run.
func RunThenExit(name string, cmd Command, opts ...Option) {
err := Run(name, cmd, os.Args, opts...)
if err != nil {
printErrorf("%s failed: %s", name, err)
process.Exit(1)
}
process.Exit(0)
}
+// Validate ensures that the given Command and all descendants have compliant
+// struct tags and command names. Without this, validation only happens for the
+// specific commands when they are executed on the command line.
+func Validate(name string, cmd Command, opts ...Option) error {
+ c, err := newRoot(name, cmd, nil, opts...)
+ if err != nil {
+ return err
+ }
+ return validate(c)
+}
+
func clone(xs []string) []string {
ys := make([]string, len(xs))
copy(ys, xs)
return ys
}
func printErrorf(format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, format+"\n", args...)
}
diff --git a/pkg/cli/context.go b/pkg/cli/context.go
index 19cbfeb..cd1e517 100644
--- a/pkg/cli/context.go
+++ b/pkg/cli/context.go
@@ -1,385 +1,452 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
package cli
import (
"encoding"
"fmt"
"reflect"
"strings"
"time"
"web4.cc/pkg/ident"
)
var (
+ typeCompletion = reflect.TypeOf(&Completion{})
+ typeContext = reflect.TypeOf(&Context{})
typeDuration = reflect.TypeOf(time.Duration(0))
typeSubcommands = reflect.TypeOf(Subcommands{})
typeTextUnmarshaler = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
typeTime = reflect.TypeOf(time.Time{})
)
+func (c *Context) help() string {
+ impl, ok := c.cmd.(Helper)
+ if ok {
+ return impl.Help(c)
+ }
+ b := strings.Builder{}
+ b.WriteByte('\n')
+ return b.String()
+}
+
func (c *Context) init() error {
ptr := false
rv := reflect.ValueOf(c.cmd)
oriType := rv.Type()
if rv.Kind() == reflect.Ptr {
ptr = true
rv = rv.Elem()
}
// Extract the subcommands mapping if a field with the right name and type
// exists on a struct.
rt := rv.Type()
if rv.Kind() == reflect.Struct {
field, ok := rt.FieldByName("Subcommands")
if ok && field.Type == typeSubcommands {
c.sub = rv.FieldByName("Subcommands").Interface().(Subcommands)
}
} else {
ptr = false
}
// Skip processing of flags if the command isn't a struct pointer.
if !ptr {
return nil
}
// Process command line flags from the struct definition.
seen := map[string]string{}
flen := rt.NumField()
outer:
for i := 0; i < flen; i++ {
field := rt.Field(i)
tag := field.Tag
// Skip invalid fields.
if field.PkgPath != "" || field.Anonymous || tag == "" {
continue
}
// Process the field name.
name, err := ident.FromPascal(field.Name)
if err != nil {
return fmt.Errorf(
"cli: could not convert field name %s on %s: %s",
field.Name, oriType, err,
)
}
// Set defaults.
flag := &Flag{
cmpl: -1,
field: i,
help: strings.TrimSpace(tag.Get("help")),
label: strings.TrimSpace(tag.Get("label")),
}
lflag := name.ToKebab()
if prev, ok := seen[lflag]; ok {
return fmt.Errorf(
"cli: the derived --%s flag for field %s conflicts with %s on %s",
lflag, field.Name, prev, oriType,
)
}
flag.long = append(flag.long, lflag)
seen[lflag] = field.Name
- root := c.Root()
- if !root.skipenv {
- env := root.envprefix + name.ToScreamingSnake()
+ optspec := c.Root().opts
+ if optspec.autoenv {
+ env := optspec.envprefix + name.ToScreamingSnake()
if prev, ok := seen[env]; ok {
return fmt.Errorf(
"cli: the derived environment variable %s for field %s conflicts with %s on %s",
env, field.Name, prev, oriType,
)
}
flag.env = []string{env}
seen[env] = field.Name
}
// If no label has been specified, see if the help text has an embedded
// label.
if flag.label == "" && flag.help != "" {
flag.help, flag.label = extractLabel(flag.help)
}
// Process the cli tag.
for _, opt := range strings.Split(tag.Get("cli"), " ") {
opt = strings.TrimSpace(opt)
if opt == "" {
continue
}
if opt == "-" {
continue outer
}
+ if opt == "!autoenv" {
+ flag.env = flag.env[1:]
+ continue
+ }
+ if opt == "!autoflag" {
+ flag.long = flag.long[1:]
+ continue
+ }
if opt == "hide" {
flag.hide = true
continue
}
if opt == "inherit" {
flag.inherit = true
continue
}
if opt == "require" {
flag.req = true
continue
}
- if opt == "skip:env" {
- flag.env = flag.env[1:]
- continue
- }
- if opt == "skip:flag" {
- flag.long = flag.long[1:]
- continue
- }
if strings.HasPrefix(opt, "-") {
if strings.ToLower(opt) != opt {
goto invalid
}
if len(opt) == 2 && isFlagChar(opt[1]) {
sflag := opt[1:]
if prev, ok := seen[sflag]; ok {
return fmt.Errorf(
"cli: the -%s flag from field %s conflicts with %s on %s",
sflag, field.Name, prev, oriType,
)
}
flag.short = append(flag.short, sflag)
seen[sflag] = field.Name
continue
}
if strings.HasPrefix(opt, "--") && len(opt) >= 4 {
lflag := opt[2:]
for j := 0; j < len(lflag); j++ {
if !isFlagChar(lflag[j]) {
goto invalid
}
}
if prev, ok := seen[lflag]; ok {
return fmt.Errorf(
"cli: the --%s flag from field %s conflicts with %s on %s",
lflag, field.Name, prev, oriType,
)
}
flag.long = append(flag.long, lflag)
seen[lflag] = field.Name
continue
}
invalid:
return fmt.Errorf(
"cli: invalid flag value %q found for field %s on %s",
opt, field.Name, oriType,
)
}
if opt == strings.ToUpper(opt) {
isEnv := true
for j := 0; j < len(opt); j++ {
if !isEnvChar(opt[i]) {
isEnv = false
break
}
}
if isEnv {
if prev, ok := seen[opt]; ok {
return fmt.Errorf(
"cli: the environment variable %s for field %s conflicts with %s on %s",
opt, field.Name, prev, oriType,
)
}
flag.env = append(flag.env, opt)
seen[opt] = field.Name
continue
}
}
if strings.HasPrefix(opt, "Complete") {
meth, ok := oriType.MethodByName(opt)
if !ok {
return fmt.Errorf(
"cli: completer method %s not found for field %s on %s",
opt, field.Name, oriType,
)
}
+ if errmsg := isCompleter(meth.Type); errmsg != "" {
+ return fmt.Errorf(
+ "cli: invalid completer method %s for field %s on %s: %s",
+ opt, field.Name, oriType, errmsg,
+ )
+ }
if flag.cmpl != -1 {
return fmt.Errorf(
"cli: completer already set for field %s on %s",
field.Name, oriType,
)
}
flag.cmpl = meth.Index
} else {
return fmt.Errorf(
"cli: invalid cli tag value %q for field %s on %s",
opt, field.Name, oriType,
)
}
}
// Figure out the flag type.
flag.typ = getFlagType(field.Type, false)
if flag.typ == "" {
return fmt.Errorf(
"cli: unsupported flag type %s for field %s on %s",
field.Type, field.Name, oriType,
)
}
if strings.HasPrefix(flag.typ, "[]") {
flag.multi = true
}
if flag.typ == "bool" {
flag.label = ""
} else if flag.label == "" {
flag.label = flag.typ
}
c.flags = append(c.flags, flag)
}
return nil
}
func (c *Context) run() error {
- if err := c.init(); err != nil {
- return err
+ if c.parent != nil || !c.opts.validate {
+ if err := c.init(); err != nil {
+ return err
+ }
}
- // root := c.root == nil
- return c.cmd.Run(c)
-}
-
-func (c *Context) usage() string {
- impl, ok := c.cmd.(Usage)
- if ok {
- return impl.Usage(c)
+ cmd, ok := c.cmd.(Runner)
+ if !ok {
+ return nil
}
- b := strings.Builder{}
- b.WriteByte('\n')
- return b.String()
+ return cmd.Run(c)
}
func extractLabel(help string) (string, string) {
end := len(help)
for i := 0; i < end; i++ {
if help[i] == '{' {
for j := i + 1; j < end; j++ {
char := help[j]
if char == ' ' {
break
}
if char == '}' {
if j-i == 1 {
break
}
label := help[i+1 : j]
return help[:i] + label + help[j+1:], label
}
}
}
}
return help, ""
}
func getFlagType(rt reflect.Type, slice bool) string {
switch kind := rt.Kind(); kind {
case reflect.Bool:
if slice {
return ""
}
return "bool"
case reflect.Float32:
return "float32"
case reflect.Float64:
return "float64"
case reflect.Int:
return "int"
case reflect.Int8:
return "int8"
case reflect.Int16:
return "int16"
case reflect.Int32:
return "int32"
case reflect.Int64:
switch rt {
case typeDuration:
return "duration"
default:
return "int64"
}
case reflect.Interface, reflect.Ptr, reflect.Struct:
if rt == typeTime {
return "rfc3339"
}
switch kind {
case reflect.Ptr:
if rt.Elem() == typeTime {
return "rfc3339"
}
case reflect.Struct:
rt = reflect.PtrTo(rt)
}
if rt.Implements(typeTextUnmarshaler) {
return "value"
}
return ""
case reflect.Slice:
if slice {
// Only byte slices are supported as a potential slice type within a
// slice.
if rt.Elem().Kind() == reflect.Uint8 {
return "string"
}
return ""
}
if rt.Elem().Kind() == reflect.Uint8 {
return "string"
}
elem := getFlagType(rt.Elem(), true)
if elem == "" {
return elem
}
return "[]" + elem
case reflect.String:
return "string"
case reflect.Uint:
return "int"
case reflect.Uint8:
return "uint8"
case reflect.Uint16:
return "uint16"
case reflect.Uint32:
return "uint32"
case reflect.Uint64:
return "uint64"
default:
return ""
}
}
+// NOTE(tav): These checks need to be kept in sync with any changes to the
+// Completer interface.
+func isCompleter(rt reflect.Type) string {
+ if n := rt.NumIn(); n != 2 {
+ return fmt.Sprintf("method must have 1 argument, not %d", n-1)
+ }
+ if in := rt.In(1); in != typeContext {
+ return fmt.Sprintf("method's argument must be a *cli.Context, not %s", in)
+ }
+ if rt.NumOut() != 1 {
+ return "method must have only one return value"
+ }
+ if out := rt.Out(0); out != typeCompletion {
+ return fmt.Sprintf("method's return value must be a *cli.Completion, not %s", out)
+ }
+ return ""
+}
+
func isEnvChar(char byte) bool {
return char == '_' || (char >= 'A' && char <= 'Z') || (char >= '0' && char <= '9')
}
func isFlagChar(char byte) bool {
return char == '-' || (char >= 'a' && char <= 'z') || (char >= '0' && char <= '9')
}
-func newContext(name string, cmd Command, args []string, parent *Context) (*Context, error) {
- if err := validateName(name); err != nil {
- return nil, err
+func isValidName(name string) bool {
+ for i := 0; i < len(name); i++ {
+ char := name[i]
+ if char == '-' || (char >= 'a' && char <= 'z') {
+ continue
+ }
+ return false
}
- if cmd == nil {
- fname := name
- if parent != nil {
- fname = parent.FullName() + " " + name
+ return true
+}
+
+func newContext(name string, cmd Command, args []string, parent *Context) (*Context, error) {
+ if !isValidName(name) {
+ if parent == nil {
+ return nil, fmt.Errorf("cli: invalid program name: %q", name)
}
- return nil, fmt.Errorf("cli: the Command instance for %q is nil", fname)
+ fname := parent.FullName()
+ return nil, fmt.Errorf("cli: invalid name %q for %q subcommand", name, fname)
}
c := &Context{
args: args,
cmd: cmd,
name: name,
}
- if parent != nil {
+ if parent == nil {
+ c.opts = &optspec{
+ autoenv: true,
+ validate: true,
+ }
+ } else {
c.parent = parent
- c.root = parent.root
}
return c, nil
}
-func validateName(name string) error {
- for i := 0; i < len(name); i++ {
- char := name[i]
- if char == '-' || (char >= 'a' && char <= 'z') {
+func newRoot(name string, cmd Command, args []string, opts ...Option) (*Context, error) {
+ if cmd == nil {
+ return nil, fmt.Errorf("cli: the Command instance for %q is nil", name)
+ }
+ c, err := newContext(name, cmd, args, nil)
+ if err != nil {
+ return nil, err
+ }
+ upper := strings.ToUpper(name)
+ c.opts.envprefix = strings.ReplaceAll(upper, "-", "_") + "_"
+ for _, opt := range opts {
+ opt(c)
+ }
+ return c, nil
+}
+
+func validate(c *Context) error {
+ if err := c.init(); err != nil {
+ return err
+ }
+ for name, cmd := range c.sub {
+ if cmd == nil {
continue
}
- return fmt.Errorf("cli: invalid command name: %q", name)
+ sub, err := newContext(name, cmd, nil, c)
+ if err != nil {
+ return err
+ }
+ if err := validate(sub); err != nil {
+ return err
+ }
}
return nil
}
|
espra/espra | d08002056a6da87f5cff3891ddfbb095c7070d27 | pkg/cli: process command structs for flags | diff --git a/pkg/cli/cli.go b/pkg/cli/cli.go
index c8d20dd..3bb7c34 100644
--- a/pkg/cli/cli.go
+++ b/pkg/cli/cli.go
@@ -1,220 +1,326 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
// Package cli provides an easy way to build command line applications.
package cli
import (
"fmt"
"os"
"sort"
"strings"
"web4.cc/pkg/process"
)
+var _ Command = (*Version)(nil)
+
// Command specifies the minimal set of methods that a Command needs to
// implement. Commands wishing to have more fine-grained control, can also
// implement the Completer and Usage interfaces.
type Command interface {
- Info() string
+ Info() *Info
Run(c *Context) error
}
// Completer defines the interface that a Command should implement if it wants
// to provide custom autocompletion on command line arguments.
type Completer interface {
Complete()
}
// Context provides a way to access processed command line info at specific
// points within the command hierarchy.
type Context struct {
- args []string
- cmd Command
- name string
- opts []*Option
- parent *Context
- root *Context
- sub Subcommands
+ args []string
+ cmd Command
+ envprefix string
+ flags []*Flag
+ name string
+ parent *Context
+ root *Context
+ showenv bool
+ skipenv bool
+ sub Subcommands
}
// Args returns the command line arguments for the current context.
func (c *Context) Args() []string {
return clone(c.args)
}
// Command returns the Command associated with the current context. By doing a
// type assertion on the returned value, this can be used to access field values
// of the parent or root context.
func (c *Context) Command() Command {
return c.cmd
}
// FullName returns the space separated sequence of command names, all the way
// from the root to the current context.
func (c *Context) FullName() string {
path := []string{c.name}
for c.parent != nil {
c = c.parent
path = append(path, c.name)
}
sort.Sort(sort.Reverse(sort.StringSlice(path)))
return strings.Join(path, " ")
}
// Name returns the command name for the current context.
func (c *Context) Name() string {
return c.name
}
-// Options returns the command line arguments for the current context.
-func (c *Context) Options() []*Option {
- opts := make([]*Option, len(c.opts))
- copy(opts, c.opts)
- return opts
+// Flags returns the command line flags for the current context.
+func (c *Context) Flags() []*Flag {
+ flags := make([]*Flag, len(c.flags))
+ copy(flags, c.flags)
+ return flags
}
// Parent returns the parent of the current context.
func (c *Context) Parent() *Context {
return c.parent
}
-// Root returns the root context.
-func (c *Context) Root() *Context {
- if c.root == nil {
- return c
- }
- return c.root
+// PrintUsage outputs the command's help text to stdout.
+func (c *Context) PrintUsage() {
+ fmt.Print(c.usage())
}
-// RootName returns the command name for the root context.
-func (c *Context) RootName() string {
+// Program returns the program name, i.e. the command name for the root context.
+func (c *Context) Program() string {
if c.root == nil {
return c.name
}
return c.root.name
}
-// Usage returns the generated usage for the current context.
+// Root returns the root context.
+func (c *Context) Root() *Context {
+ if c.root == nil {
+ return c
+ }
+ return c.root
+}
+
+// Usage returns the help text for a command. Commands wishing to override the
+// auto-generated help text, must implement the Usage interface.
func (c *Context) Usage() string {
return c.usage()
}
-// Option defines the command line option derived from a Command struct.
-type Option struct {
- cmpl int
- env []string
- field int
- help string
- long []string
- req bool
- short []string
+// Flag defines a command line flag derived from a Command struct.
+type Flag struct {
+ cmpl int
+ env []string
+ field int
+ help string
+ hide bool
+ inherit bool
+ label string
+ long []string
+ multi bool
+ req bool
+ short []string
+ typ string
}
-// Env returns the environment variables associated with the option.
-func (o *Option) Env() []string {
- return clone(o.env)
+// Env returns the environment variables associated with the flag.
+func (f *Flag) Env() []string {
+ return clone(f.env)
}
-// Help returns the help info for the option.
-func (o *Option) Help() string {
- return o.help
+// Help returns the help info for the flag.
+func (f *Flag) Help() string {
+ return f.help
}
-// LongFlags returns the long flags associated with the option.
-func (o *Option) LongFlags() []string {
- return clone(o.long)
+// Hidden returns whether the flag should be hidden from help output.
+func (f *Flag) Hidden() bool {
+ return f.hide
}
-// Required returns whether the option has been marked as required.
-func (o *Option) Required() bool {
- return o.req
+// Inherited returns whether the flag will be inherited by any subcommands.
+func (f *Flag) Inherited() bool {
+ return f.inherit
}
-// ShortFlags returns the short flags associated with the option.
-func (o *Option) ShortFlags() []string {
- return clone(o.short)
+// Label returns the descriptive label for the flag option. This is primarily
+// used to generate the usage help text, e.g.
+//
+// --input-file path
+//
+// Boolean flags will always result in an empty string as the label. For all
+// other types, the following sources are used in priority order:
+//
+// - Any non-empty value set using the "label" struct tag on the field.
+//
+// - Any labels that can be extracted from the help info by looking for the
+// first non-whitespace separated set of characters enclosed within {braces}
+// within the "help" struct tag on the field.
+//
+// - The field type, e.g. string, int, duration, etc. For non-builtin types,
+// this will simply state "value".
+func (f *Flag) Label() string {
+ return f.label
}
+// LongFlags returns the associated long flags.
+func (f *Flag) LongFlags() []string {
+ return clone(f.long)
+}
+
+// Multi returns whether the flag can be set multiple times.
+func (f *Flag) Multi() bool {
+ return f.multi
+}
+
+// Required returns whether the flag has been marked as required.
+func (f *Flag) Required() bool {
+ return f.req
+}
+
+// ShortFlags returns the associated short flags.
+func (f *Flag) ShortFlags() []string {
+ return clone(f.short)
+}
+
+// Info
+type Info struct {
+ Short string
+}
+
+// Option configures the root context.
+type Option func(c *Context)
+
// Subcommands defines the field type for defining subcommands on a struct.
type Subcommands map[string]Command
// Usage defines the interface that a Command should implement if it wants
// fine-grained control over the usage output. Otherwise, the usage is
// auto-generated from the command name, Info() output, and struct fields.
type Usage interface {
Usage(c *Context) string
}
// Version provides a default implementation to use as a subcommand to output
// version info.
type Version string
-func (v Version) Info() string {
- return "Show the #{RootName} version info"
+func (v Version) Info() *Info {
+ return &Info{
+ Short: "Show the #{Program} version info",
+ }
}
func (v Version) Run(c *Context) error {
fmt.Println(v)
return nil
}
type plain struct {
- info string
+ info *Info
run func(c *Context) error
}
-func (p plain) Info() string {
+func (p plain) Info() *Info {
return p.info
}
func (p plain) Run(c *Context) error {
return p.run(c)
}
-// FromFunc will define a new Command from the given run function and info
+// EnvPrefix overrides the default prefix of the program name when automatically
+// deriving environment variables.
+//
+// Use an empty string if the environment variables should be unprefixed. For
+// non-empty values, if the given prefix doesn't end in an underscore, one will
+// be appended automatically.
+//
+// This function will panic if the given prefix is not made up of uppercase
+// letters and underscores.
+func EnvPrefix(s string) func(*Context) {
+ for i := 0; i < len(s); i++ {
+ if !isEnvChar(s[i]) {
+ panic(fmt.Errorf("cli: invalid env prefix: %q", s))
+ }
+ }
+ if s != "" {
+ if s[len(s)-1] != '_' {
+ s += "_"
+ }
+ }
+ return func(c *Context) {
+ c.envprefix = s
+ }
+}
+
+// FromFunc will define a new Command from the given run function and short info
// string. It's useful for defining commands where there's no need to handle any
-// command line options.
+// command line flags.
func FromFunc(run func(c *Context) error, info string) Command {
- return plain{info, run}
+ return plain{
+ info: &Info{Short: info},
+ run: run,
+ }
+}
+
+// SkipEnv disables the automatic derivation of environment variable names from
+// the exported field names of Command structs.
+func SkipEnv(c *Context) {
+ c.skipenv = true
+}
+
+// ShowEnv emits the associated environment variable names when auto-generating
+// usage text.
+func ShowEnv(c *Context) {
+ c.showenv = true
}
// Run processes the command line arguments in the context of the given Command.
// The given program name will be used to auto-generate usage text and error
// messages.
-func Run(name string, cmd Command, args []string) error {
+func Run(name string, cmd Command, args []string, opts ...Option) error {
if len(args) < 1 {
return fmt.Errorf("cli: missing program name in the given args slice")
}
- c, err := newContext(name, cmd, args, nil)
+ c, err := newContext(name, cmd, args[1:], nil)
if err != nil {
return err
}
+ upper := strings.ToUpper(name)
+ c.envprefix = strings.ReplaceAll(upper, "-", "_") + "_"
+ for _, opt := range opts {
+ opt(c)
+ }
return c.run()
}
// RunThenExit provides a utility function for the common case of calling Run
// with os.Args, printing the error on failure, and exiting with a status code
// of 1 on failure, and 0 on success.
//
// The function will use process.Exit instead of os.Exit so that registered exit
-// handlers will be triggered.
-func RunThenExit(name string, cmd Command) {
- err := Run(name, cmd, os.Args)
+// handlers will run.
+func RunThenExit(name string, cmd Command, opts ...Option) {
+ err := Run(name, cmd, os.Args, opts...)
if err != nil {
printErrorf("%s failed: %s", name, err)
process.Exit(1)
}
process.Exit(0)
}
func clone(xs []string) []string {
ys := make([]string, len(xs))
copy(ys, xs)
return ys
}
func printErrorf(format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, format+"\n", args...)
}
diff --git a/pkg/cli/context.go b/pkg/cli/context.go
index 5df05b8..19cbfeb 100644
--- a/pkg/cli/context.go
+++ b/pkg/cli/context.go
@@ -1,170 +1,385 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
package cli
import (
+ "encoding"
"fmt"
"reflect"
"strings"
+ "time"
"web4.cc/pkg/ident"
)
-var typeSubcommands = reflect.TypeOf(Subcommands{})
+var (
+ typeDuration = reflect.TypeOf(time.Duration(0))
+ typeSubcommands = reflect.TypeOf(Subcommands{})
+ typeTextUnmarshaler = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+ typeTime = reflect.TypeOf(time.Time{})
+)
func (c *Context) init() error {
ptr := false
rv := reflect.ValueOf(c.cmd)
- typ := rv.Type()
+ oriType := rv.Type()
if rv.Kind() == reflect.Ptr {
ptr = true
rv = rv.Elem()
}
// Extract the subcommands mapping if a field with the right name and type
// exists on a struct.
rt := rv.Type()
if rv.Kind() == reflect.Struct {
field, ok := rt.FieldByName("Subcommands")
if ok && field.Type == typeSubcommands {
c.sub = rv.FieldByName("Subcommands").Interface().(Subcommands)
}
} else {
ptr = false
}
- // Skip processing of options if the command isn't a struct pointer.
+ // Skip processing of flags if the command isn't a struct pointer.
if !ptr {
return nil
}
- // Process command line options from the struct definition.
+ // Process command line flags from the struct definition.
+ seen := map[string]string{}
flen := rt.NumField()
+outer:
for i := 0; i < flen; i++ {
field := rt.Field(i)
tag := field.Tag
// Skip invalid fields.
if field.PkgPath != "" || field.Anonymous || tag == "" {
continue
}
// Process the field name.
name, err := ident.FromPascal(field.Name)
if err != nil {
return fmt.Errorf(
- "cli: could not convert field name %q on %s: %s",
- field.Name, rt, err,
+ "cli: could not convert field name %s on %s: %s",
+ field.Name, oriType, err,
)
}
// Set defaults.
- cmpl := -1
- env := []string{name.ToScreamingSnake()}
- long := []string{name.ToKebab()}
- required := false
- short := []string{}
- // Process the flags tag.
- for _, flag := range strings.Split(tag.Get("flags"), " ") {
- if strings.TrimSpace(flag) == "" {
- continue
- }
- if strings.ToLower(flag) != flag {
+ flag := &Flag{
+ cmpl: -1,
+ field: i,
+ help: strings.TrimSpace(tag.Get("help")),
+ label: strings.TrimSpace(tag.Get("label")),
+ }
+ lflag := name.ToKebab()
+ if prev, ok := seen[lflag]; ok {
+ return fmt.Errorf(
+ "cli: the derived --%s flag for field %s conflicts with %s on %s",
+ lflag, field.Name, prev, oriType,
+ )
+ }
+ flag.long = append(flag.long, lflag)
+ seen[lflag] = field.Name
+ root := c.Root()
+ if !root.skipenv {
+ env := root.envprefix + name.ToScreamingSnake()
+ if prev, ok := seen[env]; ok {
return fmt.Errorf(
- "cli: invalid flag %q found for field name %q on %s",
- flag, field.Name, rt,
+ "cli: the derived environment variable %s for field %s conflicts with %s on %s",
+ env, field.Name, prev, oriType,
)
}
- if strings.HasPrefix(flag, "--") && len(flag) >= 4 {
- long = append(long, flag[2:])
+ flag.env = []string{env}
+ seen[env] = field.Name
+ }
+ // If no label has been specified, see if the help text has an embedded
+ // label.
+ if flag.label == "" && flag.help != "" {
+ flag.help, flag.label = extractLabel(flag.help)
+ }
+ // Process the cli tag.
+ for _, opt := range strings.Split(tag.Get("cli"), " ") {
+ opt = strings.TrimSpace(opt)
+ if opt == "" {
continue
}
- if strings.HasPrefix(flag, "-") && len(flag) == 2 {
- short = append(short, flag[1:])
+ if opt == "-" {
+ continue outer
+ }
+ if opt == "hide" {
+ flag.hide = true
continue
}
- return fmt.Errorf(
- "cli: invalid flag %q found for field name %q on %s",
- flag, field.Name, rt,
- )
- }
- // Process the opts tag.
- for _, opt := range strings.Split(tag.Get("opts"), ",") {
- if strings.TrimSpace(opt) == "" {
+ if opt == "inherit" {
+ flag.inherit = true
continue
}
- if opt == "required" {
- required = true
+ if opt == "require" {
+ flag.req = true
continue
}
- if opt == strings.ToUpper(opt) {
- env = append(env, opt)
+ if opt == "skip:env" {
+ flag.env = flag.env[1:]
+ continue
+ }
+ if opt == "skip:flag" {
+ flag.long = flag.long[1:]
continue
}
- meth, ok := typ.MethodByName(opt)
- if !ok {
+ if strings.HasPrefix(opt, "-") {
+ if strings.ToLower(opt) != opt {
+ goto invalid
+ }
+ if len(opt) == 2 && isFlagChar(opt[1]) {
+ sflag := opt[1:]
+ if prev, ok := seen[sflag]; ok {
+ return fmt.Errorf(
+ "cli: the -%s flag from field %s conflicts with %s on %s",
+ sflag, field.Name, prev, oriType,
+ )
+ }
+ flag.short = append(flag.short, sflag)
+ seen[sflag] = field.Name
+ continue
+ }
+ if strings.HasPrefix(opt, "--") && len(opt) >= 4 {
+ lflag := opt[2:]
+ for j := 0; j < len(lflag); j++ {
+ if !isFlagChar(lflag[j]) {
+ goto invalid
+ }
+ }
+ if prev, ok := seen[lflag]; ok {
+ return fmt.Errorf(
+ "cli: the --%s flag from field %s conflicts with %s on %s",
+ lflag, field.Name, prev, oriType,
+ )
+ }
+ flag.long = append(flag.long, lflag)
+ seen[lflag] = field.Name
+ continue
+ }
+ invalid:
return fmt.Errorf(
- "cli: could not find method %q for completing field name %q on %s",
- opt, field.Name, rt,
+ "cli: invalid flag value %q found for field %s on %s",
+ opt, field.Name, oriType,
)
}
- if cmpl != -1 {
+ if opt == strings.ToUpper(opt) {
+ isEnv := true
+ for j := 0; j < len(opt); j++ {
+ if !isEnvChar(opt[i]) {
+ isEnv = false
+ break
+ }
+ }
+ if isEnv {
+ if prev, ok := seen[opt]; ok {
+ return fmt.Errorf(
+ "cli: the environment variable %s for field %s conflicts with %s on %s",
+ opt, field.Name, prev, oriType,
+ )
+ }
+ flag.env = append(flag.env, opt)
+ seen[opt] = field.Name
+ continue
+ }
+ }
+ if strings.HasPrefix(opt, "Complete") {
+ meth, ok := oriType.MethodByName(opt)
+ if !ok {
+ return fmt.Errorf(
+ "cli: completer method %s not found for field %s on %s",
+ opt, field.Name, oriType,
+ )
+ }
+ if flag.cmpl != -1 {
+ return fmt.Errorf(
+ "cli: completer already set for field %s on %s",
+ field.Name, oriType,
+ )
+ }
+ flag.cmpl = meth.Index
+ } else {
return fmt.Errorf(
- "cli: completer already set for field name %q on %s",
- field.Name, rt,
+ "cli: invalid cli tag value %q for field %s on %s",
+ opt, field.Name, oriType,
)
}
- cmpl = meth.Index
}
- c.opts = append(c.opts, &Option{
- cmpl: cmpl,
- env: env,
- field: i,
- help: tag.Get("help"),
- long: long,
- req: required,
- short: short,
- })
+ // Figure out the flag type.
+ flag.typ = getFlagType(field.Type, false)
+ if flag.typ == "" {
+ return fmt.Errorf(
+ "cli: unsupported flag type %s for field %s on %s",
+ field.Type, field.Name, oriType,
+ )
+ }
+ if strings.HasPrefix(flag.typ, "[]") {
+ flag.multi = true
+ }
+ if flag.typ == "bool" {
+ flag.label = ""
+ } else if flag.label == "" {
+ flag.label = flag.typ
+ }
+ c.flags = append(c.flags, flag)
}
return nil
}
func (c *Context) run() error {
+ if err := c.init(); err != nil {
+ return err
+ }
+ // root := c.root == nil
return c.cmd.Run(c)
}
func (c *Context) usage() string {
- return ""
+ impl, ok := c.cmd.(Usage)
+ if ok {
+ return impl.Usage(c)
+ }
+ b := strings.Builder{}
+ b.WriteByte('\n')
+ return b.String()
+}
+
+func extractLabel(help string) (string, string) {
+ end := len(help)
+ for i := 0; i < end; i++ {
+ if help[i] == '{' {
+ for j := i + 1; j < end; j++ {
+ char := help[j]
+ if char == ' ' {
+ break
+ }
+ if char == '}' {
+ if j-i == 1 {
+ break
+ }
+ label := help[i+1 : j]
+ return help[:i] + label + help[j+1:], label
+ }
+ }
+ }
+ }
+ return help, ""
+}
+
+func getFlagType(rt reflect.Type, slice bool) string {
+ switch kind := rt.Kind(); kind {
+ case reflect.Bool:
+ if slice {
+ return ""
+ }
+ return "bool"
+ case reflect.Float32:
+ return "float32"
+ case reflect.Float64:
+ return "float64"
+ case reflect.Int:
+ return "int"
+ case reflect.Int8:
+ return "int8"
+ case reflect.Int16:
+ return "int16"
+ case reflect.Int32:
+ return "int32"
+ case reflect.Int64:
+ switch rt {
+ case typeDuration:
+ return "duration"
+ default:
+ return "int64"
+ }
+ case reflect.Interface, reflect.Ptr, reflect.Struct:
+ if rt == typeTime {
+ return "rfc3339"
+ }
+ switch kind {
+ case reflect.Ptr:
+ if rt.Elem() == typeTime {
+ return "rfc3339"
+ }
+ case reflect.Struct:
+ rt = reflect.PtrTo(rt)
+ }
+ if rt.Implements(typeTextUnmarshaler) {
+ return "value"
+ }
+ return ""
+ case reflect.Slice:
+ if slice {
+ // Only byte slices are supported as a potential slice type within a
+ // slice.
+ if rt.Elem().Kind() == reflect.Uint8 {
+ return "string"
+ }
+ return ""
+ }
+ if rt.Elem().Kind() == reflect.Uint8 {
+ return "string"
+ }
+ elem := getFlagType(rt.Elem(), true)
+ if elem == "" {
+ return elem
+ }
+ return "[]" + elem
+ case reflect.String:
+ return "string"
+ case reflect.Uint:
+ return "int"
+ case reflect.Uint8:
+ return "uint8"
+ case reflect.Uint16:
+ return "uint16"
+ case reflect.Uint32:
+ return "uint32"
+ case reflect.Uint64:
+ return "uint64"
+ default:
+ return ""
+ }
+}
+
+func isEnvChar(char byte) bool {
+ return char == '_' || (char >= 'A' && char <= 'Z') || (char >= '0' && char <= '9')
+}
+
+func isFlagChar(char byte) bool {
+ return char == '-' || (char >= 'a' && char <= 'z') || (char >= '0' && char <= '9')
}
func newContext(name string, cmd Command, args []string, parent *Context) (*Context, error) {
if err := validateName(name); err != nil {
return nil, err
}
if cmd == nil {
fname := name
if parent != nil {
fname = parent.FullName() + " " + name
}
return nil, fmt.Errorf("cli: the Command instance for %q is nil", fname)
}
c := &Context{
args: args,
cmd: cmd,
name: name,
}
if parent != nil {
c.parent = parent
c.root = parent.root
}
- if err := c.init(); err != nil {
- return nil, err
- }
return c, nil
}
func validateName(name string) error {
for i := 0; i < len(name); i++ {
char := name[i]
if char == '-' || (char >= 'a' && char <= 'z') {
continue
}
return fmt.Errorf("cli: invalid command name: %q", name)
}
return nil
}
|
espra/espra | 036a1c51392623c7d010e2321026ac362d65a1c8 | pkg/cli: start reworking optparse into a simpler API | diff --git a/pkg/cli/cli.go b/pkg/cli/cli.go
new file mode 100644
index 0000000..c8d20dd
--- /dev/null
+++ b/pkg/cli/cli.go
@@ -0,0 +1,220 @@
+// Public Domain (-) 2010-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// Package cli provides an easy way to build command line applications.
+package cli
+
+import (
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+
+ "web4.cc/pkg/process"
+)
+
+// Command specifies the minimal set of methods that a Command needs to
+// implement. Commands wishing to have more fine-grained control, can also
+// implement the Completer and Usage interfaces.
+type Command interface {
+ Info() string
+ Run(c *Context) error
+}
+
+// Completer defines the interface that a Command should implement if it wants
+// to provide custom autocompletion on command line arguments.
+type Completer interface {
+ Complete()
+}
+
+// Context provides a way to access processed command line info at specific
+// points within the command hierarchy.
+type Context struct {
+ args []string
+ cmd Command
+ name string
+ opts []*Option
+ parent *Context
+ root *Context
+ sub Subcommands
+}
+
+// Args returns the command line arguments for the current context.
+func (c *Context) Args() []string {
+ return clone(c.args)
+}
+
+// Command returns the Command associated with the current context. By doing a
+// type assertion on the returned value, this can be used to access field values
+// of the parent or root context.
+func (c *Context) Command() Command {
+ return c.cmd
+}
+
+// FullName returns the space separated sequence of command names, all the way
+// from the root to the current context.
+func (c *Context) FullName() string {
+ path := []string{c.name}
+ for c.parent != nil {
+ c = c.parent
+ path = append(path, c.name)
+ }
+ sort.Sort(sort.Reverse(sort.StringSlice(path)))
+ return strings.Join(path, " ")
+}
+
+// Name returns the command name for the current context.
+func (c *Context) Name() string {
+ return c.name
+}
+
+// Options returns the command line arguments for the current context.
+func (c *Context) Options() []*Option {
+ opts := make([]*Option, len(c.opts))
+ copy(opts, c.opts)
+ return opts
+}
+
+// Parent returns the parent of the current context.
+func (c *Context) Parent() *Context {
+ return c.parent
+}
+
+// Root returns the root context.
+func (c *Context) Root() *Context {
+ if c.root == nil {
+ return c
+ }
+ return c.root
+}
+
+// RootName returns the command name for the root context.
+func (c *Context) RootName() string {
+ if c.root == nil {
+ return c.name
+ }
+ return c.root.name
+}
+
+// Usage returns the generated usage for the current context.
+func (c *Context) Usage() string {
+ return c.usage()
+}
+
+// Option defines the command line option derived from a Command struct.
+type Option struct {
+ cmpl int
+ env []string
+ field int
+ help string
+ long []string
+ req bool
+ short []string
+}
+
+// Env returns the environment variables associated with the option.
+func (o *Option) Env() []string {
+ return clone(o.env)
+}
+
+// Help returns the help info for the option.
+func (o *Option) Help() string {
+ return o.help
+}
+
+// LongFlags returns the long flags associated with the option.
+func (o *Option) LongFlags() []string {
+ return clone(o.long)
+}
+
+// Required returns whether the option has been marked as required.
+func (o *Option) Required() bool {
+ return o.req
+}
+
+// ShortFlags returns the short flags associated with the option.
+func (o *Option) ShortFlags() []string {
+ return clone(o.short)
+}
+
+// Subcommands defines the field type for defining subcommands on a struct.
+type Subcommands map[string]Command
+
+// Usage defines the interface that a Command should implement if it wants
+// fine-grained control over the usage output. Otherwise, the usage is
+// auto-generated from the command name, Info() output, and struct fields.
+type Usage interface {
+ Usage(c *Context) string
+}
+
+// Version provides a default implementation to use as a subcommand to output
+// version info.
+type Version string
+
+func (v Version) Info() string {
+ return "Show the #{RootName} version info"
+}
+
+func (v Version) Run(c *Context) error {
+ fmt.Println(v)
+ return nil
+}
+
+type plain struct {
+ info string
+ run func(c *Context) error
+}
+
+func (p plain) Info() string {
+ return p.info
+}
+
+func (p plain) Run(c *Context) error {
+ return p.run(c)
+}
+
+// FromFunc will define a new Command from the given run function and info
+// string. It's useful for defining commands where there's no need to handle any
+// command line options.
+func FromFunc(run func(c *Context) error, info string) Command {
+ return plain{info, run}
+}
+
+// Run processes the command line arguments in the context of the given Command.
+// The given program name will be used to auto-generate usage text and error
+// messages.
+func Run(name string, cmd Command, args []string) error {
+ if len(args) < 1 {
+ return fmt.Errorf("cli: missing program name in the given args slice")
+ }
+ c, err := newContext(name, cmd, args, nil)
+ if err != nil {
+ return err
+ }
+ return c.run()
+}
+
+// RunThenExit provides a utility function for the common case of calling Run
+// with os.Args, printing the error on failure, and exiting with a status code
+// of 1 on failure, and 0 on success.
+//
+// The function will use process.Exit instead of os.Exit so that registered exit
+// handlers will be triggered.
+func RunThenExit(name string, cmd Command) {
+ err := Run(name, cmd, os.Args)
+ if err != nil {
+ printErrorf("%s failed: %s", name, err)
+ process.Exit(1)
+ }
+ process.Exit(0)
+}
+
+func clone(xs []string) []string {
+ ys := make([]string, len(xs))
+ copy(ys, xs)
+ return ys
+}
+
+func printErrorf(format string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, format+"\n", args...)
+}
diff --git a/pkg/cli/context.go b/pkg/cli/context.go
new file mode 100644
index 0000000..5df05b8
--- /dev/null
+++ b/pkg/cli/context.go
@@ -0,0 +1,170 @@
+// Public Domain (-) 2010-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package cli
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "web4.cc/pkg/ident"
+)
+
+var typeSubcommands = reflect.TypeOf(Subcommands{})
+
+func (c *Context) init() error {
+ ptr := false
+ rv := reflect.ValueOf(c.cmd)
+ typ := rv.Type()
+ if rv.Kind() == reflect.Ptr {
+ ptr = true
+ rv = rv.Elem()
+ }
+ // Extract the subcommands mapping if a field with the right name and type
+ // exists on a struct.
+ rt := rv.Type()
+ if rv.Kind() == reflect.Struct {
+ field, ok := rt.FieldByName("Subcommands")
+ if ok && field.Type == typeSubcommands {
+ c.sub = rv.FieldByName("Subcommands").Interface().(Subcommands)
+ }
+ } else {
+ ptr = false
+ }
+ // Skip processing of options if the command isn't a struct pointer.
+ if !ptr {
+ return nil
+ }
+ // Process command line options from the struct definition.
+ flen := rt.NumField()
+ for i := 0; i < flen; i++ {
+ field := rt.Field(i)
+ tag := field.Tag
+ // Skip invalid fields.
+ if field.PkgPath != "" || field.Anonymous || tag == "" {
+ continue
+ }
+ // Process the field name.
+ name, err := ident.FromPascal(field.Name)
+ if err != nil {
+ return fmt.Errorf(
+ "cli: could not convert field name %q on %s: %s",
+ field.Name, rt, err,
+ )
+ }
+ // Set defaults.
+ cmpl := -1
+ env := []string{name.ToScreamingSnake()}
+ long := []string{name.ToKebab()}
+ required := false
+ short := []string{}
+ // Process the flags tag.
+ for _, flag := range strings.Split(tag.Get("flags"), " ") {
+ if strings.TrimSpace(flag) == "" {
+ continue
+ }
+ if strings.ToLower(flag) != flag {
+ return fmt.Errorf(
+ "cli: invalid flag %q found for field name %q on %s",
+ flag, field.Name, rt,
+ )
+ }
+ if strings.HasPrefix(flag, "--") && len(flag) >= 4 {
+ long = append(long, flag[2:])
+ continue
+ }
+ if strings.HasPrefix(flag, "-") && len(flag) == 2 {
+ short = append(short, flag[1:])
+ continue
+ }
+ return fmt.Errorf(
+ "cli: invalid flag %q found for field name %q on %s",
+ flag, field.Name, rt,
+ )
+ }
+ // Process the opts tag.
+ for _, opt := range strings.Split(tag.Get("opts"), ",") {
+ if strings.TrimSpace(opt) == "" {
+ continue
+ }
+ if opt == "required" {
+ required = true
+ continue
+ }
+ if opt == strings.ToUpper(opt) {
+ env = append(env, opt)
+ continue
+ }
+ meth, ok := typ.MethodByName(opt)
+ if !ok {
+ return fmt.Errorf(
+ "cli: could not find method %q for completing field name %q on %s",
+ opt, field.Name, rt,
+ )
+ }
+ if cmpl != -1 {
+ return fmt.Errorf(
+ "cli: completer already set for field name %q on %s",
+ field.Name, rt,
+ )
+ }
+ cmpl = meth.Index
+ }
+ c.opts = append(c.opts, &Option{
+ cmpl: cmpl,
+ env: env,
+ field: i,
+ help: tag.Get("help"),
+ long: long,
+ req: required,
+ short: short,
+ })
+ }
+ return nil
+}
+
+func (c *Context) run() error {
+ return c.cmd.Run(c)
+}
+
+func (c *Context) usage() string {
+ return ""
+}
+
+func newContext(name string, cmd Command, args []string, parent *Context) (*Context, error) {
+ if err := validateName(name); err != nil {
+ return nil, err
+ }
+ if cmd == nil {
+ fname := name
+ if parent != nil {
+ fname = parent.FullName() + " " + name
+ }
+ return nil, fmt.Errorf("cli: the Command instance for %q is nil", fname)
+ }
+ c := &Context{
+ args: args,
+ cmd: cmd,
+ name: name,
+ }
+ if parent != nil {
+ c.parent = parent
+ c.root = parent.root
+ }
+ if err := c.init(); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+func validateName(name string) error {
+ for i := 0; i < len(name); i++ {
+ char := name[i]
+ if char == '-' || (char >= 'a' && char <= 'z') {
+ continue
+ }
+ return fmt.Errorf("cli: invalid command name: %q", name)
+ }
+ return nil
+}
|
espra/espra | 48c40820605ae3cf765365ace9861b78a43fe7f6 | pkg/process: expose os.Exit as a configurable variable | diff --git a/pkg/process/process.go b/pkg/process/process.go
index 84f7609..f800957 100644
--- a/pkg/process/process.go
+++ b/pkg/process/process.go
@@ -1,210 +1,213 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
// Package process provides utilities for managing the current system process.
package process
import (
"context"
"fmt"
"os"
"os/signal"
"path/filepath"
"sync"
"syscall"
)
+// OSExit is the function used to terminate the current process. It defaults to
+// os.Exit, but can be overridden for testing purposes.
+var OSExit = os.Exit
+
var (
- exit = os.Exit
exitDisabled bool
exiting bool
mu sync.RWMutex // protects exitDisabled, exiting, registry
registry = map[os.Signal][]func(){}
testMode = false
testSig = make(chan struct{}, 10)
wait = make(chan struct{})
)
type lockFile struct {
file string
link string
}
func (l *lockFile) release() {
os.Remove(l.file)
os.Remove(l.link)
}
// CreatePIDFile writes the current process ID to a new file at the given path.
// The written file is removed when Exit is called, or when the process receives
// an os.Interrupt or SIGTERM signal.
func CreatePIDFile(path string) error {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0o660)
if err != nil {
return err
}
fmt.Fprintf(f, "%d", os.Getpid())
err = f.Close()
if err == nil {
SetExitHandler(func() {
os.Remove(path)
})
}
return err
}
// DisableAutoExit will prevent the process from automatically exiting after
// processing os.Interrupt or SIGTERM signals. This will not be enforced if Exit
// is called directly.
func DisableAutoExit() {
mu.Lock()
exitDisabled = true
mu.Unlock()
}
// Exit runs the registered exit handlers, as if the os.Interrupt signal had
// been sent, and then terminates the process with the given status code. Exit
// blocks until the process terminates if it has already been called elsewhere.
func Exit(code int) {
mu.Lock()
if exiting {
mu.Unlock()
if testMode {
testSig <- struct{}{}
}
<-wait
return
}
exiting = true
handlers := clone(registry[os.Interrupt])
mu.Unlock()
for _, handler := range handlers {
handler()
}
- exit(code)
+ OSExit(code)
}
// Init tries to acquire a process lock and write the PID file for the current
// process.
func Init(directory string, name string) error {
if err := Lock(directory, name); err != nil {
return err
}
return CreatePIDFile(filepath.Join(directory, name+".pid"))
}
// Lock tries to acquire a process lock in the given directory. The acquired
// lock file is released when Exit is called, or when the process receives an
// os.Interrupt or SIGTERM signal.
//
// This function has only been tested for correctness on Unix systems with
// filesystems where link is atomic. It may not work as expected on NFS mounts
// or on platforms like Windows.
func Lock(directory string, name string) error {
file := filepath.Join(directory, fmt.Sprintf("%s-%d.lock", name, os.Getpid()))
f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY, 0o660)
if err != nil {
return err
}
f.Close()
link := filepath.Join(directory, name+".lock")
err = os.Link(file, link)
if err != nil {
// We don't remove the lock file here so that calling Lock multiple
// times from the same process doesn't remove an existing lock.
return err
}
l := &lockFile{
file: file,
link: link,
}
SetExitHandler(l.release)
return nil
}
// ReapOrphans reaps orphaned child processes and returns whether there are any
// unterminated child processes that are still active.
//
// This is currently a no-op on all platforms except Linux.
func ReapOrphans() bool {
return reap()
}
// ResetHandlers drops all currently registered handlers.
func ResetHandlers() {
mu.Lock()
registry = map[os.Signal][]func(){}
mu.Unlock()
}
// RunReaper continuously attempts to reap orphaned child processes until the
// given context is cancelled.
//
// On Linux, this will register the current process as a child subreaper, and
// attempt to reap child processes whenever SIGCHLD is received. On all other
// platforms, this is currently a no-op.
func RunReaper(ctx context.Context) {
runReaper(ctx)
}
// SetExitHandler registers the given handler function to run when receiving
// os.Interrupt or SIGTERM signals. Registered handlers are executed in reverse
// order of when they were set.
func SetExitHandler(handler func()) {
mu.Lock()
registry[os.Interrupt] = prepend(registry[os.Interrupt], handler)
registry[syscall.SIGTERM] = prepend(registry[syscall.SIGTERM], handler)
mu.Unlock()
}
// SetSignalHandler registers the given handler function to run when receiving
// the specified signal. Registered handlers are executed in reverse order of
// when they were set.
func SetSignalHandler(signal os.Signal, handler func()) {
mu.Lock()
registry[signal] = prepend(registry[signal], handler)
mu.Unlock()
}
func clone(xs []func()) []func() {
ys := make([]func(), len(xs))
copy(ys, xs)
return ys
}
func handleSignals() {
notifier := make(chan os.Signal, 100)
signal.Notify(notifier)
go func() {
for sig := range notifier {
mu.Lock()
disabled := exitDisabled
if !disabled {
if sig == syscall.SIGTERM || sig == os.Interrupt {
exiting = true
}
}
handlers := clone(registry[sig])
mu.Unlock()
for _, handler := range handlers {
handler()
}
if !disabled {
if sig == syscall.SIGTERM || sig == os.Interrupt {
- exit(1)
+ OSExit(1)
}
}
if testMode {
testSig <- struct{}{}
}
}
}()
}
func prepend(xs []func(), handler func()) []func() {
return append([]func(){handler}, xs...)
}
func init() {
handleSignals()
}
diff --git a/pkg/process/process_test.go b/pkg/process/process_test.go
index c301ada..facc682 100644
--- a/pkg/process/process_test.go
+++ b/pkg/process/process_test.go
@@ -1,198 +1,198 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
package process
import (
"fmt"
"os"
"path/filepath"
"strconv"
"syscall"
"testing"
"web4.cc/pkg/osexit"
)
func TestCreatePIDFile(t *testing.T) {
reset()
dir := mktemp(t)
defer os.RemoveAll(dir)
fpath := filepath.Join(dir, "test.pid")
err := CreatePIDFile(fpath)
if err != nil {
t.Fatalf("Unexpected error creating PID file: %s", err)
}
written, err := os.ReadFile(fpath)
if err != nil {
t.Fatalf("Unexpected error reading PID file: %s", err)
}
expected := os.Getpid()
pid, err := strconv.ParseInt(string(written), 10, 64)
if err != nil {
t.Fatalf("Unexpected error parsing PID file contents as an int: %s", err)
}
if int(pid) != expected {
t.Fatalf("Mismatching PID file contents: got %d, want %d", int(pid), expected)
}
Exit(2)
if !osexit.Called() || osexit.Status() != 2 {
t.Fatalf("Exit call did not behave as expected")
}
_, err = os.Stat(fpath)
if err == nil {
t.Fatalf("Calling Exit did not remove the created PID file as expected")
}
if !os.IsNotExist(err) {
t.Fatalf("Calling Exit did not remove the created PID file as expected, got error: %s", err)
}
fpath = filepath.Join(dir+"-nonexistent-directory", "test.pid")
err = CreatePIDFile(fpath)
if err == nil {
t.Fatalf("Expected an error when creating PID file in a non-existent directory")
}
}
func TestDisableDefaultExit(t *testing.T) {
reset()
called := false
SetExitHandler(func() {
called = true
})
send(syscall.SIGTERM)
if !osexit.Called() {
t.Fatalf("os.Exit was not called on SIGTERM")
}
if !called {
t.Fatalf("Exit handler not run on SIGTERM")
}
DisableAutoExit()
osexit.Reset()
called = false
resetExiting()
send(syscall.SIGTERM)
if osexit.Called() {
t.Fatalf("os.Exit was called on SIGTERM even after DisableAutoExit()")
}
if !called {
t.Fatalf("Exit handler not run on SIGTERM after DisableAutoExit")
}
}
func TestExit(t *testing.T) {
reset()
called := false
SetExitHandler(func() {
called = true
})
Exit(7)
if !osexit.Called() {
t.Fatalf("Exit did not call os.Exit")
}
status := osexit.Status()
if status != 7 {
t.Fatalf("Exit did not set the right status code: got %d, want 7", status)
}
if !called {
t.Fatalf("Exit handler was not run when calling Exit")
}
osexit.Reset()
called = false
go func() {
Exit(8)
}()
<-testSig
wait <- struct{}{}
if osexit.Called() {
t.Fatalf("Second call to Exit called os.Exit")
}
if called {
t.Fatalf("Second call to Exit resulted in Exit handler being run again")
}
}
func TestInit(t *testing.T) {
dir := mktemp(t)
defer os.RemoveAll(dir)
err := Init(dir, "web4")
if err != nil {
t.Fatalf("Unexpected error initialising process: %s", err)
}
err = Init(dir+"-nonexistent-directory", "web4")
if err == nil {
t.Fatalf("Expected an error when calling Init in a non-existing directory")
}
}
func TestLock(t *testing.T) {
reset()
dir := mktemp(t)
defer os.RemoveAll(dir)
err := Lock(dir, "web4")
if err != nil {
t.Fatalf("Unexpected error acquiring Lock: %s", err)
}
err = Lock(dir, "web4")
if err == nil {
t.Fatalf("Expected an error when calling Lock on an already locked path")
}
fpath := filepath.Join(dir, fmt.Sprintf("web4-%d.lock", os.Getpid()))
_, err = os.Stat(fpath)
if err != nil {
t.Fatalf("Unexpected error accessing the raw lock file: %s", err)
}
Exit(2)
_, err = os.Stat(fpath)
if err == nil {
t.Fatalf("Calling Exit did not remove the lock file as expected")
}
if !os.IsNotExist(err) {
t.Fatalf("Calling Exit did not remove the lock file as expected, got error: %s", err)
}
err = Lock(dir+"-nonexistent-directory", "web4")
if err == nil {
t.Fatalf("Expected an error when calling Lock in a non-existing directory")
}
}
func TestSignalHandler(t *testing.T) {
reset()
called := false
SetSignalHandler(syscall.SIGHUP, func() {
called = true
})
send(syscall.SIGABRT)
if called {
t.Fatalf("Signal handler erroneously called on SIGABRT")
}
send(syscall.SIGHUP)
if !called {
t.Fatalf("Signal handler not called on SIGHUP")
}
}
func mktemp(t *testing.T) string {
dir, err := os.MkdirTemp("", "web4-process")
if err != nil {
t.Skipf("Unable to create temporary directory for tests: %s", err)
}
return dir
}
func reset() {
- exit = osexit.Set()
+ OSExit = osexit.Set()
testMode = true
ResetHandlers()
resetExiting()
}
func resetExiting() {
mu.Lock()
exiting = false
mu.Unlock()
}
func send(sig syscall.Signal) {
syscall.Kill(syscall.Getpid(), sig)
<-testSig
}
|
espra/espra | c89d1615207beee3c3e29b6e14d9582c1283ea4c | pkg/process: add support for reaping child processes | diff --git a/pkg/process/process.go b/pkg/process/process.go
index 60aa2f0..84f7609 100644
--- a/pkg/process/process.go
+++ b/pkg/process/process.go
@@ -1,191 +1,210 @@
// Public Domain (-) 2010-present, The Web4 Authors.
// See the Web4 UNLICENSE file for details.
// Package process provides utilities for managing the current system process.
package process
import (
+ "context"
"fmt"
"os"
"os/signal"
"path/filepath"
"sync"
"syscall"
)
var (
exit = os.Exit
exitDisabled bool
exiting bool
mu sync.RWMutex // protects exitDisabled, exiting, registry
registry = map[os.Signal][]func(){}
testMode = false
testSig = make(chan struct{}, 10)
wait = make(chan struct{})
)
type lockFile struct {
file string
link string
}
func (l *lockFile) release() {
os.Remove(l.file)
os.Remove(l.link)
}
// CreatePIDFile writes the current process ID to a new file at the given path.
// The written file is removed when Exit is called, or when the process receives
// an os.Interrupt or SIGTERM signal.
func CreatePIDFile(path string) error {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0o660)
if err != nil {
return err
}
fmt.Fprintf(f, "%d", os.Getpid())
err = f.Close()
if err == nil {
SetExitHandler(func() {
os.Remove(path)
})
}
return err
}
// DisableAutoExit will prevent the process from automatically exiting after
// processing os.Interrupt or SIGTERM signals. This will not be enforced if Exit
// is called directly.
func DisableAutoExit() {
mu.Lock()
exitDisabled = true
mu.Unlock()
}
// Exit runs the registered exit handlers, as if the os.Interrupt signal had
// been sent, and then terminates the process with the given status code. Exit
// blocks until the process terminates if it has already been called elsewhere.
func Exit(code int) {
mu.Lock()
if exiting {
mu.Unlock()
if testMode {
testSig <- struct{}{}
}
<-wait
return
}
exiting = true
handlers := clone(registry[os.Interrupt])
mu.Unlock()
for _, handler := range handlers {
handler()
}
exit(code)
}
// Init tries to acquire a process lock and write the PID file for the current
// process.
func Init(directory string, name string) error {
if err := Lock(directory, name); err != nil {
return err
}
return CreatePIDFile(filepath.Join(directory, name+".pid"))
}
// Lock tries to acquire a process lock in the given directory. The acquired
// lock file is released when Exit is called, or when the process receives an
// os.Interrupt or SIGTERM signal.
//
// This function has only been tested for correctness on Unix systems with
// filesystems where link is atomic. It may not work as expected on NFS mounts
// or on platforms like Windows.
func Lock(directory string, name string) error {
file := filepath.Join(directory, fmt.Sprintf("%s-%d.lock", name, os.Getpid()))
f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY, 0o660)
if err != nil {
return err
}
f.Close()
link := filepath.Join(directory, name+".lock")
err = os.Link(file, link)
if err != nil {
// We don't remove the lock file here so that calling Lock multiple
// times from the same process doesn't remove an existing lock.
return err
}
l := &lockFile{
file: file,
link: link,
}
SetExitHandler(l.release)
return nil
}
+// ReapOrphans reaps orphaned child processes and returns whether there are any
+// unterminated child processes that are still active.
+//
+// This is currently a no-op on all platforms except Linux.
+func ReapOrphans() bool {
+ return reap()
+}
+
// ResetHandlers drops all currently registered handlers.
func ResetHandlers() {
mu.Lock()
registry = map[os.Signal][]func(){}
mu.Unlock()
}
+// RunReaper continuously attempts to reap orphaned child processes until the
+// given context is cancelled.
+//
+// On Linux, this will register the current process as a child subreaper, and
+// attempt to reap child processes whenever SIGCHLD is received. On all other
+// platforms, this is currently a no-op.
+func RunReaper(ctx context.Context) {
+ runReaper(ctx)
+}
+
// SetExitHandler registers the given handler function to run when receiving
// os.Interrupt or SIGTERM signals. Registered handlers are executed in reverse
// order of when they were set.
func SetExitHandler(handler func()) {
mu.Lock()
registry[os.Interrupt] = prepend(registry[os.Interrupt], handler)
registry[syscall.SIGTERM] = prepend(registry[syscall.SIGTERM], handler)
mu.Unlock()
}
// SetSignalHandler registers the given handler function to run when receiving
// the specified signal. Registered handlers are executed in reverse order of
// when they were set.
func SetSignalHandler(signal os.Signal, handler func()) {
mu.Lock()
registry[signal] = prepend(registry[signal], handler)
mu.Unlock()
}
func clone(xs []func()) []func() {
ys := make([]func(), len(xs))
copy(ys, xs)
return ys
}
func handleSignals() {
notifier := make(chan os.Signal, 100)
signal.Notify(notifier)
go func() {
for sig := range notifier {
mu.Lock()
disabled := exitDisabled
if !disabled {
if sig == syscall.SIGTERM || sig == os.Interrupt {
exiting = true
}
}
handlers := clone(registry[sig])
mu.Unlock()
for _, handler := range handlers {
handler()
}
if !disabled {
if sig == syscall.SIGTERM || sig == os.Interrupt {
exit(1)
}
}
if testMode {
testSig <- struct{}{}
}
}
}()
}
func prepend(xs []func(), handler func()) []func() {
return append([]func(){handler}, xs...)
}
func init() {
handleSignals()
}
diff --git a/pkg/process/reap.go b/pkg/process/reap.go
new file mode 100644
index 0000000..18bd050
--- /dev/null
+++ b/pkg/process/reap.go
@@ -0,0 +1,17 @@
+// Public Domain (-) 2018-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// +build !linux
+
+package process
+
+import (
+ "context"
+)
+
+func reap() bool {
+ return false
+}
+
+func runReaper(ctx context.Context) {
+}
diff --git a/pkg/process/reap_linux.go b/pkg/process/reap_linux.go
new file mode 100644
index 0000000..7ca2c25
--- /dev/null
+++ b/pkg/process/reap_linux.go
@@ -0,0 +1,47 @@
+// Public Domain (-) 2018-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package process
+
+import (
+ "context"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+func reap() bool {
+ status := syscall.WaitStatus(0)
+ for {
+ pid, err := syscall.Wait4(-1, &status, unix.WNOHANG, nil)
+ if pid == 0 {
+ return true
+ }
+ if pid == -1 && err == syscall.ECHILD {
+ return false
+ }
+ }
+}
+
+func runReaper(ctx context.Context) {
+ if os.Getpid() != 1 {
+ unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(1), 0, 0, 0)
+ }
+ notifier := make(chan os.Signal, 4096)
+ signal.Notify(notifier, syscall.SIGCHLD)
+outer:
+ for {
+ select {
+ case <-notifier:
+ reap()
+ if testMode {
+ testSig <- struct{}{}
+ }
+ case <-ctx.Done():
+ signal.Stop(notifier)
+ break outer
+ }
+ }
+}
diff --git a/pkg/process/reap_test.go b/pkg/process/reap_test.go
new file mode 100644
index 0000000..b14101f
--- /dev/null
+++ b/pkg/process/reap_test.go
@@ -0,0 +1,47 @@
+// Public Domain (-) 2018-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package process
+
+import (
+ "context"
+ "os/exec"
+ "runtime"
+ "syscall"
+ "testing"
+)
+
+func TestReapOrphans(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ ReapOrphans()
+ return
+ }
+ testMode = true
+ cmd := exec.Command("sleep", "100")
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("Unexpected error when trying to run `sleep 100`: %s", err)
+ }
+ if more := ReapOrphans(); !more {
+ t.Fatalf("Failed to find unterminated child process when calling ReapOrphans")
+ }
+ syscall.Kill(cmd.Process.Pid, syscall.SIGTERM)
+ if more := ReapOrphans(); more {
+ t.Fatalf("Unterminated child process encountered when calling ReapOrphans")
+ }
+}
+
+func TestRunReaper(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ go RunReaper(ctx)
+ defer cancel()
+ if runtime.GOOS != "linux" {
+ return
+ }
+ testMode = true
+ cmd := exec.Command("sleep", "100")
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("Unexpected error when trying to run `sleep 100`: %s", err)
+ }
+ syscall.Kill(cmd.Process.Pid, syscall.SIGTERM)
+ <-testSig
+}
|
espra/espra | 9d7e07ee03e5de5b95d5c210cf99f756c4a8e3ba | go: update dependencies | diff --git a/go.mod b/go.mod
index a8a8188..d0b5ced 100644
--- a/go.mod
+++ b/go.mod
@@ -1,3 +1,5 @@
module web4.cc
go 1.16
+
+require golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..7503251
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
espra/espra | 6ada209563c7b7de00a1c522c47f72ae951e7918 | pkg/process: add support for managing the current process | diff --git a/pkg/process/process.go b/pkg/process/process.go
new file mode 100644
index 0000000..60aa2f0
--- /dev/null
+++ b/pkg/process/process.go
@@ -0,0 +1,191 @@
+// Public Domain (-) 2010-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// Package process provides utilities for managing the current system process.
+package process
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "sync"
+ "syscall"
+)
+
+var (
+ exit = os.Exit
+ exitDisabled bool
+ exiting bool
+ mu sync.RWMutex // protects exitDisabled, exiting, registry
+ registry = map[os.Signal][]func(){}
+ testMode = false
+ testSig = make(chan struct{}, 10)
+ wait = make(chan struct{})
+)
+
+type lockFile struct {
+ file string
+ link string
+}
+
+func (l *lockFile) release() {
+ os.Remove(l.file)
+ os.Remove(l.link)
+}
+
+// CreatePIDFile writes the current process ID to a new file at the given path.
+// The written file is removed when Exit is called, or when the process receives
+// an os.Interrupt or SIGTERM signal.
+func CreatePIDFile(path string) error {
+ f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0o660)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(f, "%d", os.Getpid())
+ err = f.Close()
+ if err == nil {
+ SetExitHandler(func() {
+ os.Remove(path)
+ })
+ }
+ return err
+}
+
+// DisableAutoExit will prevent the process from automatically exiting after
+// processing os.Interrupt or SIGTERM signals. This will not be enforced if Exit
+// is called directly.
+func DisableAutoExit() {
+ mu.Lock()
+ exitDisabled = true
+ mu.Unlock()
+}
+
+// Exit runs the registered exit handlers, as if the os.Interrupt signal had
+// been sent, and then terminates the process with the given status code. Exit
+// blocks until the process terminates if it has already been called elsewhere.
+func Exit(code int) {
+ mu.Lock()
+ if exiting {
+ mu.Unlock()
+ if testMode {
+ testSig <- struct{}{}
+ }
+ <-wait
+ return
+ }
+ exiting = true
+ handlers := clone(registry[os.Interrupt])
+ mu.Unlock()
+ for _, handler := range handlers {
+ handler()
+ }
+ exit(code)
+}
+
+// Init tries to acquire a process lock and write the PID file for the current
+// process.
+func Init(directory string, name string) error {
+ if err := Lock(directory, name); err != nil {
+ return err
+ }
+ return CreatePIDFile(filepath.Join(directory, name+".pid"))
+}
+
+// Lock tries to acquire a process lock in the given directory. The acquired
+// lock file is released when Exit is called, or when the process receives an
+// os.Interrupt or SIGTERM signal.
+//
+// This function has only been tested for correctness on Unix systems with
+// filesystems where link is atomic. It may not work as expected on NFS mounts
+// or on platforms like Windows.
+func Lock(directory string, name string) error {
+ file := filepath.Join(directory, fmt.Sprintf("%s-%d.lock", name, os.Getpid()))
+ f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY, 0o660)
+ if err != nil {
+ return err
+ }
+ f.Close()
+ link := filepath.Join(directory, name+".lock")
+ err = os.Link(file, link)
+ if err != nil {
+ // We don't remove the lock file here so that calling Lock multiple
+ // times from the same process doesn't remove an existing lock.
+ return err
+ }
+ l := &lockFile{
+ file: file,
+ link: link,
+ }
+ SetExitHandler(l.release)
+ return nil
+}
+
+// ResetHandlers drops all currently registered handlers.
+func ResetHandlers() {
+ mu.Lock()
+ registry = map[os.Signal][]func(){}
+ mu.Unlock()
+}
+
+// SetExitHandler registers the given handler function to run when receiving
+// os.Interrupt or SIGTERM signals. Registered handlers are executed in reverse
+// order of when they were set.
+func SetExitHandler(handler func()) {
+ mu.Lock()
+ registry[os.Interrupt] = prepend(registry[os.Interrupt], handler)
+ registry[syscall.SIGTERM] = prepend(registry[syscall.SIGTERM], handler)
+ mu.Unlock()
+}
+
+// SetSignalHandler registers the given handler function to run when receiving
+// the specified signal. Registered handlers are executed in reverse order of
+// when they were set.
+func SetSignalHandler(signal os.Signal, handler func()) {
+ mu.Lock()
+ registry[signal] = prepend(registry[signal], handler)
+ mu.Unlock()
+}
+
+func clone(xs []func()) []func() {
+ ys := make([]func(), len(xs))
+ copy(ys, xs)
+ return ys
+}
+
+func handleSignals() {
+ notifier := make(chan os.Signal, 100)
+ signal.Notify(notifier)
+ go func() {
+ for sig := range notifier {
+ mu.Lock()
+ disabled := exitDisabled
+ if !disabled {
+ if sig == syscall.SIGTERM || sig == os.Interrupt {
+ exiting = true
+ }
+ }
+ handlers := clone(registry[sig])
+ mu.Unlock()
+ for _, handler := range handlers {
+ handler()
+ }
+ if !disabled {
+ if sig == syscall.SIGTERM || sig == os.Interrupt {
+ exit(1)
+ }
+ }
+ if testMode {
+ testSig <- struct{}{}
+ }
+ }
+ }()
+}
+
+func prepend(xs []func(), handler func()) []func() {
+ return append([]func(){handler}, xs...)
+}
+
+func init() {
+ handleSignals()
+}
diff --git a/pkg/process/process_test.go b/pkg/process/process_test.go
new file mode 100644
index 0000000..c301ada
--- /dev/null
+++ b/pkg/process/process_test.go
@@ -0,0 +1,198 @@
+// Public Domain (-) 2010-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package process
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "syscall"
+ "testing"
+
+ "web4.cc/pkg/osexit"
+)
+
+func TestCreatePIDFile(t *testing.T) {
+ reset()
+ dir := mktemp(t)
+ defer os.RemoveAll(dir)
+ fpath := filepath.Join(dir, "test.pid")
+ err := CreatePIDFile(fpath)
+ if err != nil {
+ t.Fatalf("Unexpected error creating PID file: %s", err)
+ }
+ written, err := os.ReadFile(fpath)
+ if err != nil {
+ t.Fatalf("Unexpected error reading PID file: %s", err)
+ }
+ expected := os.Getpid()
+ pid, err := strconv.ParseInt(string(written), 10, 64)
+ if err != nil {
+ t.Fatalf("Unexpected error parsing PID file contents as an int: %s", err)
+ }
+ if int(pid) != expected {
+ t.Fatalf("Mismatching PID file contents: got %d, want %d", int(pid), expected)
+ }
+ Exit(2)
+ if !osexit.Called() || osexit.Status() != 2 {
+ t.Fatalf("Exit call did not behave as expected")
+ }
+ _, err = os.Stat(fpath)
+ if err == nil {
+ t.Fatalf("Calling Exit did not remove the created PID file as expected")
+ }
+ if !os.IsNotExist(err) {
+ t.Fatalf("Calling Exit did not remove the created PID file as expected, got error: %s", err)
+ }
+ fpath = filepath.Join(dir+"-nonexistent-directory", "test.pid")
+ err = CreatePIDFile(fpath)
+ if err == nil {
+ t.Fatalf("Expected an error when creating PID file in a non-existent directory")
+ }
+}
+
+func TestDisableDefaultExit(t *testing.T) {
+ reset()
+ called := false
+ SetExitHandler(func() {
+ called = true
+ })
+ send(syscall.SIGTERM)
+ if !osexit.Called() {
+ t.Fatalf("os.Exit was not called on SIGTERM")
+ }
+ if !called {
+ t.Fatalf("Exit handler not run on SIGTERM")
+ }
+ DisableAutoExit()
+ osexit.Reset()
+ called = false
+ resetExiting()
+ send(syscall.SIGTERM)
+ if osexit.Called() {
+ t.Fatalf("os.Exit was called on SIGTERM even after DisableAutoExit()")
+ }
+ if !called {
+ t.Fatalf("Exit handler not run on SIGTERM after DisableAutoExit")
+ }
+}
+
+func TestExit(t *testing.T) {
+ reset()
+ called := false
+ SetExitHandler(func() {
+ called = true
+ })
+ Exit(7)
+ if !osexit.Called() {
+ t.Fatalf("Exit did not call os.Exit")
+ }
+ status := osexit.Status()
+ if status != 7 {
+ t.Fatalf("Exit did not set the right status code: got %d, want 7", status)
+ }
+ if !called {
+ t.Fatalf("Exit handler was not run when calling Exit")
+ }
+ osexit.Reset()
+ called = false
+ go func() {
+ Exit(8)
+ }()
+ <-testSig
+ wait <- struct{}{}
+ if osexit.Called() {
+ t.Fatalf("Second call to Exit called os.Exit")
+ }
+ if called {
+ t.Fatalf("Second call to Exit resulted in Exit handler being run again")
+ }
+}
+
+func TestInit(t *testing.T) {
+ dir := mktemp(t)
+ defer os.RemoveAll(dir)
+ err := Init(dir, "web4")
+ if err != nil {
+ t.Fatalf("Unexpected error initialising process: %s", err)
+ }
+ err = Init(dir+"-nonexistent-directory", "web4")
+ if err == nil {
+ t.Fatalf("Expected an error when calling Init in a non-existing directory")
+ }
+}
+
+func TestLock(t *testing.T) {
+ reset()
+ dir := mktemp(t)
+ defer os.RemoveAll(dir)
+ err := Lock(dir, "web4")
+ if err != nil {
+ t.Fatalf("Unexpected error acquiring Lock: %s", err)
+ }
+ err = Lock(dir, "web4")
+ if err == nil {
+ t.Fatalf("Expected an error when calling Lock on an already locked path")
+ }
+ fpath := filepath.Join(dir, fmt.Sprintf("web4-%d.lock", os.Getpid()))
+ _, err = os.Stat(fpath)
+ if err != nil {
+ t.Fatalf("Unexpected error accessing the raw lock file: %s", err)
+ }
+ Exit(2)
+ _, err = os.Stat(fpath)
+ if err == nil {
+ t.Fatalf("Calling Exit did not remove the lock file as expected")
+ }
+ if !os.IsNotExist(err) {
+ t.Fatalf("Calling Exit did not remove the lock file as expected, got error: %s", err)
+ }
+ err = Lock(dir+"-nonexistent-directory", "web4")
+ if err == nil {
+ t.Fatalf("Expected an error when calling Lock in a non-existing directory")
+ }
+}
+
+func TestSignalHandler(t *testing.T) {
+ reset()
+ called := false
+ SetSignalHandler(syscall.SIGHUP, func() {
+ called = true
+ })
+ send(syscall.SIGABRT)
+ if called {
+ t.Fatalf("Signal handler erroneously called on SIGABRT")
+ }
+ send(syscall.SIGHUP)
+ if !called {
+ t.Fatalf("Signal handler not called on SIGHUP")
+ }
+}
+
+func mktemp(t *testing.T) string {
+ dir, err := os.MkdirTemp("", "web4-process")
+ if err != nil {
+ t.Skipf("Unable to create temporary directory for tests: %s", err)
+ }
+ return dir
+}
+
+func reset() {
+ exit = osexit.Set()
+ testMode = true
+ ResetHandlers()
+ resetExiting()
+}
+
+func resetExiting() {
+ mu.Lock()
+ exiting = false
+ mu.Unlock()
+}
+
+func send(sig syscall.Signal) {
+ syscall.Kill(syscall.Getpid(), sig)
+ <-testSig
+}
|
espra/espra | 68521c926eb0266b4a94df13d3be1b6be666da19 | SECURITY: add an explicit policy with 30-day disclosure | diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..be75c02
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,58 @@
+# Security Policy
+
+We care about security. Please report any security-related issues by emailing
[email protected].
+
+## Disclosure Timeline
+
+- We will aim to respond to your initial report as soon as possible.
+
+- If, for some reason, we haven't responded to your report within 24 hours,
+ please try to get hold of a member of the security team by asking on
+ [Slack](https://web4.cc/slack).
+
+- Once a member of the security team has reviewed your report, they may ask you
+ for more info to better understand the issue.
+
+- Once the security team has all the necessary info, they will make an
+ assessment and respond to you via email on whether it is determined by us to
+ be a valid bug or not.
+
+- Once the issue has been accepted as a valid bug, we ask that you give us 30
+ days to fix the issue, after which you are welcome to publicly disclose the
+ issue.
+
+- On the other hand, if the security team determines the issue to be invalid,
+ you are welcome to publicly disclose it whenever you want.
+
+## Responsible Disclosure Policy
+
+We will not initiate a lawsuit or law enforcement investigation against you in
+response to your report, as long as you:
+
+- Don't publicly disclose an issue until it has either been explicitly assessed
+ to be invalid by our security team, or 30 days have passed since it was
+ acknowledged as a valid issue by our security team.
+
+- Don't attempt to gain access to another user's account or data, or the data or
+ infrastructure of a host.
+
+- Don't exploit a security issue for any reason.
+
+- Don't perform any attacks that could impact the reliability or integrity of
+ the network/platform, our services, or data, e.g. denial of service attacks,
+ spam attacks, data corruption, &c.
+
+- Never conduct any non-technical attacks against us, our collaborators, our
+ users, or our infrastructure, e.g. phishing, social engineering, physical
+ assault, &c.
+
+## Bug Bounty Program
+
+Due to limited resources, we do not currently offer any form of monetary reward
+for the reporting of bugs. We hope to be able to do so in the future as our
+finances improve.
+
+In the meantime, we will publicly recognise the reporters of all acknowledged
+security issues by listing their name and details on a dedicated project page.
+Please let us know these details when you report the bug. Thank you!
|
espra/espra | 0971304a110ae8cd60badcb52404425fcb57c0b2 | CODE_OF_CONDUCT: set the tone for the community we want to build | diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..4c48e5a
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,99 @@
+# Code of Conduct
+
+Our community is based on mutual respect, tolerance, and encouragement, and we
+are working to help each other live up to these principles. We want our
+community to be accessible to all. Whoever you are, and whatever your
+background, we welcome you.
+
+## Conduct
+
+- Be welcoming and open-minded. Others may not have the same experience level or
+ background as you, but that doesn't mean they don't have good ideas to
+ contribute. We encourage you to be welcoming and courteous to others in your
+ interactions.
+
+- Assume no malice. We all make mistakes, and disagreements or differences of
+ opinion are a fact of life. Try to approach conflict from the perspective that
+ people generally mean well. This will promote a respectful and friendly
+ atmosphere where people feel comfortable asking questions, participating in
+ discussions, and making contributions.
+
+- Be clear. Communicating with strangers on the Internet can be awkward. It's
+ hard to convey or read tone, and sarcasm is frequently misunderstood. Try to
+ use clear language, and think about how it will be received by the other
+ person.
+
+- On our online community channels, please avoid usernames that might detract
+ from a welcoming environment for all.
+
+- We will exclude you from interaction if you insult, demean or harass anyone.
+ That is not welcome behaviour.
+
+- Don't impersonate other people by copying their avatars, posting content under
+ their email addresses, using similar usernames, or otherwise posing as someone
+ else.
+
+- Don't post other people's personal information, such as phone numbers, private
+ email addresses, physical addresses, credit card numbers, or passwords. This
+ is also a form of harassment as it may present safety risks or other harms.
+
+- Private harassment is also unacceptable. No matter who you are, if you feel
+ you have been or are being harassed by a community member, please contact one
+ of the core team members or any of the moderation team immediately. Whether
+ you're a regular contributor or a newcomer, we care about making this
+ community a safe place for you and we've got your back.
+
+- Likewise any spamming, trolling, flaming, baiting or other attention-stealing
+ behaviour is not welcome.
+
+## Contact
+
+Instances of abusive, harassing, or otherwise unacceptable behavior that
+violates our Code of Conduct may be reported by:
+
+- Emailing [email protected].
+
+- Contacting moderators who are present in the venue where the behaviour is
+ taking place, e.g. operators in online chat channels, organisers at offline
+ meetups, etc.
+
+## Moderation
+
+These are the policies for upholding our community's standards of conduct:
+
+- Behaviour that violates our standards of conduct is not allowed.
+
+- Behaviour that moderators find inappropriate, whether listed in the code of
+ conduct or not, is also not allowed.
+
+- Moderators will first respond to such remarks with a warning.
+
+- If the warning is unheeded, the person will be temporarily excluded from
+ interaction, e.g. by being asked to leave the venue at meetups, removed from
+ chat channels, etc.
+
+- If the person comes back and continues to make trouble, they will be banned,
+ i.e. indefinitely excluded.
+
+- Moderators may choose at their discretion to un-ban the person if it was a
+ first offense and they offer a genuine apology.
+
+- If a moderator bans someone and you think it was unjustified, please take it
+ up with that moderator, or with a different moderator, in private. Public
+ complaints about bans are not allowed.
+
+The enforcement policies listed above apply to all public interaction spaces
+where our community is present, e.g. online chat channels, GitHub repositories,
+offline meetups/events, etc.
+
+## Attribution
+
+This document is adapted from the [Python Diversity Statement], the [Rust Code
+of Conduct], the [GitHub Community Guidelines], the [Contributor Covenant], and
+the [#node.js Policy on Trolling].
+
+[#node.js policy on trolling]: https://blog.izs.me/2012/08/policy-on-trolling
+[contributor covenant]: https://www.contributor-covenant.org/version/1/4/code-of-conduct/
+[github community guidelines]: https://docs.github.com/en/github/site-policy/github-community-guidelines
+[python diversity statement]: https://www.python.org/community/diversity/
+[rust code of conduct]: https://www.rust-lang.org/policies/code-of-conduct
|
espra/espra | 5642d8586df895611c276d45809637830f528e8b | pkg/ident: support conversion between naming conventions | diff --git a/pkg/ident/ident.go b/pkg/ident/ident.go
new file mode 100644
index 0000000..7e368ba
--- /dev/null
+++ b/pkg/ident/ident.go
@@ -0,0 +1,258 @@
+// Public Domain (-) 2018-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// Package ident provides support for converting identifiers between different
+// naming conventions.
+package ident
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Parts represents the normalized elements of an identifier.
+type Parts [][]byte
+
+func (p Parts) String() string {
+ return string(bytes.Join(p, []byte{','}))
+}
+
+// ToCamel converts the identifier into a camelCased string.
+func (p Parts) ToCamel() string {
+ var out []byte
+ for idx, part := range p {
+ if idx == 0 {
+ out = append(out, bytes.ToLower(part)...)
+ } else {
+ out = append(out, part...)
+ }
+ }
+ return string(out)
+}
+
+// ToKebab converts the identifier into a kebab-cased string.
+func (p Parts) ToKebab() string {
+ var out []byte
+ for idx, part := range p {
+ if idx != 0 {
+ out = append(out, '-')
+ }
+ out = append(out, bytes.ToLower(part)...)
+ }
+ return string(out)
+}
+
+// ToPascal converts the identifier into a PascalCased string.
+func (p Parts) ToPascal() string {
+ var out []byte
+ for _, part := range p {
+ out = append(out, part...)
+ }
+ return string(out)
+}
+
+// ToScreamingSnake converts the identifier into a SCREAMING_SNAKE_CASED string.
+func (p Parts) ToScreamingSnake() string {
+ var out []byte
+ for idx, part := range p {
+ if idx != 0 {
+ out = append(out, '_')
+ }
+ out = append(out, bytes.ToUpper(part)...)
+ }
+ return string(out)
+}
+
+// ToSnake converts the identifier into a snake_cased string.
+func (p Parts) ToSnake() string {
+ var out []byte
+ for idx, part := range p {
+ if idx != 0 {
+ out = append(out, '_')
+ }
+ out = append(out, bytes.ToLower(part)...)
+ }
+ return string(out)
+}
+
+// add appends parts from the given element. It looks for runs of initialisms
+// like "HTTPAPIs" and adds them as separate parts, i.e. "HTTP" and "APIs". Once
+// all initialisms are detected, the remaining element is added as a single
+// part.
+func (p Parts) add(elem []byte) Parts {
+ // Try to match an initialism exactly.
+ if special, ok := mapping[string(bytes.ToUpper(elem))]; ok {
+ return append(p, []byte(special))
+ }
+ // Try to find the longest initialism matches from the start.
+ for len(elem) > 0 {
+ match := ""
+ pos := -1
+ for i := 0; i <= len(elem); i++ {
+ if special, ok := mapping[string(bytes.ToUpper(elem[:i]))]; ok {
+ match = special
+ pos = i
+ }
+ }
+ if pos == -1 {
+ p = append(p, elem)
+ break
+ }
+ p = append(p, []byte(match))
+ elem = elem[pos:]
+ }
+ return p
+}
+
+// tryAdd attempts to add parts from the given element. If any initialisms are
+// found, they are added in canonical form.
+func (p Parts) tryAdd(elem []byte) (Parts, []byte) {
+ var nelem []byte
+ // Try to match an initialism exactly.
+ if special, ok := mapping[string(bytes.ToUpper(elem))]; ok {
+ return append(p, []byte(special)), nil
+ }
+ // Try to match an initialism from the end for the longest identifier with a
+ // non-uppercase suffix.
+ last := ""
+ pos := -1
+ for i := len(elem) - 1; i >= 0; i-- {
+ if special, ok := mapping[string(bytes.ToUpper(elem[i:]))]; ok {
+ last = special
+ pos = i
+ }
+ }
+ if pos == -1 {
+ // NOTE(tav): The given elem must be at least 2 characters long. The
+ // code in FromPascal currently ensures this to be the case.
+ nelem = elem[len(elem)-2:]
+ elem = elem[:len(elem)-2]
+ } else {
+ elem = elem[:pos]
+ }
+ p = p.add(elem)
+ if len(last) > 0 {
+ p = append(p, []byte(last))
+ }
+ return p, nelem
+}
+
+// FromCamel parses the given camelCased identifier into its parts.
+func FromCamel(ident string) Parts {
+ var parts Parts
+ i := 0
+ for ; i < len(ident); i++ {
+ char := ident[i]
+ if char >= 'A' && char <= 'Z' {
+ break
+ }
+ }
+ parts = append(parts, normalize([]byte(ident[:i])))
+ // NOTE(tav): The error must be nil, as ident must be empty or start on an
+ // uppercase character, per the break clause above.
+ elems, _ := FromPascal(ident[i:])
+ return append(parts, elems...)
+}
+
+// FromKebab parses the given kebab-cased identifier into its parts.
+func FromKebab(ident string) Parts {
+ var (
+ elem []byte
+ parts Parts
+ )
+ for i := 0; i < len(ident); i++ {
+ char := ident[i]
+ if char == '-' {
+ if len(elem) == 0 {
+ continue
+ }
+ parts = append(parts, normalize(bytes.ToLower(elem)))
+ elem = []byte{}
+ } else {
+ elem = append(elem, char)
+ }
+ }
+ if len(elem) > 0 {
+ parts = append(parts, normalize(bytes.ToLower(elem)))
+ }
+ return parts
+}
+
+// FromPascal parses the given PascalCased identifier into its parts.
+func FromPascal(ident string) (Parts, error) {
+ var (
+ elem []byte
+ parts Parts
+ )
+ // Ensure the first character is upper case.
+ if len(ident) > 0 {
+ char := ident[0]
+ if char < 'A' || char > 'Z' {
+ return nil, fmt.Errorf("ident: invalid PascalCased identifier: %q", ident)
+ }
+ elem = append(elem, char)
+ }
+ caps := true
+ for i := 1; i < len(ident); i++ {
+ char := ident[i]
+ if char >= 'A' && char <= 'Z' {
+ if caps {
+ elem = append(elem, char)
+ } else {
+ caps = true
+ parts = parts.add(elem)
+ elem = []byte{char}
+ }
+ } else if caps {
+ caps = false
+ elem = append(elem, char)
+ parts, elem = parts.tryAdd(elem)
+ } else {
+ elem = append(elem, char)
+ }
+ }
+ if len(elem) > 0 {
+ parts = parts.add(elem)
+ }
+ return parts, nil
+}
+
+// FromScreamingSnake parses the given SCREAMING_SNAKE_CASED identifier into its
+// parts.
+func FromScreamingSnake(ident string) Parts {
+ return FromSnake(ident)
+}
+
+// FromSnake parses the given snake_cased identifier into its parts.
+func FromSnake(ident string) Parts {
+ var (
+ elem []byte
+ parts Parts
+ )
+ for i := 0; i < len(ident); i++ {
+ char := ident[i]
+ if char == '_' {
+ if len(elem) == 0 {
+ continue
+ }
+ parts = append(parts, normalize(bytes.ToLower(elem)))
+ elem = []byte{}
+ } else {
+ elem = append(elem, char)
+ }
+ }
+ if len(elem) > 0 {
+ parts = append(parts, normalize(bytes.ToLower(elem)))
+ }
+ return parts
+}
+
+func normalize(elem []byte) []byte {
+ if special, ok := mapping[string(bytes.ToUpper(elem))]; ok {
+ return []byte(special)
+ }
+ if len(elem) > 0 && 'a' <= elem[0] && elem[0] <= 'z' {
+ elem[0] -= 32
+ }
+ return elem
+}
diff --git a/pkg/ident/ident_test.go b/pkg/ident/ident_test.go
new file mode 100644
index 0000000..9bc1786
--- /dev/null
+++ b/pkg/ident/ident_test.go
@@ -0,0 +1,222 @@
+// Public Domain (-) 2020-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package ident
+
+import (
+ "strings"
+ "testing"
+)
+
+var spec = map[string]*definition{
+ "HTTPSServer": {
+ camel: "httpsServer",
+ kebab: "https-server",
+ },
+ "I": {
+ camel: "i",
+ kebab: "i",
+ },
+ "IDSet": {
+ camel: "idSet",
+ kebab: "id-set",
+ },
+ "IDs": {
+ camel: "ids",
+ kebab: "ids",
+ },
+ "IDsMap": {
+ camel: "idsMap",
+ kebab: "ids-map",
+ },
+ "NetworkCIDR": {
+ camel: "networkCIDR",
+ kebab: "network-cidr",
+ },
+ "PCRTestKit": {
+ camel: "pcrTestKit",
+ kebab: "pcr-test-kit",
+ },
+ "PeerAPIOp": {
+ camel: "peerAPIOp",
+ kebab: "peer-api-op",
+ },
+ "PeerIDs": {
+ camel: "peerIDs",
+ kebab: "peer-ids",
+ },
+ "ServiceAPIKey": {
+ camel: "serviceAPIKey",
+ kebab: "service-api-key",
+ },
+ "ServiceKey": {
+ camel: "serviceKey",
+ kebab: "service-key",
+ },
+ "UserACLIDs": {
+ camel: "userACLIDs",
+ kebab: "user-acl-ids",
+ },
+ "Username": {
+ camel: "username",
+ kebab: "username",
+ },
+ "XMLHTTP": {
+ camel: "xmlHTTP",
+ kebab: "xml-http",
+ },
+ "XMLHTTPRequest": {
+ camel: "xmlHTTPRequest",
+ kebab: "xml-http-request",
+ },
+}
+
+var tests = []testcase{
+ {"https-server", spec["HTTPSServer"]},
+ {"https-server-", spec["HTTPSServer"]},
+ {"-https-server", spec["HTTPSServer"]},
+ {"--https-server-", spec["HTTPSServer"]},
+ {"ids", spec["IDs"]},
+ {"ids-", spec["IDs"]},
+ {"-ids", spec["IDs"]},
+ {"--ids-", spec["IDs"]},
+ {"ids-map", spec["IDsMap"]},
+ {"ids-map-", spec["IDsMap"]},
+ {"-ids-map", spec["IDsMap"]},
+ {"--ids-map-", spec["IDsMap"]},
+ {"network-cidr", spec["NetworkCIDR"]},
+ {"network-cidr-", spec["NetworkCIDR"]},
+ {"-network-cidr", spec["NetworkCIDR"]},
+ {"--network-cidr-", spec["NetworkCIDR"]},
+ {"peer-api-op", spec["PeerAPIOp"]},
+ {"peer-api-op-", spec["PeerAPIOp"]},
+ {"-peer-api-op", spec["PeerAPIOp"]},
+ {"--peer-api-op-", spec["PeerAPIOp"]},
+ {"peer-ids", spec["PeerIDs"]},
+ {"peer-ids-", spec["PeerIDs"]},
+ {"-peer-ids", spec["PeerIDs"]},
+ {"--peer-ids-", spec["PeerIDs"]},
+ {"service-api-key", spec["ServiceAPIKey"]},
+ {"service-api-key-", spec["ServiceAPIKey"]},
+ {"-service-api-key", spec["ServiceAPIKey"]},
+ {"--service-api-key-", spec["ServiceAPIKey"]},
+ {"service-key", spec["ServiceKey"]},
+ {"service-key-", spec["ServiceKey"]},
+ {"-service-key", spec["ServiceKey"]},
+ {"--service-key-", spec["ServiceKey"]},
+ {"user-acl-ids", spec["UserACLIDs"]},
+ {"user-acl-ids-", spec["UserACLIDs"]},
+ {"-user-acl-ids", spec["UserACLIDs"]},
+ {"--user-acl-ids-", spec["UserACLIDs"]},
+ {"username", spec["Username"]},
+ {"username-", spec["Username"]},
+ {"-username", spec["Username"]},
+ {"--username-", spec["Username"]},
+ {"xml-http", spec["XMLHTTP"]},
+ {"xml-http-", spec["XMLHTTP"]},
+ {"-xml-http", spec["XMLHTTP"]},
+ {"--xml-http-", spec["XMLHTTP"]},
+ {"xml-http-request", spec["XMLHTTPRequest"]},
+ {"xml-http-request-", spec["XMLHTTPRequest"]},
+ {"-xml-http-request", spec["XMLHTTPRequest"]},
+ {"--xml-http-request-", spec["XMLHTTPRequest"]},
+}
+
+type definition struct {
+ camel string
+ kebab string
+ pascal string
+ screaming string
+ snake string
+}
+
+type testcase struct {
+ ident string
+ want *definition
+}
+
+func TestCamel(t *testing.T) {
+ for _, tt := range spec {
+ testConversion(t, "Camel", FromCamel, tt.camel, tt)
+ }
+}
+
+func TestKebab(t *testing.T) {
+ for _, tt := range tests {
+ testConversion(t, "Kebab", FromKebab, tt.ident, tt.want)
+ }
+}
+
+func TestPascal(t *testing.T) {
+ MustPascal := func(ident string) Parts {
+ parts, err := FromPascal(ident)
+ if err != nil {
+ t.Fatalf("FromPascal(%q) returned an unexpected error: %s", ident, err)
+ }
+ return parts
+ }
+ _, err := FromPascal("invalid")
+ if err == nil {
+ t.Errorf("FromPascal(%q) failed to return an error", "invalid")
+ }
+ for _, tt := range spec {
+ testConversion(t, "Pascal", MustPascal, tt.pascal, tt)
+ }
+}
+
+func TestScreamingSnake(t *testing.T) {
+ for _, tt := range tests {
+ ident := strings.ToUpper(strings.ReplaceAll(tt.ident, "-", "_"))
+ testConversion(t, "ScreamingSnake", FromScreamingSnake, ident, tt.want)
+ }
+}
+
+func TestSnake(t *testing.T) {
+ for _, tt := range tests {
+ ident := strings.ReplaceAll(tt.ident, "-", "_")
+ testConversion(t, "Snake", FromSnake, ident, tt.want)
+ }
+}
+
+func TestString(t *testing.T) {
+ ident := "HTTPAPIs"
+ parts, _ := FromPascal(ident)
+ got := parts.String()
+ want := "HTTP,APIs"
+ if got != want {
+ t.Errorf("FromPascal(%q).String() = %q: want %q", ident, got, want)
+ }
+}
+
+func testConversion(t *testing.T, typ string, conv func(string) Parts, ident string, want *definition) {
+ id := conv(ident)
+ got := id.ToCamel()
+ if got != want.camel {
+ t.Errorf("From%s(%q).ToCamel() = %q: want %q", typ, ident, got, want.camel)
+ }
+ got = id.ToKebab()
+ if got != want.kebab {
+ t.Errorf("From%s(%q).ToKebab() = %q: want %q", typ, ident, got, want.kebab)
+ }
+ got = id.ToPascal()
+ if got != want.pascal {
+ t.Errorf("From%s(%q).ToPascal() = %q: want %q", typ, ident, got, want.pascal)
+ }
+ got = id.ToScreamingSnake()
+ if got != want.screaming {
+ t.Errorf("From%s(%q).ToScreamingSnake() = %q: want %q", typ, ident, got, want.screaming)
+ }
+ got = id.ToSnake()
+ if got != want.snake {
+ t.Errorf("From%s(%q).ToSnake() = %q: want %q", typ, ident, got, want.snake)
+ }
+}
+
+func init() {
+ for pascal, definition := range spec {
+ definition.pascal = pascal
+ definition.snake = strings.ReplaceAll(definition.kebab, "-", "_")
+ definition.screaming = strings.ToUpper(definition.snake)
+ }
+ AddInitialism("PCR")
+}
|
espra/espra | ef327dd3a3606d37f8a054e136c68742ccf86ead | pkg/ident: add core set of initialisms | diff --git a/pkg/ident/initialism.go b/pkg/ident/initialism.go
new file mode 100644
index 0000000..f2e484b
--- /dev/null
+++ b/pkg/ident/initialism.go
@@ -0,0 +1,141 @@
+// Public Domain (-) 2018-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package ident
+
+import (
+ "strings"
+)
+
+var mapping = map[string]string{}
+
+// This list helps us satisfy the recommended naming style of variables in Go:
+// https://github.com/golang/go/wiki/CodeReviewComments#initialisms
+//
+// The list is always going to be incomplete, so please add to it as we come
+// across new initialisms.
+var initialisms = []string{
+ "ACK",
+ "ACL",
+ "ACLs",
+ "AES",
+ "ANSI",
+ "API",
+ "APIs",
+ "ARP",
+ "ASCII",
+ "ASN1",
+ "ATM",
+ "BGP",
+ "BIOS",
+ "BLAKE",
+ "BLAKE3",
+ "BSS",
+ "CA",
+ "CIDR",
+ "CLI",
+ "CLUI",
+ "CPU",
+ "CPUs",
+ "CRC",
+ "CSRF",
+ "CSS",
+ "CSV",
+ "DB",
+ "DBs",
+ "DHCP",
+ "DNS",
+ "DRM",
+ "EOF",
+ "EON",
+ "FTP",
+ "GRPC",
+ "GUID",
+ "GUIDs",
+ "HCL",
+ "HTML",
+ "HTTP",
+ "HTTPS",
+ "IANA",
+ "ICMP",
+ "ID",
+ "IDs",
+ "IEEE",
+ "IMAP",
+ "IP",
+ "IPs",
+ "IRC",
+ "ISO",
+ "ISP",
+ "JSON",
+ "LAN",
+ "LHS",
+ "MAC",
+ "MD5",
+ "MTU",
+ "NATO",
+ "NIC",
+ "NVRAM",
+ "OSI",
+ "PEM",
+ "POP3",
+ "QPS",
+ "QUIC",
+ "RAM",
+ "RFC",
+ "RFCs",
+ "RHS",
+ "RPC",
+ "SFTP",
+ "SHA",
+ "SHA1",
+ "SHA256",
+ "SHA512",
+ "SLA",
+ "SMTP",
+ "SQL",
+ "SRAM",
+ "SSH",
+ "SSID",
+ "SSL",
+ "SYN",
+ "TCP",
+ "TLS",
+ "TOML",
+ "TPS",
+ "TTL",
+ "UDP",
+ "UI",
+ "UID",
+ "UIDs",
+ "URI",
+ "URL",
+ "USB",
+ "UTF8",
+ "UUID",
+ "UUIDs",
+ "VLAN",
+ "VM",
+ "VPN",
+ "W3C",
+ "WPA",
+ "XML",
+ "XMPP",
+ "XON",
+ "XSRF",
+ "XSS",
+ "YAML",
+}
+
+// AddInitialism adds the given identifier to the set of initialisms. The given
+// identifier should be in the PascalCase form and have at most one lower-cased
+// letter which must be at the very end.
+func AddInitialism(ident string) {
+ mapping[strings.ToUpper(ident)] = ident
+}
+
+func init() {
+ for _, s := range initialisms {
+ mapping[strings.ToUpper(s)] = s
+ }
+}
|
espra/espra | a32e1c7c388a660939f11380a3020b45c4c3939e | pkg/osexit: add a mock of os.Exit for testing purposes | diff --git a/pkg/osexit/osexit.go b/pkg/osexit/osexit.go
new file mode 100644
index 0000000..3db9acc
--- /dev/null
+++ b/pkg/osexit/osexit.go
@@ -0,0 +1,85 @@
+// Public Domain (-) 2018-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// Package osexit mocks the os.Exit function.
+//
+// To use, first set a package-specific exit function, e.g.
+//
+// var exit = os.Exit
+//
+// Then use it instead of a direct call to os.Exit, e.g.
+//
+// if somethingFatal {
+// exit(1)
+// return
+// }
+//
+// Make sure to return immediately after the call to exit, so that testing code
+// will match real code as closely as possible.
+//
+// You can now use the utility functions provided by this package to override
+// exit for testing purposes, e.g.
+//
+// exit = osexit.Set()
+// invokeCodeCallingExit()
+// if !osexit.Called() {
+// t.Fatalf("os.Exit was not called as expected")
+// }
+//
+package osexit
+
+import (
+ "sync"
+)
+
+var (
+ called bool
+ mu sync.RWMutex // protects called, status
+ status int
+)
+
+// Called returns whether the mock os.Exit function was called.
+func Called() bool {
+ mu.RLock()
+ c := called
+ mu.RUnlock()
+ return c
+}
+
+// Func provides a mock for the os.Exit function. Special care must be taken
+// when testing os.Exit to make sure no code runs after the call to Exit. It's
+// recommended to put a return statement after Exit calls so that the behaviour
+// of the mock matches that of the real function as much as possible.
+func Func(code int) {
+ mu.Lock()
+ if called {
+ mu.Unlock()
+ return
+ }
+ called = true
+ status = code
+ mu.Unlock()
+}
+
+// Reset resets the state of the mock function.
+func Reset() {
+ mu.Lock()
+ called = false
+ status = 0
+ mu.Unlock()
+}
+
+// Set returns the mock os.Exit function after calling Reset.
+func Set() func(int) {
+ Reset()
+ return Func
+}
+
+// Status returns the status code that the mock os.Exit function was called
+// with.
+func Status() int {
+ mu.RLock()
+ s := status
+ mu.RUnlock()
+ return s
+}
diff --git a/pkg/osexit/osexit_test.go b/pkg/osexit/osexit_test.go
new file mode 100644
index 0000000..5d9fe39
--- /dev/null
+++ b/pkg/osexit/osexit_test.go
@@ -0,0 +1,38 @@
+// Public Domain (-) 2018-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package osexit_test
+
+import (
+ "os"
+ "testing"
+
+ "web4.cc/pkg/osexit"
+)
+
+var exit = os.Exit
+
+func TestExit(t *testing.T) {
+ exit = osexit.Set()
+ exit(2)
+ if !osexit.Called() {
+ t.Fatalf("mock exit function was not called")
+ }
+ status := osexit.Status()
+ if status != 2 {
+ t.Fatalf("mock exit function did not set the right status code: got %d, want 2", status)
+ }
+ exit(3)
+ status = osexit.Status()
+ if status != 2 {
+ t.Fatalf("mock exit function overrode the status set by a previous call: got %d, want 2", status)
+ }
+ osexit.Reset()
+ if osexit.Called() {
+ t.Fatalf("the reset mock exit function claims to have been called")
+ }
+ status = osexit.Status()
+ if status != 0 {
+ t.Fatalf("the reset mock exit function returned a non-zero status code: got %d, want 0", status)
+ }
+}
|
espra/espra | 9cdd3f207f0f4203d3107f533f6ccdd4fd144a5d | COPYING: add dummy file to satisfy pkg.go.dev | diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..9dc90ed
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,127 @@
+PLEASE IGNORE THIS FILE. IT'S ONLY HERE TO SATISFY PKG.GO.DEV.
+
+FOR THE ACTUAL LICENSE, PLEASE SEE: UNLICENSE.md
+
+-------------------------------------------------------------------------------
+
+Creative Commons Legal Code
+
+CC0 1.0 Universal
+
+ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
+ LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
+ ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
+ INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
+ REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
+ PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
+ THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
+ HEREUNDER.
+
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer
+exclusive Copyright and Related Rights (defined below) upon the creator
+and subsequent owner(s) (each and all, an "owner") of an original work of
+authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for
+the purpose of contributing to a commons of creative, cultural and
+scientific works ("Commons") that the public can reliably and without fear
+of later claims of infringement build upon, modify, incorporate in other
+works, reuse and redistribute as freely as possible in any form whatsoever
+and for any purposes, including without limitation commercial purposes.
+These owners may contribute to the Commons to promote the ideal of a free
+culture and the further production of creative, cultural and scientific
+works, or to gain reputation or greater distribution for their Work in
+part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any
+expectation of additional consideration or compensation, the person
+associating CC0 with a Work (the "Affirmer"), to the extent that he or she
+is an owner of Copyright and Related Rights in the Work, voluntarily
+elects to apply CC0 to the Work and publicly distribute the Work under its
+terms, with knowledge of his or her Copyright and Related Rights in the
+Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be
+protected by copyright and related or neighboring rights ("Copyright and
+Related Rights"). Copyright and Related Rights include, but are not
+limited to, the following:
+
+ i. the right to reproduce, adapt, distribute, perform, display,
+ communicate, and translate a Work;
+ ii. moral rights retained by the original author(s) and/or performer(s);
+iii. publicity and privacy rights pertaining to a person's image or
+ likeness depicted in a Work;
+ iv. rights protecting against unfair competition in regards to a Work,
+ subject to the limitations in paragraph 4(a), below;
+ v. rights protecting the extraction, dissemination, use and reuse of data
+ in a Work;
+ vi. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation
+ thereof, including any amended or successor version of such
+ directive); and
+vii. other similar, equivalent or corresponding rights throughout the
+ world based on applicable law or treaty, and any national
+ implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention
+of, applicable law, Affirmer hereby overtly, fully, permanently,
+irrevocably and unconditionally waives, abandons, and surrenders all of
+Affirmer's Copyright and Related Rights and associated claims and causes
+of action, whether now known or unknown (including existing as well as
+future claims and causes of action), in the Work (i) in all territories
+worldwide, (ii) for the maximum duration provided by applicable law or
+treaty (including future time extensions), (iii) in any current or future
+medium and for any number of copies, and (iv) for any purpose whatsoever,
+including without limitation commercial, advertising or promotional
+purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
+member of the public at large and to the detriment of Affirmer's heirs and
+successors, fully intending that such Waiver shall not be subject to
+revocation, rescission, cancellation, termination, or any other legal or
+equitable action to disrupt the quiet enjoyment of the Work by the public
+as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason
+be judged legally invalid or ineffective under applicable law, then the
+Waiver shall be preserved to the maximum extent permitted taking into
+account Affirmer's express Statement of Purpose. In addition, to the
+extent the Waiver is so judged Affirmer hereby grants to each affected
+person a royalty-free, non transferable, non sublicensable, non exclusive,
+irrevocable and unconditional license to exercise Affirmer's Copyright and
+Related Rights in the Work (i) in all territories worldwide, (ii) for the
+maximum duration provided by applicable law or treaty (including future
+time extensions), (iii) in any current or future medium and for any number
+of copies, and (iv) for any purpose whatsoever, including without
+limitation commercial, advertising or promotional purposes (the
+"License"). The License shall be deemed effective as of the date CC0 was
+applied by Affirmer to the Work. Should any part of the License for any
+reason be judged legally invalid or ineffective under applicable law, such
+partial invalidity or ineffectiveness shall not invalidate the remainder
+of the License, and in such case Affirmer hereby affirms that he or she
+will not (i) exercise any of his or her remaining Copyright and Related
+Rights in the Work or (ii) assert any associated claims and causes of
+action with respect to the Work, in either case contrary to Affirmer's
+express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+ surrendered, licensed or otherwise affected by this document.
+ b. Affirmer offers the Work as-is and makes no representations or
+ warranties of any kind concerning the Work, express, implied,
+ statutory or otherwise, including without limitation warranties of
+ title, merchantability, fitness for a particular purpose, non
+ infringement, or the absence of latent or other defects, accuracy, or
+ the present or absence of errors, whether or not discoverable, all to
+ the greatest extent permissible under applicable law.
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+ that may apply to the Work or any use thereof, including without
+ limitation any person's Copyright and Related Rights in the Work.
+ Further, Affirmer disclaims responsibility for obtaining any necessary
+ consents, permissions or other rights required for any use of the
+ Work.
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+ party to this document and has no duty or obligation with respect to
+ this CC0 or use of the Work.
|
espra/espra | 9ce68dbd1be1770823d4610cb956843695c0e39c | pkg/checked: add package for detecting integer overflows | diff --git a/pkg/checked/checked.go b/pkg/checked/checked.go
new file mode 100644
index 0000000..56210a9
--- /dev/null
+++ b/pkg/checked/checked.go
@@ -0,0 +1,15 @@
+// Public Domain (-) 2018-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+// Package checked adds support for detecting overflows on integer operations.
+package checked
+
+// MulU64 returns the result of multiplying two uint64 values, and whether it's
+// safe or overflows.
+func MulU64(a, b uint64) (v uint64, ok bool) {
+ res := a * b
+ if a != 0 && res/a != b {
+ return res, false
+ }
+ return res, true
+}
diff --git a/pkg/checked/checked_test.go b/pkg/checked/checked_test.go
new file mode 100644
index 0000000..b759ae8
--- /dev/null
+++ b/pkg/checked/checked_test.go
@@ -0,0 +1,24 @@
+// Public Domain (-) 2018-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package checked
+
+import (
+ "testing"
+)
+
+func TestMulU64(t *testing.T) {
+ for _, tt := range []struct {
+ a uint64
+ b uint64
+ want bool
+ }{
+ {4294967291, 4294967271, true},
+ {4294967291, 4294967321, false},
+ } {
+ v, ok := MulU64(tt.a, tt.b)
+ if ok != tt.want {
+ t.Errorf("MulU64(%d, %d) = %d: want %v", tt.a, tt.b, v, tt.want)
+ }
+ }
+}
|
espra/espra | 441af215859e9b8d9486df14c11977249e7612a4 | website: add base setup for the web4.cc site | diff --git a/website/.gcloudignore b/website/.gcloudignore
new file mode 100644
index 0000000..587f76f
--- /dev/null
+++ b/website/.gcloudignore
@@ -0,0 +1,6 @@
+.gcloudignore
+.git
+.gitignore
+
+*.out
+*.test
diff --git a/website/app.yaml b/website/app.yaml
new file mode 100644
index 0000000..3d3998d
--- /dev/null
+++ b/website/app.yaml
@@ -0,0 +1,4 @@
+# Public Domain (-) 2018-present, The Web4 Authors.
+# See the Web4 UNLICENSE file for details.
+
+runtime: go115
diff --git a/website/go.mod b/website/go.mod
new file mode 100644
index 0000000..129086a
--- /dev/null
+++ b/website/go.mod
@@ -0,0 +1,3 @@
+module web4.cc/website
+
+go 1.16
diff --git a/website/website.go b/website/website.go
new file mode 100644
index 0000000..b86ffac
--- /dev/null
+++ b/website/website.go
@@ -0,0 +1,79 @@
+// Public Domain (-) 2018-present, The Web4 Authors.
+// See the Web4 UNLICENSE file for details.
+
+package main
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+)
+
+const (
+ goGetResponse = `<!doctype html>
+<meta name="go-import" content="web4.cc git https://github.com/espra/web4">
+<meta name="go-source" content="web4.cc https://github.com/espra/web4 https://github.com/espra/web4/tree/main{/dir} https://github.com/espra/web4/blob/main{/dir}/{file}#L{line}">`
+ slackInviteURL = "https://join.slack.com/t/espra/shared_invite/enQtOTAxMjMxOTI3NDkwLTc4YWUxYzIxMmNjMDU3MmVhNjA2YTc4YjUxZDQwNjgzZTcxMmJiMDU2YmQyNDdmMmUxZTM2OWU0ODUyMGJkODY"
+)
+
+// We assume a prod environment if the PORT environment variable has been set.
+var isProd bool
+
+func handle(w http.ResponseWriter, r *http.Request) {
+ if isProd {
+ if r.Host != "web4.cc" || r.Header.Get("X-Forwarded-Proto") == "http" {
+ url := r.URL
+ url.Host = "web4.cc"
+ url.Scheme = "https"
+ http.Redirect(w, r, url.String(), http.StatusMovedPermanently)
+ return
+ }
+ w.Header().Set("Strict-Transport-Security", "max-age=31536000; includeSubDomains; preload")
+ }
+ if r.URL.Query().Get("go-get") != "" {
+ w.Write([]byte(goGetResponse))
+ return
+ }
+ split := strings.Split(r.URL.Path, "/")
+ if len(split) >= 2 {
+ switch split[1] {
+ case "cmd", "infra", "pkg", "service", "website":
+ if r.URL.RawQuery != "" {
+ http.NotFound(w, r)
+ return
+ }
+ http.Redirect(w, r, "https://pkg.go.dev/web4.cc/"+strings.Join(split[1:], "/"), http.StatusFound)
+ return
+ case "health":
+ w.Write([]byte("OK"))
+ return
+ case "slack":
+ http.Redirect(w, r, slackInviteURL, http.StatusFound)
+ return
+ }
+ }
+ http.Redirect(w, r, "https://github.com/espra/web4", http.StatusFound)
+}
+
+func main() {
+ port := os.Getenv("PORT")
+ if port == "" {
+ port = "8080"
+ } else {
+ isProd = true
+ }
+ http.HandleFunc("/", handle)
+ srv := &http.Server{
+ Addr: fmt.Sprintf(":%s", port),
+ Handler: http.DefaultServeMux,
+ ReadTimeout: 10 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ }
+ log.Printf("Listening on http://localhost%s\n", srv.Addr)
+ if err := srv.ListenAndServe(); err != nil {
+ log.Fatalf("Failed to run HTTP server: %s\n", err)
+ }
+}
|
espra/espra | d92f7c5ecdd54301dc5f59b961b293aafab5b80d | go: init module | diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..a8a8188
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,3 @@
+module web4.cc
+
+go 1.16
|
espra/espra | dcb0d41294dad7d16e63b4d0cd0e028fe5a03e20 | AUTHORS: add the initial set of authors | diff --git a/AUTHORS.eon b/AUTHORS.eon
new file mode 100644
index 0000000..ba71f09
--- /dev/null
+++ b/AUTHORS.eon
@@ -0,0 +1,36 @@
+# Web4 Authors
+# ============
+#
+# This is the official list of the Web4 Authors ("The Authors").
+#
+# By adding yourself to this file, you are affirming that all Contributions that
+# you make to Web4 ("The Work") are:
+#
+# * Created in whole or in part by yourself, and that you have the right to
+# submit it under the terms of the [Web4 UNLICENSE], and voluntarily elect to
+# place it under those terms; or
+#
+# * Based on Third Party Work that, to the best of your knowledge, is covered
+# under a compatible open source license which gives you the right to submit
+# that work with modifications, where the license is indicated in the contents
+# of the related files or referenced within a top-level `THIRDPARTY.eon` file.
+#
+# You also acknowledge that this is a public project and that all Contributions
+# you make, including the record of those Contributions, along with any personal
+# information that you submit with them, are intended to be maintained
+# indefinitely, and may be redistributed via any medium.
+#
+# Thank you!
+#
+# [Web4 UNLICENSE]: UNLICENSE.md
+
+tav {
+ email = [email protected]
+ github = tav
+ location {
+ area = London
+ country = GB
+ }
+ name = tav
+ twitter = tav
+}
|
espra/espra | 16ddb470a625fae7a91467b51c908b87686fdc41 | UNLICENSE: place the repo into the public domain | diff --git a/UNLICENSE.md b/UNLICENSE.md
new file mode 100644
index 0000000..80a69cb
--- /dev/null
+++ b/UNLICENSE.md
@@ -0,0 +1,183 @@
+# Web4 UNLICENSE
+
+In the spirit of contributing to the Public Domain, to the full extent possible
+under law, the Web4 Authors ("The Authors"), as specified in the [`AUTHORS.eon`]
+file, have waived all copyright and related or neighboring rights to their
+Contributions to Web4 ("The Work").
+
+This does not apply to works authored by third parties ("Third Party Works")
+which come with their own copyright and licensing terms. These terms may be
+defined in explicit files as specified within an optional [`THIRDPARTY.eon`]
+file or specified as part of the contents of licensed files. We recommend you
+read them as their terms may differ from the terms below.
+
+With the exception of Third Party Works, all files in this repository are
+covered by this UNLICENSE. If desired, an informative header like the following
+could be used to explicitly specify that a file is covered by this UNLICENSE:
+
+ // Public Domain (-) 2021-present, The Web4 Authors.
+ // See the Web4 UNLICENSE file for details.
+
+All trademarks and registered trademarks mentioned in The Work are the property
+of their respective owners.
+
+## Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer
+exclusive Copyright and Related Rights (defined below) upon the creator and
+subsequent owner(s) (each and all, an "owner") of an original work of authorship
+and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for the
+purpose of contributing to a commons of creative, cultural, and scientific works
+("Commons") that the public can reliably and without fear of later claims of
+infringement build upon, modify, incorporate in other works, reuse, and
+redistribute as freely as possible in any form whatsoever and for any purposes,
+including without limitation commercial purposes.
+
+These owners may contribute to the Commons to promote the ideal of a free
+culture and the further production of creative, cultural, and scientific works,
+or to gain reputation or greater distribution for their Work in part through the
+use and efforts of others.
+
+For these and/or other purposes and motivations, and without any expectation of
+additional consideration or compensation, the Authors, to the extent that they
+are an owner of Copyright and Related Rights in the Work, voluntarily elect to
+apply this UNLICENSE to the Work and publicly distribute the Work under these
+terms, with knowledge of their Copyright and Related Rights in the Work and the
+meaning and intended legal effect of this UNLICENSE on those rights.
+
+## Definitions
+
+The term "distribute" has the same meaning here as under U.S. copyright law. A
+"Contribution" is the original Work, or any additions or changes to it.
+
+A Work made available under this UNLICENSE may be protected by copyright and
+related or neighboring rights ("Copyright and Related Rights"). Copyright and
+Related Rights include, but are not limited to, the following:
+
+1. the right to reproduce, adapt, distribute, perform, display, communicate, and
+ translate a Work;
+
+2. moral rights retained by the original author(s) and/or performer(s);
+
+3. publicity and privacy rights pertaining to a person's image or likeness
+ depicted in a Work;
+
+4. rights protecting against unfair competition in regards to a Work, subject to
+ the Limitations and Disclaimers, below;
+
+5. rights protecting the extraction, dissemination, use, and reuse of data in a
+ Work;
+
+6. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation thereof,
+ including any amended or successor version of such directive); and
+
+7. other similar, equivalent or corresponding rights throughout the world based
+ on applicable law or treaty, and any national implementations thereof.
+
+## Waiver
+
+To the greatest extent permitted by, but not in contravention of, applicable
+law, the Authors hereby overtly, fully, permanently, irrevocably, and
+unconditionally waive, abandon, and surrender all of their Copyright and Related
+Rights and associated claims and causes of action, whether now known or unknown
+(including existing as well as future claims and causes of action), in the Work
+(i) in all territories worldwide, (ii) for the maximum duration provided by
+applicable law or treaty (including future time extensions), (iii) in any
+current or future medium and for any number of copies, and (iv) for any purpose
+whatsoever, including without limitation commercial, advertising, or promotional
+purposes (the "Waiver").
+
+The Authors make the Waiver for the benefit of each member of the public at
+large and to the detriment of their heirs and successors, fully intending that
+such Waiver shall not be subject to revocation, rescission, cancellation,
+termination, or any other legal or equitable action to disrupt the quiet
+enjoyment of the Work by the public as contemplated by the Authors' express
+Statement of Purpose.
+
+## Public License Fallback
+
+Should any part of the Waiver for any reason be judged legally invalid or
+ineffective under applicable law, then the Waiver shall be preserved to the
+maximum extent permitted taking into account the Authors' express Statement of
+Purpose. In addition, to the extent the Waiver is so judged the Authors hereby
+grant to each affected person a royalty-free, non transferable, non
+sublicensable, non exclusive, irrevocable, and unconditional license to exercise
+the Authors' Copyright and Related Rights in the Work (i) in all territories
+worldwide, (ii) for the maximum duration provided by applicable law or treaty
+(including future time extensions), (iii) in any current or future medium, and
+for any number of copies, and (iv) for any purpose whatsoever, including without
+limitation commercial, advertising, or promotional purposes (the "Public
+License").
+
+The Public License shall be deemed effective as of the date this UNLICENSE was
+first applied to the Work. Should any part of the Public License for any reason
+be judged legally invalid or ineffective under applicable law, such partial
+invalidity or ineffectiveness shall not invalidate the remainder of the Public
+License, and in such case the Authors hereby affirm that they will not (i)
+exercise any of their remaining Copyright and Related Rights in the Work or (ii)
+assert any associated claims and causes of action with respect to the Work, in
+either case contrary to their express Statement of Purpose.
+
+## Grant of Patent Rights
+
+The Authors hereby grant to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section) patent
+license to make, have made, use, offer to sell, sell, import, transfer, and
+otherwise run, modify, and propagate the contents of this Work, where such
+license applies only to those patent claims, both currently owned or controlled
+by any of the Authors and acquired in the future, licensable by any of the
+Authors that are necessarily infringed by this Work.
+
+This grant does not include claims that would be infringed only as a consequence
+of further modification of this Work.
+
+If you or your agent or exclusive licensee institute or order or agree to the
+institution of patent litigation against any entity (including a cross-claim or
+counterclaim in a lawsuit) alleging that this Work or any Contribution
+incorporated within this Work constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent rights
+granted to you under this Grant of Patent Rights for the Work shall terminate as
+of the date such litigation is filed.
+
+## Limitations and Disclaimers
+
+1. No trademark rights held by any of the Authors are waived, abandoned,
+ surrendered, licensed, or otherwise affected by this document.
+
+2. The Authors offer the Work as-is and makes no representations or warranties
+ of any kind concerning the Work, express, implied, statutory, or otherwise,
+ including without limitation warranties of title, merchantability, fitness
+ for a particular purpose, non infringement, or the absence of latent or other
+ defects, accuracy, or the present or absence of errors, whether or not
+ discoverable, all to the greatest extent permissible under applicable law.
+
+ In no event shall the Authors be liable for any direct, indirect, incidental,
+ special, exemplary, or consequential damages (including, but not limited to,
+ procurement of substitute goods or services; loss of use, data, or profits;
+ or business interruption) however caused and on any theory of liability,
+ whether in contract, strict liability, or tort (including negligence or
+ otherwise) arising in any way out of the use of the Work, even if advised of
+ the possibility of such damage.
+
+3. The Authors disclaim responsibility for clearing rights of other persons that
+ may apply to the Work or any use thereof, including without limitation any
+ person's Copyright and Related Rights in the Work. Further, the Authors
+ disclaim responsibility for obtaining any necessary consents, permissions or
+ other rights required for any use of the Work.
+
+## Appendix
+
+- The text of this document is derived from [Creative Commons CC0 1.0
+ Universal] and the [Patent Grant] that ships with Google Go.
+
+- This UNLICENSE is seen as a mere transitional requirement until international
+ law adapts to the post intellectual property reality.
+
+[`authors.eon`]: AUTHORS.eon
+[`thirdparty.eon`]: THIRDPARTY.eon
+[creative commons cc0 1.0 universal]: https://creativecommons.org/publicdomain/zero/1.0/legalcode
+[patent grant]: https://github.com/golang/go/blob/master/PATENTS
|
espra/espra | 3304912a7c1e7682cf75e8da0e86d47808540e40 | pkg/big: define the base API for the package | diff --git a/pkg/big/big.go b/pkg/big/big.go
new file mode 100644
index 0000000..0a75fc3
--- /dev/null
+++ b/pkg/big/big.go
@@ -0,0 +1,251 @@
+// Public Domain (-) 2020-present, The Core Authors.
+// See the Core UNLICENSE file for details.
+
+// Package big implements arbitrary-precision decimals.
+//
+// Unlike other decimal packages, we opt for a cleaner API for developers:
+//
+// * Instead of returning NaNs on invalid operations, methods like Div and Sqrt
+// return explicit errors instead. This makes it more obvious to developers
+// when they should check for error conditions.
+//
+// * Instead of rounding on every operation, we only round on specific methods
+// like Div, Sqrt, and String, but otherwise do lossless calculations. If a
+// developer desires rounding after every operation, they can call the
+// explicit Round method.
+//
+// * Instead of having a max precision set in a configurable context that has
+// to be constantly passed around, all operations default to infinite
+// precision. If explicit precision is desired, then the ChangePrec method
+// can be used with an explicit rounding mode after every operation.
+//
+// * Similarly, instead of having a max scale and rounding method set in a
+// context, methods like Div, Sqrt, and String default to a max scale of 20,
+// and ToNearestEven rounding. If explicit max scale and rounding mode are
+// desired, then there the corresponding DivExplicit, SqrtExplicit, and
+// StringExplicit methods can be used instead.
+package big
+
+import (
+ "errors"
+ "fmt"
+ "math/big"
+)
+
+// DefaultMaxScale defines the default maximum scale, i.e. the number of digits
+// to the right of the decimal point in a number.
+const DefaultMaxScale = 20
+
+// Rounding modes.
+const (
+ AwayFromZero Rounding = iota
+ ToNearestEven
+ ToNegativeInf
+ ToPositiveInf
+ ToZero
+)
+
+// Error values.
+var (
+ ErrDivisionByZero = errors.New("big: division by zero")
+ ErrSqrtOfNegative = errors.New("big: sqrt of negative number")
+)
+
+// Decimal represents an arbitrary-precision decimal.
+type Decimal struct {
+ base uint64
+ excess big.Int
+ state uint64
+}
+
+// Abs sets d to the absolute value of x, and returns d.
+func (d *Decimal) Abs(x *Decimal) *Decimal {
+ return d
+}
+
+// Add sets d to the sum of x and y, and returns d.
+func (d *Decimal) Add(x *Decimal, y *Decimal) *Decimal {
+ return d
+}
+
+// ChangePrec changes d's precision to prec, and returns the (possibly) rounded
+// value of d according to the given rounding mode.
+func (d *Decimal) ChangePrec(prec uint, rounding Rounding) *Decimal {
+ return d
+}
+
+// Cmp compares d and x, and returns:
+//
+// -1 if d < x
+// 0 if d == x
+// +1 if d > x
+func (d *Decimal) Cmp(x *Decimal) int {
+ return 0
+}
+
+// Copy sets d to x, and returns d.
+func (d *Decimal) Copy(x *Decimal) *Decimal {
+ d.base = x.base
+ d.excess = x.excess
+ d.state = x.state
+ return d
+}
+
+// Div sets d to the division of x by y, and returns d. It returns an error if y
+// is zero, will use a max scale of 20, and if rounding is needed, will use
+// ToNearestEven rounding.
+func (d *Decimal) Div(x *Decimal, y *Decimal) (*Decimal, error) {
+ return d, nil
+}
+
+// DivExplicit behaves the same as Div, except with a specific max scale and
+// rounding mode.
+func (d *Decimal) DivExplicit(x *Decimal, y *Decimal, scale uint, rounding Rounding) (*Decimal, error) {
+ return d, nil
+}
+
+// Format implements the interface for fmt.Formatter.
+func (d *Decimal) Format(s fmt.State, format rune) {
+}
+
+// GetRaw returns the components that represent the raw value of d.
+func (d *Decimal) GetRaw() (base uint64, excess *big.Int, state uint64) {
+ return d.base, &d.excess, d.state
+}
+
+// Int64 returns the integer resulting from truncating d towards zero. If the
+// resulting value would be out of bounds for an int64, then ok will be false.
+func (d *Decimal) Int64() (v int64, ok bool) {
+ return 0, false
+}
+
+// IsInt returns whether the value is an integer.
+func (d *Decimal) IsInt() bool {
+ return false
+}
+
+// Mul sets d to the product of x and y, and returns d.
+func (d *Decimal) Mul(x *Decimal, y *Decimal) *Decimal {
+ return d
+}
+
+// Neg sets d to the negated value of x, and returns d.
+func (d *Decimal) Neg(x *Decimal) *Decimal {
+ return d
+}
+
+// Round rescales d to the given maximum scale, and returns the rounded value of
+// d according to the given rounding mode.
+func (d *Decimal) Round(scale uint, rounding Rounding) *Decimal {
+ return d
+}
+
+// SetInt64 sets d's value to v, and returns d.
+func (d *Decimal) SetInt64(v int64) *Decimal {
+ return d
+}
+
+// SetRaw sets d to the value signified by the raw components.
+func (d *Decimal) SetRaw(base uint64, excess big.Int, state uint64) {
+ d.base = base
+ d.excess = excess
+ d.state = state
+}
+
+// SetString sets d to the parsed value to v in the given base, and returns d.
+func (d *Decimal) SetString(v string, base uint) (*Decimal, error) {
+ return d, nil
+}
+
+// SetUint64 sets d's value to v, and returns d.
+func (d *Decimal) SetUint64(v uint64) *Decimal {
+ return d
+}
+
+// Sign returns:
+//
+// -1 if d < 0
+// 0 if d == 0
+// +1 if d > 0
+func (d *Decimal) Sign() int {
+ return 0
+}
+
+// Sqrt sets d to the square root of x, and returns d. It returns an error if x
+// is negative, will use a max scale of 20, and if rounding is needed, will use
+// ToNearestEven rounding.
+func (d *Decimal) Sqrt(x *Decimal) (*Decimal, error) {
+ return d, nil
+}
+
+// SqrtExplicit behaves the same as Sqrt, except with a specific max scale and
+// rounding mode.
+func (d *Decimal) SqrtExplicit(x *Decimal, scale uint, rounding Rounding) (*Decimal, error) {
+ return d, nil
+}
+
+// String returns a decimal representation of d. It will use a max scale of 20,
+// and if rounding is needed, will use ToNearestEven rounding.
+func (d *Decimal) String() string {
+ return ""
+}
+
+// StringExplicit behaves the same as String, except with a specific max scale
+// and rounding mode.
+func (d *Decimal) StringExplicit(scale uint, rounding Rounding) string {
+ return ""
+}
+
+// Sub sets d to the difference between x and y, and returns d.
+func (d *Decimal) Sub(x *Decimal, y *Decimal) *Decimal {
+ return d
+}
+
+// Uint64 returns the unsigned integer resulting from truncating d towards zero.
+// If the resulting value would be out of bounds for a uint64, then ok will be
+// false.
+func (d *Decimal) Uint64() (v uint64, ok bool) {
+ return 0, false
+}
+
+// Rounding specifies the rounding mode for certain operations.
+type Rounding int
+
+// FromInt64 returns a decimal with the value set to v.
+func FromInt64(v int64) *Decimal {
+ return New().SetInt64(v)
+}
+
+// FromRaw returns a decimal with the value signified by the raw components.
+func FromRaw(base uint64, excess big.Int, state uint64) *Decimal {
+ return &Decimal{
+ base: base,
+ excess: excess,
+ state: state,
+ }
+}
+
+// FromString returns a decimal with the parsed value of v in the given base.
+func FromString(v string, base uint) (*Decimal, error) {
+ return New().SetString(v, base)
+}
+
+// FromUint64 returns a decimal with the value set to v.
+func FromUint64(v uint64) *Decimal {
+ return New().SetUint64(v)
+}
+
+// MustDecimal returns a decimal value from the given decimal string
+// representation. It panics if there was an error parsing the string.
+func MustDecimal(v string) *Decimal {
+ d, err := FromString(v, 10)
+ if err != nil {
+ panic(err)
+ }
+ return d
+}
+
+// New returns a zero valued decimal.
+func New() *Decimal {
+ return &Decimal{}
+}
|
espra/espra | c48f190cabc955c09786a4ee3ed00659fa3aa3f0 | pkg/xon: define the base API for the package | diff --git a/pkg/xon/xon.go b/pkg/xon/xon.go
new file mode 100644
index 0000000..8e6107b
--- /dev/null
+++ b/pkg/xon/xon.go
@@ -0,0 +1,192 @@
+// Public Domain (-) 2020-present, The Core Authors.
+// See the Core UNLICENSE file for details.
+
+// Package xon implements the eXtensible Object Notation (XON) format.
+package xon
+
+import (
+ "fmt"
+ "math/big"
+ "strconv"
+)
+
+var floatZero = big.NewFloat(0)
+
+// IsZeroer defines the interface that struct values should implement so that
+// the marshaler can determine whether to omit the value.
+type IsZeroer interface {
+ IsZero() bool
+}
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// into valid XON.
+type Marshaler interface {
+ MarshalXON() ([]byte, error)
+}
+
+// Number represents a arbitrary-precision number.
+type Number struct {
+ raw string
+}
+
+// BigFloat tries to parse a big.Float value from the number. The returned value
+// is set to the equivalent precision of a binary128 value, i.e. 113 bits, and
+// has the rounding mode set to nearest even.
+func (n Number) BigFloat() (*big.Float, error) {
+ if n.raw == "" {
+ return &big.Float{}, nil
+ }
+ v, _, err := big.ParseFloat(n.raw, 10, 113, big.ToNearestEven)
+ return v, err
+}
+
+// BigInt tries to parse a big.Int value from the number.
+func (n Number) BigInt() (*big.Int, error) {
+ if n.raw == "" {
+ return &big.Int{}, nil
+ }
+ v, ok := new(big.Int).SetString(n.raw, 10)
+ if !ok {
+ return nil, fmt.Errorf("xon: unable to parse %q into a big.Int", n.raw)
+ }
+ return v, nil
+}
+
+// Float64 tries to parse a float64 value from the number.
+func (n Number) Float64() (float64, error) {
+ if n.raw == "" {
+ return 0, nil
+ }
+ return strconv.ParseFloat(n.raw, 64)
+}
+
+// Int64 tries to parse an int64 value from the number.
+func (n Number) Int64() (int64, error) {
+ if n.raw == "" {
+ return 0, nil
+ }
+ return strconv.ParseInt(n.raw, 10, 64)
+}
+
+// IsZero implements the IsZeroer interface.
+func (n Number) IsZero() bool {
+ if n.raw == "" {
+ return true
+ }
+ f, err := n.BigFloat()
+ if err != nil {
+ panic(fmt.Errorf("xon: failed to convert %q into a big.Float: %s", n.raw, err))
+ }
+ return f.Cmp(floatZero) == 0
+}
+
+// MarshalXON implements the Marshaler interface.
+func (n Number) MarshalXON() ([]byte, error) {
+ return []byte(n.raw), nil
+}
+
+func (n Number) String() string {
+ if n.raw == "" {
+ return "0"
+ }
+ return n.raw
+}
+
+// Uint64 tries to parse a uint64 value from the number.
+func (n Number) Uint64() (uint64, error) {
+ if n.raw == "" {
+ return 0, nil
+ }
+ return strconv.ParseUint(n.raw, 10, 64)
+}
+
+// UnmarshalXON implements the Marshaler interface.
+func (n *Number) UnmarshalXON(data []byte) error {
+ if len(data) == 0 {
+ n.raw = ""
+ return nil
+ }
+ // TODO(tav): Perhaps validate the number.
+ n.raw = string(data)
+ return nil
+}
+
+// UnitValue represents a numeric component followed by a unit.
+type UnitValue struct {
+ Unit string
+ Value Number
+}
+
+// IsZero implements the IsZeroer interface.
+func (u UnitValue) IsZero() bool {
+ return u.Unit == "" && u.Value.IsZero()
+}
+
+func (u UnitValue) String() string {
+ return u.Value.String() + u.Unit
+}
+
+// Unmarshaler is the interface implemented by types that can unmarshal a XON
+// description of themselves. The input can be assumed to be a valid encoding of
+// a XON value. UnmarshalXON must copy the XON data if it wishes to retain the
+// data after returning.
+type Unmarshaler interface {
+ UnmarshalXON([]byte) error
+}
+
+// Variant represents the name and fields of an enum variant.
+type Variant struct {
+ Fields map[string]interface{}
+ Name string
+}
+
+// IsZero implements the IsZeroer interface.
+func (v Variant) IsZero() bool {
+ return len(v.Fields) == 0 && v.Name == ""
+}
+
+// Format will reformat the given src in the canonical XON style.
+func Format(src []byte) ([]byte, error) {
+ return nil, nil
+}
+
+// Marshal returns the XON encoding of v.
+func Marshal(v interface{}) ([]byte, error) {
+ return nil, nil
+}
+
+// RegisterVariant allows for the registration of an enum variant. The given typ
+// must be a pointer to an interface, and the variant must be a pointer to a
+// struct implementing that interface. The name of the struct type will be used
+// when the variant is converted to/from XON.
+func RegisterVariant(typ interface{}, variant interface{}) error {
+ return nil
+}
+
+// Unmarshal parses the XON-encoded data and stores the result in the value
+// pointed to by v.
+//
+// When unmarshalling into an empty interface value, the following types are
+// used:
+//
+// []byte, for XON binary blobs
+// bool, for XON booleans
+// bytesize.Value, for XON bytesize values
+// Duration, for XON duration values
+// Number, for XON numbers
+// string, for XON strings
+// time.Time, for XON dates and timestamps
+// UnitValue, for XON unit values
+// []interface{}, for XON lists
+// map[string]interface{}, for XON structs
+// Variant, for XON enum variants
+//
+func Unmarshal(data []byte, v interface{}) error {
+ return nil
+}
+
+// UnmarshalStrict behaves the same as Unmarshal, except that it will error when
+// decoding a field into a struct that doesn't have a field with the same name.
+func UnmarshalStrict(data []byte, v interface{}) error {
+ return nil
+}
|
espra/espra | 946aad187969a65a91d0a1db688f1377eb1ea6ae | pkg/ident: add support for converting between case styles | diff --git a/pkg/ident/ident.go b/pkg/ident/ident.go
new file mode 100644
index 0000000..2112d93
--- /dev/null
+++ b/pkg/ident/ident.go
@@ -0,0 +1,257 @@
+// Public Domain (-) 2018-present, The Core Authors.
+// See the Core UNLICENSE file for details.
+
+// Package ident provides support for converting identifiers between different
+// case styles.
+package ident
+
+import (
+ "bytes"
+)
+
+// Parts represents the normalised elements of an identifier.
+type Parts [][]byte
+
+func (p Parts) String() string {
+ return string(bytes.Join(p, []byte{','}))
+}
+
+// ToCamel converts the identifier into a camelCased string.
+func (p Parts) ToCamel() string {
+ var out []byte
+ for idx, part := range p {
+ if idx == 0 {
+ out = append(out, bytes.ToLower(part)...)
+ } else {
+ out = append(out, part...)
+ }
+ }
+ return string(out)
+}
+
+// ToKebab converts the identifier into a kebab-cased string.
+func (p Parts) ToKebab() string {
+ var out []byte
+ last := len(p) - 1
+ for idx, part := range p {
+ out = append(out, bytes.ToLower(part)...)
+ if idx != last {
+ out = append(out, '-')
+ }
+ }
+ return string(out)
+}
+
+// ToPascal converts the identifier into a PascalCased string.
+func (p Parts) ToPascal() string {
+ var out []byte
+ for _, part := range p {
+ out = append(out, part...)
+ }
+ return string(out)
+}
+
+// ToScreamingSnake converts the identifier into a SCREAMING_SNAKE_CASED string.
+func (p Parts) ToScreamingSnake() string {
+ var out []byte
+ last := len(p) - 1
+ for idx, part := range p {
+ out = append(out, bytes.ToUpper(part)...)
+ if idx != last {
+ out = append(out, '_')
+ }
+ }
+ return string(out)
+}
+
+// ToSnake converts the identifier into a snake_cased string.
+func (p Parts) ToSnake() string {
+ var out []byte
+ last := len(p) - 1
+ for idx, part := range p {
+ out = append(out, bytes.ToLower(part)...)
+ if idx != last {
+ out = append(out, '_')
+ }
+ }
+ return string(out)
+}
+
+// FromCamel parses the given camelCased identifier into its parts.
+func FromCamel(ident string) Parts {
+ var parts Parts
+ i := 0
+ for ; i < len(ident); i++ {
+ char := ident[i]
+ if char >= 'A' && char <= 'Z' {
+ break
+ }
+ }
+ parts = append(parts, normalize([]byte(ident[:i])))
+ return append(parts, FromPascal(ident[i:])...)
+}
+
+// FromKebab parses the given kebab-cased identifier into its parts.
+func FromKebab(ident string) Parts {
+ var (
+ elem []byte
+ parts Parts
+ )
+ for i := 0; i < len(ident); i++ {
+ char := ident[i]
+ if char == '-' {
+ if len(elem) == 0 {
+ continue
+ }
+ parts = append(parts, normalize(bytes.ToLower(elem)))
+ elem = []byte{}
+ } else {
+ elem = append(elem, char)
+ }
+ }
+ if len(elem) > 0 {
+ parts = append(parts, normalize(bytes.ToLower(elem)))
+ }
+ return parts
+}
+
+// FromPascal parses the given PascalCased identifier into its parts.
+func FromPascal(ident string) Parts {
+ var (
+ elem []byte
+ parts Parts
+ )
+ caps := true
+ for i := 0; i < len(ident); i++ {
+ char := ident[i]
+ if char >= 'A' && char <= 'Z' {
+ if caps {
+ elem = append(elem, char)
+ } else {
+ caps = true
+ parts = processLeftover(parts, elem)
+ elem = []byte{char}
+ }
+ } else if caps {
+ caps = false
+ elem = append(elem, char)
+ parts, elem = process(parts, elem)
+ } else {
+ elem = append(elem, char)
+ }
+ }
+ if len(elem) > 0 {
+ parts = processLeftover(parts, elem)
+ }
+ return parts
+}
+
+// FromScreamingSnake parses the given SCREAMING_SNAKE_CASED identifier into its
+// parts.
+func FromScreamingSnake(ident string) Parts {
+ return FromSnake(ident)
+}
+
+// FromSnake parses the given snake_cased identifier into its parts.
+func FromSnake(ident string) Parts {
+ var (
+ elem []byte
+ parts Parts
+ )
+ for i := 0; i < len(ident); i++ {
+ char := ident[i]
+ if char == '_' {
+ if len(elem) == 0 {
+ continue
+ }
+ parts = append(parts, normalize(bytes.ToLower(elem)))
+ elem = []byte{}
+ } else {
+ elem = append(elem, char)
+ }
+ }
+ if len(elem) > 0 {
+ parts = append(parts, normalize(bytes.ToLower(elem)))
+ }
+ return parts
+}
+
+func normalize(elem []byte) []byte {
+ if special, ok := mapping[string(bytes.ToUpper(elem))]; ok {
+ return []byte(special)
+ }
+ if len(elem) > 0 && 'a' <= elem[0] && elem[0] <= 'z' {
+ elem[0] -= 32
+ }
+ return elem
+}
+
+func process(parts Parts, elem []byte) (Parts, []byte) {
+ var nelem []byte
+ // Try to match exactly.
+ if special, ok := mapping[string(bytes.ToUpper(elem))]; ok {
+ return append(parts, []byte(special)), nil
+ }
+ // Try to match from the end for the longest identifier with a non-uppercase
+ // suffix.
+ last := ""
+ pos := -1
+ for i := len(elem) - 1; i >= 0; i-- {
+ if special, ok := mapping[string(bytes.ToUpper(elem[i:]))]; ok {
+ last = special
+ pos = i
+ }
+ }
+ if pos == -1 {
+ nelem = elem[len(elem)-2:]
+ elem = elem[:len(elem)-2]
+ } else {
+ elem = elem[:pos]
+ }
+ // Try to find the longest matches from the start.
+ for len(elem) > 0 {
+ match := ""
+ pos := -1
+ for i := 0; i <= len(elem); i++ {
+ if special, ok := mapping[string(bytes.ToUpper(elem[:i]))]; ok {
+ match = special
+ pos = i
+ }
+ }
+ if pos == -1 {
+ parts = append(parts, elem)
+ break
+ }
+ parts = append(parts, []byte(match))
+ elem = elem[pos:]
+ }
+ if len(last) > 0 {
+ parts = append(parts, []byte(last))
+ }
+ return parts, nelem
+}
+
+func processLeftover(parts Parts, elem []byte) Parts {
+ // Try to match exactly.
+ if special, ok := mapping[string(bytes.ToUpper(elem))]; ok {
+ return append(parts, []byte(special))
+ }
+ // Try to find the longest matches from the start.
+ for len(elem) > 0 {
+ match := ""
+ pos := -1
+ for i := 0; i <= len(elem); i++ {
+ if special, ok := mapping[string(bytes.ToUpper(elem[:i]))]; ok {
+ match = special
+ pos = i
+ }
+ }
+ if pos == -1 {
+ parts = append(parts, elem)
+ break
+ }
+ parts = append(parts, []byte(match))
+ elem = elem[pos:]
+ }
+ return parts
+}
diff --git a/pkg/ident/ident_test.go b/pkg/ident/ident_test.go
new file mode 100644
index 0000000..b9589fc
--- /dev/null
+++ b/pkg/ident/ident_test.go
@@ -0,0 +1,188 @@
+// Public Domain (-) 2020-present, The Core Authors.
+// See the Core UNLICENSE file for details.
+
+package ident
+
+import (
+ "strings"
+ "testing"
+)
+
+var spec = map[string]*definition{
+ "HTTPSServer": {
+ camel: "httpsServer",
+ kebab: "https-server",
+ },
+ "IDs": {
+ camel: "ids",
+ kebab: "ids",
+ },
+ "IDsMap": {
+ camel: "idsMap",
+ kebab: "ids-map",
+ },
+ "NetworkCIDR": {
+ camel: "networkCIDR",
+ kebab: "network-cidr",
+ },
+ "PeerAPIOp": {
+ camel: "peerAPIOp",
+ kebab: "peer-api-op",
+ },
+ "PeerIDs": {
+ camel: "peerIDs",
+ kebab: "peer-ids",
+ },
+ "ServiceAPIKey": {
+ camel: "serviceAPIKey",
+ kebab: "service-api-key",
+ },
+ "ServiceKey": {
+ camel: "serviceKey",
+ kebab: "service-key",
+ },
+ "UserACLIDs": {
+ camel: "userACLIDs",
+ kebab: "user-acl-ids",
+ },
+ "Username": {
+ camel: "username",
+ kebab: "username",
+ },
+ "XMLHTTP": {
+ camel: "xmlHTTP",
+ kebab: "xml-http",
+ },
+ "XMLHTTPRequest": {
+ camel: "xmlHTTPRequest",
+ kebab: "xml-http-request",
+ },
+}
+
+var tests = []testcase{
+ {"https-server", spec["HTTPSServer"]},
+ {"https-server-", spec["HTTPSServer"]},
+ {"-https-server", spec["HTTPSServer"]},
+ {"--https-server-", spec["HTTPSServer"]},
+ {"ids", spec["IDs"]},
+ {"ids-", spec["IDs"]},
+ {"-ids", spec["IDs"]},
+ {"--ids-", spec["IDs"]},
+ {"ids-map", spec["IDsMap"]},
+ {"ids-map-", spec["IDsMap"]},
+ {"-ids-map", spec["IDsMap"]},
+ {"--ids-map-", spec["IDsMap"]},
+ {"network-cidr", spec["NetworkCIDR"]},
+ {"network-cidr-", spec["NetworkCIDR"]},
+ {"-network-cidr", spec["NetworkCIDR"]},
+ {"--network-cidr-", spec["NetworkCIDR"]},
+ {"peer-api-op", spec["PeerAPIOp"]},
+ {"peer-api-op-", spec["PeerAPIOp"]},
+ {"-peer-api-op", spec["PeerAPIOp"]},
+ {"--peer-api-op-", spec["PeerAPIOp"]},
+ {"peer-ids", spec["PeerIDs"]},
+ {"peer-ids-", spec["PeerIDs"]},
+ {"-peer-ids", spec["PeerIDs"]},
+ {"--peer-ids-", spec["PeerIDs"]},
+ {"service-api-key", spec["ServiceAPIKey"]},
+ {"service-api-key-", spec["ServiceAPIKey"]},
+ {"-service-api-key", spec["ServiceAPIKey"]},
+ {"--service-api-key-", spec["ServiceAPIKey"]},
+ {"service-key", spec["ServiceKey"]},
+ {"service-key-", spec["ServiceKey"]},
+ {"-service-key", spec["ServiceKey"]},
+ {"--service-key-", spec["ServiceKey"]},
+ {"user-acl-ids", spec["UserACLIDs"]},
+ {"user-acl-ids-", spec["UserACLIDs"]},
+ {"-user-acl-ids", spec["UserACLIDs"]},
+ {"--user-acl-ids-", spec["UserACLIDs"]},
+ {"username", spec["Username"]},
+ {"username-", spec["Username"]},
+ {"-username", spec["Username"]},
+ {"--username-", spec["Username"]},
+ {"xml-http", spec["XMLHTTP"]},
+ {"xml-http-", spec["XMLHTTP"]},
+ {"-xml-http", spec["XMLHTTP"]},
+ {"--xml-http-", spec["XMLHTTP"]},
+ {"xml-http-request", spec["XMLHTTPRequest"]},
+ {"xml-http-request-", spec["XMLHTTPRequest"]},
+ {"-xml-http-request", spec["XMLHTTPRequest"]},
+ {"--xml-http-request-", spec["XMLHTTPRequest"]},
+}
+
+type definition struct {
+ camel string
+ kebab string
+ pascal string
+ screaming string
+ snake string
+}
+
+type testcase struct {
+ ident string
+ want *definition
+}
+
+func TestCamel(t *testing.T) {
+ for _, tt := range spec {
+ testConversion(t, "Camel", FromCamel, tt.camel, tt)
+ }
+}
+
+func TestKebab(t *testing.T) {
+ for _, tt := range tests {
+ testConversion(t, "Kebab", FromKebab, tt.ident, tt.want)
+ }
+}
+
+func TestPascal(t *testing.T) {
+ for _, tt := range spec {
+ testConversion(t, "Pascal", FromPascal, tt.pascal, tt)
+ }
+}
+
+func TestScreamingSnake(t *testing.T) {
+ for _, tt := range tests {
+ ident := strings.ToUpper(strings.ReplaceAll(tt.ident, "-", "_"))
+ testConversion(t, "Snake", FromSnake, ident, tt.want)
+ }
+}
+
+func TestSnake(t *testing.T) {
+ for _, tt := range tests {
+ ident := strings.ReplaceAll(tt.ident, "-", "_")
+ testConversion(t, "Snake", FromSnake, ident, tt.want)
+ }
+}
+
+func testConversion(t *testing.T, typ string, conv func(string) Parts, ident string, want *definition) {
+ id := conv(ident)
+ got := id.ToCamel()
+ if got != want.camel {
+ t.Errorf("From%s(%q).ToCamel() = %q: want %q", typ, ident, got, want.camel)
+ }
+ got = id.ToKebab()
+ if got != want.kebab {
+ t.Errorf("From%s(%q).ToKebab() = %q: want %q", typ, ident, got, want.kebab)
+ }
+ got = id.ToPascal()
+ if got != want.pascal {
+ t.Errorf("From%s(%q).ToPascal() = %q: want %q", typ, ident, got, want.pascal)
+ }
+ got = id.ToScreamingSnake()
+ if got != want.screaming {
+ t.Errorf("From%s(%q).ToScreamingSnake() = %q: want %q", typ, ident, got, want.screaming)
+ }
+ got = id.ToSnake()
+ if got != want.snake {
+ t.Errorf("From%s(%q).ToSnake() = %q: want %q", typ, ident, got, want.snake)
+ }
+}
+
+func init() {
+ for pascal, definition := range spec {
+ definition.pascal = pascal
+ definition.snake = strings.ReplaceAll(definition.kebab, "-", "_")
+ definition.screaming = strings.ToUpper(definition.snake)
+ }
+}
diff --git a/pkg/ident/initialism.go b/pkg/ident/initialism.go
new file mode 100644
index 0000000..f4ee3c4
--- /dev/null
+++ b/pkg/ident/initialism.go
@@ -0,0 +1,142 @@
+// Public Domain (-) 2018-present, The Core Authors.
+// See the Core UNLICENSE file for details.
+
+package ident
+
+import (
+ "strings"
+)
+
+var mapping = map[string]string{}
+
+// This list helps us satisfy the recommended naming style of variables in Go:
+// https://github.com/golang/go/wiki/CodeReviewComments#initialisms
+//
+// The list is always going to be incomplete, so please add to it as we come
+// across new initialisms.
+var initialisms = []string{
+ "ACK",
+ "ACL",
+ "ACLs",
+ "AES",
+ "ANSI",
+ "API",
+ "APIs",
+ "ARP",
+ "ASCII",
+ "ASN1",
+ "ATM",
+ "BGP",
+ "BIOS",
+ "BLAKE",
+ "BLAKE3",
+ "BSS",
+ "CA",
+ "CIDR",
+ "CLI",
+ "CLUI",
+ "CPU",
+ "CPUs",
+ "CRC",
+ "CSRF",
+ "CSS",
+ "CSV",
+ "DB",
+ "DBs",
+ "DHCP",
+ "DNS",
+ "DRM",
+ "EOF",
+ "EON",
+ "FTP",
+ "GRPC",
+ "GUID",
+ "GUIDs",
+ "HCL",
+ "HTML",
+ "HTTP",
+ "HTTPS",
+ "IANA",
+ "ICMP",
+ "ID",
+ "IDs",
+ "IEEE",
+ "IMAP",
+ "IP",
+ "IPs",
+ "IRC",
+ "ISO",
+ "ISP",
+ "JSON",
+ "LAN",
+ "LHS",
+ "MAC",
+ "MD5",
+ "MTU",
+ "NATO",
+ "NIC",
+ "NVRAM",
+ "OSI",
+ "PEM",
+ "POP3",
+ "QPS",
+ "QUIC",
+ "RAM",
+ "RFC",
+ "RFCs",
+ "RHS",
+ "RPC",
+ "SFTP",
+ "SHA",
+ "SHA1",
+ "SHA256",
+ "SHA512",
+ "SLA",
+ "SMTP",
+ "SQL",
+ "SRAM",
+ "SSH",
+ "SSID",
+ "SSL",
+ "SYN",
+ "TCP",
+ "TLS",
+ "TOML",
+ "TPS",
+ "TTL",
+ "UDP",
+ "UI",
+ "UID",
+ "UIDs",
+ "URI",
+ "URL",
+ "USB",
+ "UTF8",
+ "UUID",
+ "UUIDs",
+ "VLAN",
+ "VM",
+ "VPN",
+ "W3C",
+ "WPA",
+ "WiFi",
+ "XML",
+ "XMPP",
+ "XON",
+ "XSRF",
+ "XSS",
+ "YAML",
+}
+
+// AddInitialism adds the given identifier to the set of initialisms. The given
+// identifier should be in the PascalCase form and have at most one lower-cased
+// letter which must be at the very end.
+func AddInitialism(ident string) {
+ mapping[strings.ToUpper(ident)] = ident
+}
+
+func init() {
+ for _, s := range initialisms {
+ mapping[strings.ToUpper(s)] = s
+ }
+}
|
espra/espra | eb5a2f5ae2e3c18b460689dd9808f6e8290abde6 | cmd/genasm: add state support across vector registers and stack | diff --git a/cmd/genasm/asm/alloc.go b/cmd/genasm/asm/alloc.go
new file mode 100644
index 0000000..a2aa58b
--- /dev/null
+++ b/cmd/genasm/asm/alloc.go
@@ -0,0 +1,193 @@
+package asm
+
+import (
+ "fmt"
+ "runtime"
+
+ "github.com/mmcloughlin/avo/operand"
+ "github.com/mmcloughlin/avo/reg"
+)
+
+// Alloc maintains state across physical vector registers and the stack.
+type Alloc struct {
+ ctr int
+ ctx *Context
+ m operand.Mem
+ mslot int
+ n int
+ phys []reg.VecPhysical
+ regs mem
+ span int
+ spills int
+ stack mem
+ values map[int]*Value
+}
+
+// Free prints any values that have leaked.
+func (a *Alloc) Free() {
+ for id, v := range a.values {
+ fmt.Println("leaked value:", id, "==", v.id, "\n", v.stack)
+ }
+}
+
+// FreeReg returns a free register, or -1 if none are available.
+func (a *Alloc) FreeReg() int {
+ n, ok := a.regs.alloc(a.n)
+ if !ok {
+ return -1
+ }
+ a.regs.free(n)
+ return n
+}
+
+// Stats prints the current allocation stats and returns a function that should
+// be deferred to print the final allocation stats.
+func (a *Alloc) Stats(name string) func() {
+ a.stats(name, "in")
+ return func() { a.stats(name, "out") }
+}
+
+// Value creates a fresh value.
+func (a *Alloc) Value() *Value {
+ var buf [4096]byte
+ a.ctr++
+ v := &Value{
+ a: a,
+ age: a.ctr,
+ id: a.ctr,
+ reg: -1,
+ stack: string(buf[:runtime.Stack(buf[:], false)]),
+ state: stateEmpty{},
+ }
+ a.values[v.id] = v
+ return v
+}
+
+// ValueFrom creates a value that is lazily loaded from the given source.
+func (a *Alloc) ValueFrom(m operand.Mem) *Value {
+ v := a.Value()
+ v.state = stateLazy{mem: m}
+ return v
+}
+
+// ValueWith creates a value that is lazily loaded with the given source.
+func (a *Alloc) ValueWith(m operand.Mem) *Value {
+ v := a.Value()
+ v.state = stateLazy{broadcast: true, mem: m}
+ return v
+}
+
+// Values creates a slice of fresh values.
+func (a *Alloc) Values(n int) []*Value {
+ out := make([]*Value, n)
+ for i := range out {
+ out[i] = a.Value()
+ }
+ return out
+}
+
+// ValuesWith creates a slice of values that are lazily loaded with the given
+// source. The given sizeof in bits, determines the memory offset used for each
+// of the slice elements.
+func (a *Alloc) ValuesWith(n int, m operand.Mem, sizeof int) []*Value {
+ size := sizeof / 8
+ out := make([]*Value, n)
+ for i := range out {
+ out[i] = a.ValueWith(m.Offset(size * i))
+ }
+ return out
+}
+
+func (a *Alloc) allocReg(except *Value) int {
+ reg, ok := a.regs.alloc(a.n)
+ if ok {
+ return reg
+ }
+ oldest := a.findOldestLive(except)
+ state := oldest.state.(stateLive)
+ oldest.displaceTo(a.allocSpot())
+ a.regs[state.reg] = struct{}{}
+ return state.reg
+}
+
+func (a *Alloc) allocSpot() valueState {
+ reg, ok := a.regs.alloc(a.n)
+ if ok {
+ return a.newStateLive(reg)
+ }
+ slot := a.stack.mustAlloc()
+ a.spills++
+ if slot > a.mslot {
+ a.mslot = slot
+ }
+ return stateSpilled{
+ aligned: true,
+ mem: a.m,
+ slot: slot,
+ span: a.span,
+ }
+}
+
+func (a *Alloc) findOldestLive(except *Value) *Value {
+ var oldest *Value
+ for _, v := range a.values {
+ if oldest == except || !v.state.live() {
+ continue
+ }
+ if oldest == nil || v.age < oldest.age {
+ oldest = v
+ }
+ }
+ return oldest
+}
+
+func (a *Alloc) newStateLive(reg int) stateLive {
+ return stateLive{
+ phys: a.phys,
+ reg: reg,
+ }
+}
+
+func (a *Alloc) stats(name, when string) {
+ fmt.Printf("// [%s] %s: %d/%d free (%d total + %d spills + %d slots)\n",
+ name, when, a.n-len(a.regs), a.n, len(a.values), a.spills, a.mslot+1)
+}
+
+type mem map[int]struct{}
+
+func (m mem) alloc(max int) (n int, ok bool) {
+ for max == 0 || n < max {
+ if _, ok := m[n]; !ok {
+ m[n] = struct{}{}
+ return n, true
+ }
+ n++
+ }
+ return 0, false
+}
+
+func (m mem) free(n int) {
+ delete(m, n)
+}
+
+func (m mem) mustAlloc() (n int) {
+ n, ok := m.alloc(0)
+ if !ok {
+ panic("unable to alloc")
+ }
+ return n
+}
+
+func newAlloc(ctx *Context, base reg.Register, r RegisterSet) *Alloc {
+ return &Alloc{
+ ctx: ctx,
+ m: operand.Mem{Base: base},
+ mslot: -1,
+ n: r.n,
+ phys: r.registers,
+ regs: mem{},
+ span: r.size / 8,
+ stack: mem{},
+ values: map[int]*Value{},
+ }
+}
diff --git a/cmd/genasm/asm/asm.go b/cmd/genasm/asm/asm.go
index b4e43bc..1a3a119 100644
--- a/cmd/genasm/asm/asm.go
+++ b/cmd/genasm/asm/asm.go
@@ -1,41 +1,67 @@
package asm
import (
"github.com/mmcloughlin/avo/attr"
"github.com/mmcloughlin/avo/build"
"github.com/mmcloughlin/avo/operand"
+ "github.com/mmcloughlin/avo/reg"
)
// Context maintains state for incrementally building an avo File.
type Context struct {
*build.Context
- NOSPLIT attr.Attribute
+ DUPOK attr.Attribute
+ NEEDCTXT attr.Attribute
+ NOFRAME attr.Attribute
+ NOPROF attr.Attribute
+ NOPTR attr.Attribute
+ NOSPLIT attr.Attribute
+ REFLECTMETHOD attr.Attribute
+ RODATA attr.Attribute
+ TLSBSS attr.Attribute
+ TOPFRAME attr.Attribute
+ WRAPPER attr.Attribute
}
// DATA adds a data value to the active data section.
func (c *Context) DATA(offset int, v operand.Constant) {
c.AddDatum(offset, v)
}
// GLOBL declares a new static global data section with the given attributes.
func (c *Context) GLOBL(name string, a attr.Attribute) operand.Mem {
g := c.StaticGlobal(name)
c.DataAttributes(a)
return g
}
+// NewAlloc instantiates a new Alloc instance for the given vector register set.
+func (c *Context) NewAlloc(base reg.Register, r RegisterSet) *Alloc {
+ return newAlloc(c, base, r)
+}
+
// TEXT starts building a new function called name, with attributes a, and sets
// its signature (see SignatureExpr).
func (c *Context) TEXT(name string, a attr.Attribute, signature string) {
c.Function(name)
c.Attributes(a)
c.SignatureExpr(signature)
}
// NewContext initializes an empty build Context.
func NewContext() *Context {
return &Context{
- Context: build.NewContext(),
- NOSPLIT: attr.NOSPLIT,
+ Context: build.NewContext(),
+ DUPOK: attr.DUPOK,
+ NEEDCTXT: attr.NEEDCTXT,
+ NOFRAME: attr.NOFRAME,
+ NOPROF: attr.NOPROF,
+ NOPTR: attr.NOPTR,
+ NOSPLIT: attr.NOSPLIT,
+ REFLECTMETHOD: attr.REFLECTMETHOD,
+ RODATA: attr.RODATA,
+ TLSBSS: attr.TLSBSS,
+ TOPFRAME: attr.TOPFRAME,
+ WRAPPER: attr.WRAPPER,
}
}
diff --git a/cmd/genasm/asm/reg.go b/cmd/genasm/asm/reg.go
new file mode 100644
index 0000000..8f294ae
--- /dev/null
+++ b/cmd/genasm/asm/reg.go
@@ -0,0 +1,55 @@
+// Public Domain (-) 2020-present, The Core Authors.
+// See the Core UNLICENSE file for details.
+
+package asm
+
+import (
+ "github.com/mmcloughlin/avo/reg"
+)
+
+// XMM represents the registers introduced by SSE.
+var XMM = RegisterSet{
+ n: 16,
+ registers: []reg.VecPhysical{
+ reg.X0, reg.X1, reg.X2, reg.X3,
+ reg.X4, reg.X5, reg.X6, reg.X7,
+ reg.X8, reg.X9, reg.X10, reg.X11,
+ reg.X12, reg.X13, reg.X14, reg.X15,
+ },
+ size: 128,
+}
+
+// YMM represents the registers introduced by AVX.
+var YMM = RegisterSet{
+ n: 16,
+ registers: []reg.VecPhysical{
+ reg.Y0, reg.Y1, reg.Y2, reg.Y3,
+ reg.Y4, reg.Y5, reg.Y6, reg.Y7,
+ reg.Y8, reg.Y9, reg.Y10, reg.Y11,
+ reg.Y12, reg.Y13, reg.Y14, reg.Y15,
+ },
+ size: 256,
+}
+
+// ZMM represents the registers introduced by AVX512.
+var ZMM = RegisterSet{
+ n: 32,
+ registers: []reg.VecPhysical{
+ reg.Z0, reg.Z1, reg.Z2, reg.Z3,
+ reg.Z4, reg.Z5, reg.Z6, reg.Z7,
+ reg.Z8, reg.Z9, reg.Z10, reg.Z11,
+ reg.Z12, reg.Z13, reg.Z14, reg.Z15,
+ reg.Z16, reg.Z17, reg.Z18, reg.Z19,
+ reg.Z20, reg.Z21, reg.Z22, reg.Z23,
+ reg.Z24, reg.Z25, reg.Z26, reg.Z27,
+ reg.Z28, reg.Z29, reg.Z30, reg.Z31,
+ },
+ size: 512,
+}
+
+// RegisterSet represents a set of physical registers.
+type RegisterSet struct {
+ n int
+ registers []reg.VecPhysical
+ size int
+}
diff --git a/cmd/genasm/asm/value.go b/cmd/genasm/asm/value.go
new file mode 100644
index 0000000..853e8dd
--- /dev/null
+++ b/cmd/genasm/asm/value.go
@@ -0,0 +1,245 @@
+package asm
+
+import (
+ "fmt"
+
+ "github.com/mmcloughlin/avo/operand"
+ "github.com/mmcloughlin/avo/reg"
+)
+
+// Value represents a state value within Alloc.
+type Value struct {
+ a *Alloc
+ age int
+ id int
+ reg int // currently allocated register (sometimes dup'd in state)
+ stack string
+ state valueState
+}
+
+// Become assigns the value to the given register, displacing as necessary.
+func (v *Value) Become(reg int) {
+ if v.reg == reg {
+ return
+ }
+ if _, ok := v.a.regs[reg]; !ok {
+ v.a.regs[reg] = struct{}{}
+ v.displaceTo(v.a.newStateLive(reg))
+ return
+ }
+ for _, cand := range v.a.values {
+ if cand.reg != reg {
+ continue
+ }
+ state := cand.state
+ cand.displaceTo(cand.a.allocSpot())
+ v.displaceTo(state)
+ return
+ }
+}
+
+// Consume frees the value, and returns the register for the value â assigning a
+// register and loading data into it, if necessary.
+func (v *Value) Consume() reg.VecPhysical {
+ reg := v.Get()
+ v.free()
+ return reg
+}
+
+// ConsumeOp frees the value, and returns the location for its current state â
+// assigning a register and loading data into it, if necessary.
+func (v *Value) ConsumeOp() operand.Op {
+ op := v.GetOp()
+ v.free()
+ return op
+}
+
+// Get returns the register for the value. If the value is not already live,
+// then a register will be assigned and data will be loaded into it if
+// necessary.
+func (v *Value) Get() reg.VecPhysical {
+ v.touch()
+ switch state := v.state.(type) {
+ case stateEmpty:
+ v.alloc()
+ case stateLazy:
+ v.alloc()
+ if !state.broadcast {
+ v.a.ctx.VMOVDQU(state.mem, v.state.(stateLive).register())
+ } else {
+ v.a.ctx.VPBROADCASTD(state.mem, v.state.(stateLive).register())
+ }
+ case stateSpilled:
+ reg := v.alloc()
+ if state.aligned {
+ v.a.ctx.VMOVDQA(state.getMem(), v.a.phys[reg])
+ } else {
+ v.a.ctx.VMOVDQU(state.getMem(), v.a.phys[reg])
+ }
+ }
+ return v.state.(stateLive).register()
+}
+
+// GetOp returns the location of the state. If the value is not already live, or
+// not on the stack, or is broadcasted (repeated) from a source location, then a
+// register will be assigned and data will be loaded into it if necessary.
+func (v *Value) GetOp() operand.Op {
+ v.touch()
+ switch state := v.state.(type) {
+ case stateEmpty:
+ v.alloc()
+ case stateLazy:
+ if !state.broadcast {
+ return state.mem
+ }
+ reg := v.alloc()
+ v.a.ctx.VPBROADCASTD(state.mem, v.a.phys[reg])
+ case stateSpilled:
+ return state.getMem()
+ }
+ return v.state.(stateLive).register()
+}
+
+// HasReg returns whether the value has been assigned to a register.
+func (v *Value) HasReg() bool {
+ return v.reg >= 0
+}
+
+// Reg returns the offset for the value's register. The value will be assigned
+// to a register if that's not already the case.
+func (v *Value) Reg() int {
+ if v.reg < 0 {
+ v.reg = v.a.allocReg(v)
+ }
+ return v.reg
+}
+
+func (v *Value) String() string {
+ return fmt.Sprintf("Value(reg:%-2d state:%s)", v.reg, v.state)
+}
+
+func (v *Value) alloc() int {
+ reg := v.reg
+ if reg < 0 {
+ reg = v.a.allocReg(v)
+ }
+ v.setState(v.a.newStateLive(reg))
+ return reg
+}
+
+func (v *Value) displaceTo(dest valueState) {
+ if state, ok := dest.(stateSpilled); ok && state.aligned {
+ v.a.ctx.VMOVDQA(v.Get(), dest.op())
+ } else {
+ v.a.ctx.VMOVDQU(v.Get(), dest.op())
+ }
+ v.setState(dest)
+}
+
+func (v *Value) free() {
+ v.setState(nil)
+ delete(v.a.values, v.id)
+}
+
+func (v *Value) setState(state valueState) {
+ switch state := v.state.(type) {
+ case stateLive:
+ v.a.regs.free(state.reg)
+ v.reg = -1
+ case stateSpilled:
+ v.a.stack.free(state.slot)
+ }
+ v.state = state
+ switch state := state.(type) {
+ case stateLive:
+ v.a.regs[state.reg] = struct{}{}
+ v.reg = state.reg
+ case stateSpilled:
+ v.a.stack[state.slot] = struct{}{}
+ }
+}
+
+func (v *Value) touch() {
+ v.a.ctr++
+ v.age = v.a.ctr
+}
+
+type stateEmpty struct{}
+
+func (s stateEmpty) String() string {
+ return "Empty"
+}
+
+func (s stateEmpty) live() bool {
+ return false
+}
+
+func (s stateEmpty) op() operand.Op {
+ panic("no location for this state")
+}
+
+type stateLazy struct {
+ broadcast bool
+ mem operand.Mem
+}
+
+func (s stateLazy) String() string {
+ return fmt.Sprintf("Lazy(%t, %s)", s.broadcast, s.mem.Asm())
+}
+
+func (s stateLazy) live() bool {
+ return false
+}
+
+func (s stateLazy) op() operand.Op {
+ panic("no location for this state")
+}
+
+type stateLive struct {
+ phys []reg.VecPhysical
+ reg int
+}
+
+func (s stateLive) String() string {
+ return fmt.Sprintf("Live(%d)", s.reg)
+}
+
+func (s stateLive) live() bool {
+ return true
+}
+
+func (s stateLive) op() operand.Op {
+ return s.register()
+}
+
+func (s stateLive) register() reg.VecPhysical {
+ return s.phys[s.reg]
+}
+
+type stateSpilled struct {
+ aligned bool
+ mem operand.Mem
+ slot int
+ span int
+}
+
+func (s stateSpilled) String() string {
+ return fmt.Sprintf("Spilled(%d)", s.slot)
+}
+
+func (s stateSpilled) getMem() operand.Mem {
+ return s.mem.Offset(s.span * s.slot)
+}
+
+func (s stateSpilled) live() bool {
+ return false
+}
+
+func (s stateSpilled) op() operand.Op {
+ return s.getMem()
+}
+
+type valueState interface {
+ live() bool
+ op() operand.Op
+}
|
espra/espra | 5c5fbd67abe46c72abc34981db6663b05c9f1aa5 | doc/license: add license from the avo project | diff --git a/doc/license/avo/LICENSE b/doc/license/avo/LICENSE
new file mode 100644
index 0000000..c986d80
--- /dev/null
+++ b/doc/license/avo/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2018, Michael McLoughlin
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
espra/espra | 9c53f6cb3d2c34665e54493acb4005a7088bc8d9 | cmd/genasm: implement framework for generating asm files | diff --git a/THIRDPARTY.yaml b/THIRDPARTY.yaml
index ed9ebe7..3be78db 100644
--- a/THIRDPARTY.yaml
+++ b/THIRDPARTY.yaml
@@ -1,23 +1,31 @@
# This metadata file defines the copyright and licensing terms for files within
# Core ("The Work") which are based on works authored by third parties.
+avo:
+ sources:
+ - https://github.com/mmcloughlin/avo
+ terms:
+ - doc/license/avo/LICENSE
+ files:
+ - cmd/genasm/asm/asm.go
+
go:
sources:
- https://github.com/golang/crypto
- https://github.com/golang/sys
terms:
- doc/license/go/LICENSE
- doc/license/go/PATENTS
files:
- pkg/cpu/cpu_gc_x86.go
- pkg/cpu/cpu_gccgo_x86.c
- pkg/cpu/cpu_gccgo_x86.go
- pkg/cpu/cpu_x86.go
- pkg/cpu/cpu_x86.s
- pkg/crypto/hash.go
- pkg/kangaroo12/keccak.go
- pkg/kangaroo12/keccak_amd64.go
- pkg/kangaroo12/keccak_amd64.s
- pkg/kangaroo12/sponge.go
- pkg/kangaroo12/xor.go
- pkg/kangaroo12/xor_unaligned.go
diff --git a/cmd/genasm/asm/asm.go b/cmd/genasm/asm/asm.go
new file mode 100644
index 0000000..b4e43bc
--- /dev/null
+++ b/cmd/genasm/asm/asm.go
@@ -0,0 +1,41 @@
+package asm
+
+import (
+ "github.com/mmcloughlin/avo/attr"
+ "github.com/mmcloughlin/avo/build"
+ "github.com/mmcloughlin/avo/operand"
+)
+
+// Context maintains state for incrementally building an avo File.
+type Context struct {
+ *build.Context
+ NOSPLIT attr.Attribute
+}
+
+// DATA adds a data value to the active data section.
+func (c *Context) DATA(offset int, v operand.Constant) {
+ c.AddDatum(offset, v)
+}
+
+// GLOBL declares a new static global data section with the given attributes.
+func (c *Context) GLOBL(name string, a attr.Attribute) operand.Mem {
+ g := c.StaticGlobal(name)
+ c.DataAttributes(a)
+ return g
+}
+
+// TEXT starts building a new function called name, with attributes a, and sets
+// its signature (see SignatureExpr).
+func (c *Context) TEXT(name string, a attr.Attribute, signature string) {
+ c.Function(name)
+ c.Attributes(a)
+ c.SignatureExpr(signature)
+}
+
+// NewContext initializes an empty build Context.
+func NewContext() *Context {
+ return &Context{
+ Context: build.NewContext(),
+ NOSPLIT: attr.NOSPLIT,
+ }
+}
diff --git a/cmd/genasm/blake3/blake3.go b/cmd/genasm/blake3/blake3.go
new file mode 100644
index 0000000..3253d6b
--- /dev/null
+++ b/cmd/genasm/blake3/blake3.go
@@ -0,0 +1,16 @@
+// Public Domain (-) 2020-present, The Core Authors.
+// See the Core UNLICENSE file for details.
+
+package blake3
+
+import (
+ "dappui.com/cmd/genasm/pkg"
+)
+
+func init() {
+ pkg.Register("blake3", &pkg.Entry{
+ File: "hash_avx2",
+ Generator: genAVX2,
+ Stub: true,
+ })
+}
diff --git a/cmd/genasm/genasm.go b/cmd/genasm/genasm.go
new file mode 100644
index 0000000..61c4b34
--- /dev/null
+++ b/cmd/genasm/genasm.go
@@ -0,0 +1,21 @@
+// Public Domain (-) 2020-present, The Core Authors.
+// See the Core UNLICENSE file for details.
+
+// Command genasm generates Go assembly files for packages.
+package main
+
+import (
+ "fmt"
+ "os"
+
+ _ "dappui.com/cmd/genasm/blake3"
+ "dappui.com/cmd/genasm/pkg"
+)
+
+func main() {
+ if len(os.Args) == 1 {
+ fmt.Println("Usage: genasm PACKAGES ...")
+ os.Exit(0)
+ }
+ pkg.Generate(os.Args[1:])
+}
diff --git a/cmd/genasm/go.mod b/cmd/genasm/go.mod
new file mode 100644
index 0000000..ca6db2e
--- /dev/null
+++ b/cmd/genasm/go.mod
@@ -0,0 +1,5 @@
+module dappui.com/cmd/genasm
+
+go 1.13
+
+require github.com/mmcloughlin/avo v0.0.0-20200227021539-1859174b6275
diff --git a/cmd/genasm/go.sum b/cmd/genasm/go.sum
new file mode 100644
index 0000000..368247f
--- /dev/null
+++ b/cmd/genasm/go.sum
@@ -0,0 +1,12 @@
+github.com/mmcloughlin/avo v0.0.0-20200227021539-1859174b6275 h1:HWT+hP56uq/chzmwutJBI6/wEbUQTIBgKF28E+pIkdc=
+github.com/mmcloughlin/avo v0.0.0-20200227021539-1859174b6275/go.mod h1:L0u9qfRMLNBO97u6pPukRp6ncoQz0Q25W69fvtht3vA=
+golang.org/x/arch v0.0.0-20190909030613-46d78d1859ac/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190914235951-31e00f45c22e h1:nOOVVcLC+/3MeovP40q5lCiWmP1Z1DaN8yn8ngU63hw=
+golang.org/x/tools v0.0.0-20190914235951-31e00f45c22e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/cmd/genasm/pkg/pkg.go b/cmd/genasm/pkg/pkg.go
new file mode 100644
index 0000000..a59a739
--- /dev/null
+++ b/cmd/genasm/pkg/pkg.go
@@ -0,0 +1,138 @@
+// Public Domain (-) 2020-present, The Core Authors.
+// See the Core UNLICENSE file for details.
+
+package pkg
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "dappui.com/cmd/genasm/asm"
+ "github.com/mmcloughlin/avo/pass"
+ "github.com/mmcloughlin/avo/printer"
+)
+
+var (
+ indicators = []string{"blake3", "kangaroo12", "osexit"}
+ registry = map[string][]*Entry{}
+)
+
+// Entry represents the build configuration for a specific file in a package.
+type Entry struct {
+ Constraints string
+ File string
+ Generator func(*asm.Context)
+ Stub bool
+}
+
+// Generate will generate all the files for the given packages.
+func Generate(pkgs []string) {
+ root, err := findRoot()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "!! ERROR: Failed to find the root source directory:\n\n\t%s\n\n", err)
+ os.Exit(1)
+ }
+ for _, pkg := range pkgs {
+ if err := gen(root, pkg); err != nil {
+ fmt.Fprintf(os.Stderr, "!! ERROR: Failed to generate asm for pkg/%s:\n\n\t%s\n\n", pkg, err)
+ os.Exit(1)
+ }
+ }
+}
+
+// Register registers the given generator function with the
+func Register(pkg string, entry *Entry) {
+ registry[pkg] = append(registry[pkg], entry)
+}
+
+func findRoot() (string, error) {
+ dir, err := os.Getwd()
+ if err != nil {
+ return "", err
+ }
+ for {
+ failed := false
+ for _, indicator := range indicators {
+ path := filepath.Join(dir, "pkg", indicator)
+ _, err := os.Stat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ failed = true
+ continue
+ }
+ return "", err
+ }
+ }
+ if !failed {
+ return dir, nil
+ }
+ parent := filepath.Dir(dir)
+ if parent == dir {
+ break
+ }
+ dir = parent
+ }
+ return "", fmt.Errorf("could not find pkg directory with %s subdirectories", indicators)
+}
+
+func gen(root string, pkg string) error {
+ entries, ok := registry[pkg]
+ if !ok {
+ return fmt.Errorf("unable to find registry entry for %q", pkg)
+ }
+ for _, e := range entries {
+ if err := genFile(root, pkg, e); err != nil {
+ return fmt.Errorf("failed to generate %s: %s", e.File, err)
+ }
+ if e.Stub {
+ fmt.Printf(">> Successfully wrote pkg/%s/%s.go\n", pkg, e.File)
+ }
+ fmt.Printf(">> Successfully wrote pkg/%s/%s.s\n", pkg, e.File)
+ }
+ return nil
+}
+
+func genFile(root string, pkg string, e *Entry) error {
+ ctx := asm.NewContext()
+ ctx.Package("dappui.com/pkg/" + pkg)
+ if e.Constraints == "" {
+ e.Constraints = "amd64,!gccgo"
+ }
+ ctx.ConstraintExpr(e.Constraints)
+ e.Generator(ctx)
+ f, err := ctx.Result()
+ if err != nil {
+ return err
+ }
+ pcfg := printer.Config{
+ Argv: []string{"genasm", pkg},
+ Pkg: pkg,
+ }
+ out, err := os.Create(filepath.Join(root, "pkg", pkg, e.File+".s"))
+ if err != nil {
+ return err
+ }
+ passes := []pass.Interface{
+ pass.Compile,
+ &pass.Output{
+ Printer: printer.NewGoAsm(pcfg),
+ Writer: out,
+ },
+ }
+ if e.Stub {
+ stub, err := os.Create(filepath.Join(root, "pkg", pkg, e.File+".go"))
+ if err != nil {
+ return err
+ }
+ passes = append(passes, &pass.Output{
+ Printer: printer.NewStubs(pcfg),
+ Writer: stub,
+ })
+ }
+ p := pass.Concat(passes...)
+ if err := p.Execute(f); err != nil {
+ return err
+ }
+ return nil
+}
|
espra/espra | e71a128e4336100690555b598d59de2668e81ab3 | meta: elide types in composite literals | diff --git a/infra/provider/container/container.go b/infra/provider/container/container.go
index 0e9eb75..e3d5438 100644
--- a/infra/provider/container/container.go
+++ b/infra/provider/container/container.go
@@ -1,75 +1,75 @@
// Public Domain (-) 2020-present, The Core Authors.
// See the Core UNLICENSE file for details.
// Package container defines a resource for creating containers.
//
// The specified container will be built using docker and pushed to the repo.
package container
import (
"fmt"
"os/exec"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)
// Resource returns the schema definition for the container resource.
func Resource() *schema.Resource {
return &schema.Resource{
Create: create,
Delete: schema.Noop,
Read: schema.Noop,
Schema: map[string]*schema.Schema{
- "image": &schema.Schema{
+ "image": {
Computed: true,
Type: schema.TypeString,
},
- "repo": &schema.Schema{
+ "repo": {
Required: true,
Type: schema.TypeString,
},
- "source": &schema.Schema{
+ "source": {
Required: true,
Type: schema.TypeString,
},
- "tag": &schema.Schema{
+ "tag": {
Required: true,
Type: schema.TypeString,
},
},
Update: update,
}
}
func build(repo string, d *schema.ResourceData, meta interface{}) error {
source := d.Get("source").(string)
tag := d.Get("tag").(string)
image := repo + ":" + tag
cmd := exec.Command("docker", "build", "-t", image, ".")
cmd.Dir = source
out, err := cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("container: failed to build the %q image:\n\n%s", image, string(out))
}
cmd = exec.Command("docker", "push", image)
out, err = cmd.CombinedOutput()
if err != nil {
return fmt.Errorf("container: failed to push the %q image:\n\n%s", image, string(out))
}
d.Set("image", image)
return nil
}
func create(d *schema.ResourceData, meta interface{}) error {
repo := d.Get("repo").(string)
if err := build(repo, d, nil); err != nil {
return err
}
d.SetId(repo)
return nil
}
func update(d *schema.ResourceData, meta interface{}) error {
repo := d.Get("repo").(string)
return build(repo, d, nil)
}
diff --git a/infra/provider/sourcehash/sourcehash.go b/infra/provider/sourcehash/sourcehash.go
index 8880db3..c59226a 100644
--- a/infra/provider/sourcehash/sourcehash.go
+++ b/infra/provider/sourcehash/sourcehash.go
@@ -1,101 +1,101 @@
// Public Domain (-) 2020-present, The Core Authors.
// See the Core UNLICENSE file for details.
// Package sourcehash defines a datasource for hashing source paths.
//
// The idea for this datasource is inspired by Dragan Milic's approach in:
// https://github.com/draganm/terraform-provider-linuxbox
package sourcehash
import (
"crypto/sha512"
"encoding/hex"
"fmt"
"io"
"os"
"sort"
"dappui.com/pkg/sys"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
)
var (
fs = sys.OSFileSystem
newline = []byte{'\n'}
)
// Resource returns the schema definition for the sourcehash datasource.
func Resource() *schema.Resource {
return &schema.Resource{
Read: read,
Schema: map[string]*schema.Schema{
- "digest": &schema.Schema{
+ "digest": {
Computed: true,
Type: schema.TypeString,
},
- "paths": &schema.Schema{
+ "paths": {
Elem: &schema.Schema{
Type: schema.TypeString,
},
Required: true,
Type: schema.TypeSet,
},
},
}
}
func read(d *schema.ResourceData, meta interface{}) error {
id := sha512.New512_256()
seen := map[string]struct{}{}
for _, elem := range d.Get("paths").(*schema.Set).List() {
path := elem.(string)
id.Write([]byte(path))
id.Write(newline)
info, err := fs.Lstat(path)
if err != nil {
return fmt.Errorf("sourcehash: failed to stat path %q: %s", path, err)
}
if !info.IsDir() {
seen[path] = struct{}{}
continue
}
if err := fs.Walk(path, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
seen[path] = struct{}{}
}
return nil
}); err != nil {
return fmt.Errorf("sourcehash: failed to navigate path %q: %s", path, err)
}
}
files := []string{}
for file := range seen {
files = append(files, file)
}
sort.Strings(files)
buf := make([]byte, 65536)
hasher := sha512.New512_256()
for _, file := range files {
hasher.Write([]byte(file))
hasher.Write(newline)
f, err := fs.Open(file)
if err != nil {
return fmt.Errorf("sourcehash: failed to open file %q: %s", file, err)
}
_, err = io.CopyBuffer(hasher, f, buf)
if err != nil {
f.Close()
return fmt.Errorf("sourcehash: failed to hash file %q: %s", file, err)
}
if err := f.Close(); err != nil {
return fmt.Errorf("sourcehash: failed to close file %q: %s", file, err)
}
hasher.Write(newline)
}
d.Set("digest", hex.EncodeToString(hasher.Sum(nil)))
d.SetId(hex.EncodeToString(id.Sum(nil)))
return nil
}
diff --git a/pkg/mockfs/mockfs.go b/pkg/mockfs/mockfs.go
index bb84f44..f33a774 100644
--- a/pkg/mockfs/mockfs.go
+++ b/pkg/mockfs/mockfs.go
@@ -1,272 +1,272 @@
// Public Domain (-) 2020-present, The Core Authors.
// See the Core UNLICENSE file for details.
// Package mockfs mocks interactions with the filesystem.
package mockfs
import (
"bytes"
"errors"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"dappui.com/pkg/sys"
)
var (
errCloseFailure = errors.New("mockfs: failed to close file")
errOpenFailure = errors.New("mockfs: failed to open file")
errReadFailure = errors.New("mockfs: failed to read file")
errStatFailure = errors.New("mockfs: failed to stat file")
)
// File provides a mock implementation of the sys.File interface.
type File struct {
closed bool
data *bytes.Buffer
failClose bool
failRead bool
mu sync.Mutex // protects closed
}
// Close implements the interface for sys.File.
func (f *File) Close() error {
f.mu.Lock()
defer f.mu.Unlock()
if f.closed {
return os.ErrClosed
}
if f.failClose {
return errCloseFailure
}
f.closed = true
return nil
}
// Read implements the interface for sys.File.
func (f *File) Read(p []byte) (n int, err error) {
f.mu.Lock()
defer f.mu.Unlock()
if f.closed {
return 0, os.ErrClosed
}
if f.failRead {
return 0, errReadFailure
}
return f.data.Read(p)
}
// FileInfo provides a mock implementation of the os.FileInfo interface.
type FileInfo struct {
data string
dir bool
failClose bool
failOpen bool
failRead bool
failStat bool
name string
}
// FailClose will mark a file to fail when its Close method is called.
func (f *FileInfo) FailClose() *FileInfo {
f.failClose = true
return f
}
// FailOpen will return an error for Open calls at the current path.
func (f *FileInfo) FailOpen() *FileInfo {
f.failOpen = true
return f
}
// FailRead will mark a file to fail when its Read method is called.
func (f *FileInfo) FailRead() *FileInfo {
f.failRead = true
return f
}
// FailStat will return an error for Lstat/Stat calls at the current path.
func (f *FileInfo) FailStat() *FileInfo {
f.failStat = true
return f
}
// IsDir implements the interface for os.FileInfo.
func (f *FileInfo) IsDir() bool {
return f.dir
}
// ModTime implements the interface for os.FileInfo.
func (f *FileInfo) ModTime() time.Time {
return time.Time{}
}
// Mode implements the interface for os.FileInfo.
func (f *FileInfo) Mode() os.FileMode {
if f.dir {
return os.ModeDir
}
return 0
}
// Name implements the interface for os.FileInfo.
func (f *FileInfo) Name() string {
return f.name
}
// Size implements the interface for os.FileInfo.
func (f *FileInfo) Size() int64 {
return int64(len(f.data))
}
// Sys implements the interface for os.FileInfo.
func (f *FileInfo) Sys() interface{} {
return f.name
}
// FileSystem provides a configurable mock implementation of the sys.FileSystem
// interface.
type FileSystem struct {
files map[string]*FileInfo
mu sync.RWMutex // protects files
}
// Lstat implements the interface for sys.Filesystem.
func (f *FileSystem) Lstat(path string) (os.FileInfo, error) {
return f.Stat(path)
}
// Mkdir creates a directory at the given path, along with any necessary parent
// directories. If a directory already exists, then the method will exit early.
//
// WriteFile implicitly creates directories, so this method is only really
// useful for creating empty directories.
func (f *FileSystem) Mkdir(path string) *FileInfo {
f.mu.Lock()
defer f.mu.Unlock()
path = filepath.Clean("/" + path)
if info, ok := f.files[path]; ok {
return info
}
dir, name := filepath.Split(path)
info := &FileInfo{
dir: true,
name: name,
}
f.files[path] = info
path = dir
for path != "/" {
path = filepath.Clean(path)
if _, ok := f.files[path]; ok {
break
}
dir, name := filepath.Split(path)
f.files[path] = &FileInfo{
dir: true,
name: name,
}
path = dir
}
return info
}
// Open implements the interface for sys.Filesystem.
func (f *FileSystem) Open(path string) (sys.File, error) {
path = filepath.Clean("/" + path)
f.mu.RLock()
info, ok := f.files[path]
f.mu.RUnlock()
if !ok {
return nil, os.ErrNotExist
}
if info.failOpen {
return nil, errOpenFailure
}
if info.failStat {
return nil, errStatFailure
}
return &File{
data: bytes.NewBufferString(info.data),
failClose: info.failClose,
failRead: info.failRead,
}, nil
}
// Stat implements the interface for sys.Filesystem.
func (f *FileSystem) Stat(path string) (os.FileInfo, error) {
path = filepath.Clean("/" + path)
f.mu.RLock()
info, ok := f.files[path]
f.mu.RUnlock()
if !ok {
return nil, os.ErrNotExist
}
if info.failStat {
return nil, errStatFailure
}
return info, nil
}
// Walk implements the interface for sys.Filesystem.
func (f *FileSystem) Walk(root string, walkFn filepath.WalkFunc) error {
f.mu.RLock()
defer f.mu.RUnlock()
paths := []string{}
root = filepath.Clean("/" + root)
dir := root + "/"
if root == "/" {
dir = "/"
}
for path := range f.files {
if path == root || strings.HasPrefix(path, dir) {
paths = append(paths, path)
}
}
sort.Strings(paths)
var err error
for _, path := range paths {
info := f.files[path]
if info.failStat {
err = errStatFailure
}
// TODO(tav): Add support for filepath.SkipDir.
if err := walkFn(path, info, err); err != nil {
return err
}
err = nil
}
return nil
}
// WriteFile creates a file with the given data at the specified path. It will
// implicitly create any parent directories as necessary.
func (f *FileSystem) WriteFile(path string, data string) *FileInfo {
f.mu.Lock()
path = filepath.Clean("/" + path)
dir, name := filepath.Split(path)
info := &FileInfo{
data: data,
name: name,
}
f.files[path] = info
f.mu.Unlock()
if dir != "/" {
f.Mkdir(dir)
}
return info
}
// New returns a mockable filesystem for testing purposes.
func New() *FileSystem {
return &FileSystem{
files: map[string]*FileInfo{
- "/": &FileInfo{
+ "/": {
dir: true,
},
},
}
}
|
espra/espra | e2488d23d6e1be9ff0cc06803f6c916b27e0f604 | COPYING: add dummy file to satisfy pkg.go.dev | diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..672d6b3
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,125 @@
+See UNLICENSE.md for the Core Unlicense. It is a superset of CC0.
+
+-------------------------------------------------------------------------------
+
+Creative Commons Legal Code
+
+CC0 1.0 Universal
+
+ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
+ LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
+ ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
+ INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
+ REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
+ PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
+ THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
+ HEREUNDER.
+
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer
+exclusive Copyright and Related Rights (defined below) upon the creator
+and subsequent owner(s) (each and all, an "owner") of an original work of
+authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for
+the purpose of contributing to a commons of creative, cultural and
+scientific works ("Commons") that the public can reliably and without fear
+of later claims of infringement build upon, modify, incorporate in other
+works, reuse and redistribute as freely as possible in any form whatsoever
+and for any purposes, including without limitation commercial purposes.
+These owners may contribute to the Commons to promote the ideal of a free
+culture and the further production of creative, cultural and scientific
+works, or to gain reputation or greater distribution for their Work in
+part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any
+expectation of additional consideration or compensation, the person
+associating CC0 with a Work (the "Affirmer"), to the extent that he or she
+is an owner of Copyright and Related Rights in the Work, voluntarily
+elects to apply CC0 to the Work and publicly distribute the Work under its
+terms, with knowledge of his or her Copyright and Related Rights in the
+Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be
+protected by copyright and related or neighboring rights ("Copyright and
+Related Rights"). Copyright and Related Rights include, but are not
+limited to, the following:
+
+ i. the right to reproduce, adapt, distribute, perform, display,
+ communicate, and translate a Work;
+ ii. moral rights retained by the original author(s) and/or performer(s);
+iii. publicity and privacy rights pertaining to a person's image or
+ likeness depicted in a Work;
+ iv. rights protecting against unfair competition in regards to a Work,
+ subject to the limitations in paragraph 4(a), below;
+ v. rights protecting the extraction, dissemination, use and reuse of data
+ in a Work;
+ vi. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation
+ thereof, including any amended or successor version of such
+ directive); and
+vii. other similar, equivalent or corresponding rights throughout the
+ world based on applicable law or treaty, and any national
+ implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention
+of, applicable law, Affirmer hereby overtly, fully, permanently,
+irrevocably and unconditionally waives, abandons, and surrenders all of
+Affirmer's Copyright and Related Rights and associated claims and causes
+of action, whether now known or unknown (including existing as well as
+future claims and causes of action), in the Work (i) in all territories
+worldwide, (ii) for the maximum duration provided by applicable law or
+treaty (including future time extensions), (iii) in any current or future
+medium and for any number of copies, and (iv) for any purpose whatsoever,
+including without limitation commercial, advertising or promotional
+purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
+member of the public at large and to the detriment of Affirmer's heirs and
+successors, fully intending that such Waiver shall not be subject to
+revocation, rescission, cancellation, termination, or any other legal or
+equitable action to disrupt the quiet enjoyment of the Work by the public
+as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason
+be judged legally invalid or ineffective under applicable law, then the
+Waiver shall be preserved to the maximum extent permitted taking into
+account Affirmer's express Statement of Purpose. In addition, to the
+extent the Waiver is so judged Affirmer hereby grants to each affected
+person a royalty-free, non transferable, non sublicensable, non exclusive,
+irrevocable and unconditional license to exercise Affirmer's Copyright and
+Related Rights in the Work (i) in all territories worldwide, (ii) for the
+maximum duration provided by applicable law or treaty (including future
+time extensions), (iii) in any current or future medium and for any number
+of copies, and (iv) for any purpose whatsoever, including without
+limitation commercial, advertising or promotional purposes (the
+"License"). The License shall be deemed effective as of the date CC0 was
+applied by Affirmer to the Work. Should any part of the License for any
+reason be judged legally invalid or ineffective under applicable law, such
+partial invalidity or ineffectiveness shall not invalidate the remainder
+of the License, and in such case Affirmer hereby affirms that he or she
+will not (i) exercise any of his or her remaining Copyright and Related
+Rights in the Work or (ii) assert any associated claims and causes of
+action with respect to the Work, in either case contrary to Affirmer's
+express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+ surrendered, licensed or otherwise affected by this document.
+ b. Affirmer offers the Work as-is and makes no representations or
+ warranties of any kind concerning the Work, express, implied,
+ statutory or otherwise, including without limitation warranties of
+ title, merchantability, fitness for a particular purpose, non
+ infringement, or the absence of latent or other defects, accuracy, or
+ the present or absence of errors, whether or not discoverable, all to
+ the greatest extent permissible under applicable law.
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+ that may apply to the Work or any use thereof, including without
+ limitation any person's Copyright and Related Rights in the Work.
+ Further, Affirmer disclaims responsibility for obtaining any necessary
+ consents, permissions or other rights required for any use of the
+ Work.
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+ party to this document and has no duty or obligation with respect to
+ this CC0 or use of the Work.
|
espra/espra | 032282923b8c05d093be13ed591410899ea9f937 | doc/license: add CC0 1.0 legalcode | diff --git a/doc/license/cc0/LEGALCODE b/doc/license/cc0/LEGALCODE
new file mode 100644
index 0000000..0e259d4
--- /dev/null
+++ b/doc/license/cc0/LEGALCODE
@@ -0,0 +1,121 @@
+Creative Commons Legal Code
+
+CC0 1.0 Universal
+
+ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
+ LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
+ ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
+ INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
+ REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
+ PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
+ THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
+ HEREUNDER.
+
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer
+exclusive Copyright and Related Rights (defined below) upon the creator
+and subsequent owner(s) (each and all, an "owner") of an original work of
+authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for
+the purpose of contributing to a commons of creative, cultural and
+scientific works ("Commons") that the public can reliably and without fear
+of later claims of infringement build upon, modify, incorporate in other
+works, reuse and redistribute as freely as possible in any form whatsoever
+and for any purposes, including without limitation commercial purposes.
+These owners may contribute to the Commons to promote the ideal of a free
+culture and the further production of creative, cultural and scientific
+works, or to gain reputation or greater distribution for their Work in
+part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any
+expectation of additional consideration or compensation, the person
+associating CC0 with a Work (the "Affirmer"), to the extent that he or she
+is an owner of Copyright and Related Rights in the Work, voluntarily
+elects to apply CC0 to the Work and publicly distribute the Work under its
+terms, with knowledge of his or her Copyright and Related Rights in the
+Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be
+protected by copyright and related or neighboring rights ("Copyright and
+Related Rights"). Copyright and Related Rights include, but are not
+limited to, the following:
+
+ i. the right to reproduce, adapt, distribute, perform, display,
+ communicate, and translate a Work;
+ ii. moral rights retained by the original author(s) and/or performer(s);
+iii. publicity and privacy rights pertaining to a person's image or
+ likeness depicted in a Work;
+ iv. rights protecting against unfair competition in regards to a Work,
+ subject to the limitations in paragraph 4(a), below;
+ v. rights protecting the extraction, dissemination, use and reuse of data
+ in a Work;
+ vi. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation
+ thereof, including any amended or successor version of such
+ directive); and
+vii. other similar, equivalent or corresponding rights throughout the
+ world based on applicable law or treaty, and any national
+ implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention
+of, applicable law, Affirmer hereby overtly, fully, permanently,
+irrevocably and unconditionally waives, abandons, and surrenders all of
+Affirmer's Copyright and Related Rights and associated claims and causes
+of action, whether now known or unknown (including existing as well as
+future claims and causes of action), in the Work (i) in all territories
+worldwide, (ii) for the maximum duration provided by applicable law or
+treaty (including future time extensions), (iii) in any current or future
+medium and for any number of copies, and (iv) for any purpose whatsoever,
+including without limitation commercial, advertising or promotional
+purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
+member of the public at large and to the detriment of Affirmer's heirs and
+successors, fully intending that such Waiver shall not be subject to
+revocation, rescission, cancellation, termination, or any other legal or
+equitable action to disrupt the quiet enjoyment of the Work by the public
+as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason
+be judged legally invalid or ineffective under applicable law, then the
+Waiver shall be preserved to the maximum extent permitted taking into
+account Affirmer's express Statement of Purpose. In addition, to the
+extent the Waiver is so judged Affirmer hereby grants to each affected
+person a royalty-free, non transferable, non sublicensable, non exclusive,
+irrevocable and unconditional license to exercise Affirmer's Copyright and
+Related Rights in the Work (i) in all territories worldwide, (ii) for the
+maximum duration provided by applicable law or treaty (including future
+time extensions), (iii) in any current or future medium and for any number
+of copies, and (iv) for any purpose whatsoever, including without
+limitation commercial, advertising or promotional purposes (the
+"License"). The License shall be deemed effective as of the date CC0 was
+applied by Affirmer to the Work. Should any part of the License for any
+reason be judged legally invalid or ineffective under applicable law, such
+partial invalidity or ineffectiveness shall not invalidate the remainder
+of the License, and in such case Affirmer hereby affirms that he or she
+will not (i) exercise any of his or her remaining Copyright and Related
+Rights in the Work or (ii) assert any associated claims and causes of
+action with respect to the Work, in either case contrary to Affirmer's
+express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+ surrendered, licensed or otherwise affected by this document.
+ b. Affirmer offers the Work as-is and makes no representations or
+ warranties of any kind concerning the Work, express, implied,
+ statutory or otherwise, including without limitation warranties of
+ title, merchantability, fitness for a particular purpose, non
+ infringement, or the absence of latent or other defects, accuracy, or
+ the present or absence of errors, whether or not discoverable, all to
+ the greatest extent permissible under applicable law.
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+ that may apply to the Work or any use thereof, including without
+ limitation any person's Copyright and Related Rights in the Work.
+ Further, Affirmer disclaims responsibility for obtaining any necessary
+ consents, permissions or other rights required for any use of the
+ Work.
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+ party to this document and has no duty or obligation with respect to
+ this CC0 or use of the Work.
|
tomtt/cucumber-skin | 7b69ba4089d3b773e224ed67037cd2ab5ba9f826 | Fixed specs not running by unshifting dir onto path | diff --git a/bin/cucumber-skin b/bin/cucumber-skin
index e6433e1..3ce0c46 100755
--- a/bin/cucumber-skin
+++ b/bin/cucumber-skin
@@ -1,10 +1,12 @@
#!/usr/bin/env ruby
# Add '.rb' to work around a bug in IronRuby's File#dirname
_cuke_skin_dir = File.dirname(__FILE__ + '.rb') + '/../lib'
-$:.unshift(_cuke_skin_dir) unless $:.include?(_cuke_skin_dir)
+$:.unshift(_cuke_skin_dir) unless
+ $:.include?(_cuke_skin_dir) ||
+ $:.include?(File.expand_path(_cuke_skin_dir))
require 'cucumber-skin/cli/main'
CucumberSkin::Cli::Main.execute(ARGV.dup)
# EOF
diff --git a/lib/cucumber-skin.rb b/lib/cucumber-skin.rb
index 57218f3..446b655 100644
--- a/lib/cucumber-skin.rb
+++ b/lib/cucumber-skin.rb
@@ -1,49 +1,53 @@
+_cuke_skin_dir = File.dirname(__FILE__ + '.rb') + '/../lib'
+$:.unshift(_cuke_skin_dir) unless
+ $:.include?(_cuke_skin_dir) ||
+ $:.include?(File.expand_path(_cuke_skin_dir))
module CucumberSkin
# :stopdoc:
VERSION = '0.0.1'
LIBPATH = ::File.expand_path(::File.dirname(__FILE__)) + ::File::SEPARATOR
PATH = ::File.dirname(LIBPATH) + ::File::SEPARATOR
# :startdoc:
# Returns the version string for the library.
#
def self.version
VERSION
end
# Returns the library path for the module. If any arguments are given,
# they will be joined to the end of the libray path using
# <tt>File.join</tt>.
#
def self.libpath( *args )
args.empty? ? LIBPATH : ::File.join(LIBPATH, args.flatten)
end
# Returns the lpath for the module. If any arguments are given,
# they will be joined to the end of the path using
# <tt>File.join</tt>.
#
def self.path( *args )
args.empty? ? PATH : ::File.join(PATH, args.flatten)
end
# Utility method used to require all files ending in .rb that lie in the
# directory below this file that has the same name as the filename passed
# in. Optionally, a specific _directory_ name can be passed in such that
# the _filename_ does not have to be equivalent to the directory.
#
def self.require_all_libs_relative_to( fname, dir = nil )
dir ||= ::File.basename(fname, '.*')
search_me = ::File.expand_path(
::File.join(::File.dirname(fname), dir, '**', '*.rb'))
Dir.glob(search_me).sort.each {|rb| require rb}
end
end # module CucumberSkin
CucumberSkin.require_all_libs_relative_to(__FILE__)
# EOF
diff --git a/lib/cucumber-skin/cli/main.rb b/lib/cucumber-skin/cli/main.rb
index 1335a05..ae4da97 100644
--- a/lib/cucumber-skin/cli/main.rb
+++ b/lib/cucumber-skin/cli/main.rb
@@ -1,10 +1,21 @@
require 'cucumber'
+require 'cucumber-skin/formatter/html_files'
+require 'cucumber-skin/cli/configuration'
module CucumberSkin
module Cli
class Main < Cucumber::Cli::Main
+ private
+
+ def configuration
+ return @configuration if @configuration
+
+ @configuration = Configuration.new(@out_stream, @error_stream)
+ @configuration.parse!(@args)
+ @configuration
+ end
end
end
end
CucumberSkin::Cli::Main.step_mother = self
diff --git a/lib/cucumber-skin/formatter/html_files.rb b/lib/cucumber-skin/formatter/html_files.rb
index 3d3228e..6a104e1 100644
--- a/lib/cucumber-skin/formatter/html_files.rb
+++ b/lib/cucumber-skin/formatter/html_files.rb
@@ -1,20 +1,21 @@
require 'cucumber'
+require 'cucumber-skin/gatherer/results_gatherer'
module CucumberSkin
module Formatter
class HtmlFiles
attr_accessor :options
def initialize(step_mother, io, options)
@gatherer = CucumberSkin::Gatherer::ResultsGatherer.new(step_mother, options)
@options = options
end
def visit_features(features)
@gatherer.visit_features(features)
puts "Done visiting features from HtmlFiles formatter"
end
end
end
end
|
tomtt/cucumber-skin | 883874b4811b9311cd802f38978fa42a035ec45a | Simplified cucumber-skin script, functionality put in same place as where cucumber had it | diff --git a/bin/cucumber-skin b/bin/cucumber-skin
index a1b3fa9..e6433e1 100755
--- a/bin/cucumber-skin
+++ b/bin/cucumber-skin
@@ -1,19 +1,10 @@
#!/usr/bin/env ruby
+# Add '.rb' to work around a bug in IronRuby's File#dirname
-require File.expand_path(
- File.join(File.dirname(__FILE__), %w[.. lib cucumber-skin]))
+_cuke_skin_dir = File.dirname(__FILE__ + '.rb') + '/../lib'
+$:.unshift(_cuke_skin_dir) unless $:.include?(_cuke_skin_dir)
-# Put your code here
-
-# cuke_dir = File.dirname(__FILE__ + '.rb') + '/../lib'
-cuke_dir = '/Users/tomtt/created/project/externals/cucumber/lib'
-$:.unshift(cuke_dir) unless $:.include?(cuke_dir)
-
-require 'cucumber/cli/main'
-
-unless ARGV.include?("-f") || ARGV.include?("--format")
- ARGV.unshift("-f", "CucumberSkin::Formatter::HtmlFiles")
-end
-Cucumber::Cli::Main.execute(ARGV.dup)
+require 'cucumber-skin/cli/main'
+CucumberSkin::Cli::Main.execute(ARGV.dup)
# EOF
diff --git a/lib/cucumber-skin/cli/configuration.rb b/lib/cucumber-skin/cli/configuration.rb
new file mode 100644
index 0000000..a240929
--- /dev/null
+++ b/lib/cucumber-skin/cli/configuration.rb
@@ -0,0 +1,14 @@
+require 'cucumber'
+
+module CucumberSkin
+ module Cli
+ class Configuration < Cucumber::Cli::Configuration
+ def parse!(args)
+ unless args.include?("-f") || args.include?("--format")
+ args.unshift("-f", "CucumberSkin::Formatter::HtmlFiles")
+ end
+ super
+ end
+ end
+ end
+end
diff --git a/lib/cucumber-skin/cli/main.rb b/lib/cucumber-skin/cli/main.rb
new file mode 100644
index 0000000..1335a05
--- /dev/null
+++ b/lib/cucumber-skin/cli/main.rb
@@ -0,0 +1,10 @@
+require 'cucumber'
+
+module CucumberSkin
+ module Cli
+ class Main < Cucumber::Cli::Main
+ end
+ end
+end
+
+CucumberSkin::Cli::Main.step_mother = self
diff --git a/spec/cucumber-skin/cli/configuration_spec.rb b/spec/cucumber-skin/cli/configuration_spec.rb
new file mode 100644
index 0000000..ff4db18
--- /dev/null
+++ b/spec/cucumber-skin/cli/configuration_spec.rb
@@ -0,0 +1,33 @@
+# Require the spec helper relative to this file
+require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
+
+require 'yaml'
+
+module CucumberSkin
+ module Cli
+ describe Configuration do
+ #
+ # def given_cucumber_yml_defined_as(hash_or_string)
+ # File.stub!(:exist?).and_return(true)
+ # cucumber_yml = hash_or_string.is_a?(Hash) ? hash_or_string.to_yaml : hash_or_string
+ # IO.stub!(:read).with('cucumber.yml').and_return(cucumber_yml)
+ # end
+ #
+ # before(:each) do
+ # Kernel.stub!(:exit).and_return(nil)
+ # end
+
+ it "should use the HtmlFiles formatter by default" do
+ config = Configuration.new
+ config.parse!(["-t", "committed"])
+ config.options[:formats].should == {'CucumberSkin::Formatter::HtmlFiles' => STDOUT}
+ end
+
+ it "should use the specified formatter" do
+ config = Configuration.new
+ config.parse!(["-f", "progress"])
+ config.options[:formats].should == {'progress' => STDOUT}
+ end
+ end
+ end
+end
|
tomtt/cucumber-skin | fe5c7e83b22bad6b7e1786c4c3e4268493782b95 | First running feature file | diff --git a/features/html_files.feature b/features/html_files.feature
new file mode 100644
index 0000000..aa97bb7
--- /dev/null
+++ b/features/html_files.feature
@@ -0,0 +1,8 @@
+Feature: Html File Formatter
+
+ @tag1 @tag2
+ Scenario: Generating files for each tag
+ When I run cucumber-skin features -o tmp/features
+ Then the directory examples/self_test/tmp/features should exist
+ And the file examples/self_test/tmp/features/tag1.html should exist
+ And the file examples/self_test/tmp/features/tag2.html should exist
diff --git a/features/step_definitions/cucumber_steps.rb b/features/step_definitions/cucumber_steps.rb
new file mode 100644
index 0000000..3f23b85
--- /dev/null
+++ b/features/step_definitions/cucumber_steps.rb
@@ -0,0 +1,30 @@
+Given /^I am in (.*)$/ do |dir|
+ @dir = dir
+end
+
+When /^I run cucumber-skin (.*)$/ do |cmd|
+ @dir ||= 'self_test'
+ full_dir ||= File.expand_path(File.dirname(__FILE__) + "/../../examples/#{@dir}")
+ Dir.chdir(full_dir) do
+ @full_cmd = "#{Cucumber::RUBY_BINARY} #{Cucumber::BINARY} --no-color #{cmd}"
+ @out = `#{@full_cmd}`
+ @status = $?.exitstatus
+ end
+end
+
+Then /^it should (fail|pass) with$/ do |success, output|
+ @out.should == output
+ if success == 'fail'
+ @status.should_not == 0
+ else
+ @status.should == 0
+ end
+end
+
+Then /^(.*) should contain$/ do |file, text|
+ IO.read(file).should == text
+end
+
+Then /^(.*) should match$/ do |file, text|
+ IO.read(file).should =~ Regexp.new(text)
+end
diff --git a/features/step_definitions/file_steps.rb b/features/step_definitions/file_steps.rb
new file mode 100644
index 0000000..83aab8f
--- /dev/null
+++ b/features/step_definitions/file_steps.rb
@@ -0,0 +1,7 @@
+Then /^the directory ([^\"]*) should exist$/ do |directory_name|
+ File.should be_directory(directory_name)
+end
+
+Then /^the file (.*) should exist$/ do |file_name|
+ File.should exist(file_name)
+end
diff --git a/features/support/env.rb b/features/support/env.rb
new file mode 100644
index 0000000..6fb989a
--- /dev/null
+++ b/features/support/env.rb
@@ -0,0 +1,9 @@
+require 'rubygems'
+require 'spec/expectations'
+require 'fileutils'
+require 'ruby-debug'
+
+After do
+ FileUtils.rm_rf 'examples/self_test/tmp'
+ FileUtils.mkdir 'examples/self_test/tmp'
+end
|
tomtt/cucumber-skin | 8bf593b38d9a8b3033195b9514e480725ebf3b6b | cucumber-skin script uses it's default formatter if none specified, so now the script can just be run like cucumber | diff --git a/bin/cucumber-skin b/bin/cucumber-skin
index b3e4b89..a1b3fa9 100755
--- a/bin/cucumber-skin
+++ b/bin/cucumber-skin
@@ -1,15 +1,19 @@
#!/usr/bin/env ruby
require File.expand_path(
File.join(File.dirname(__FILE__), %w[.. lib cucumber-skin]))
# Put your code here
# cuke_dir = File.dirname(__FILE__ + '.rb') + '/../lib'
cuke_dir = '/Users/tomtt/created/project/externals/cucumber/lib'
$:.unshift(cuke_dir) unless $:.include?(cuke_dir)
require 'cucumber/cli/main'
+
+unless ARGV.include?("-f") || ARGV.include?("--format")
+ ARGV.unshift("-f", "CucumberSkin::Formatter::HtmlFiles")
+end
Cucumber::Cli::Main.execute(ARGV.dup)
# EOF
|
tomtt/cucumber-skin | d3b21309779acac8f09e1d0192a5bd3c2f9f24c4 | Html files formatter uses results gatherer | diff --git a/lib/cucumber-skin/formatter/html_files.rb b/lib/cucumber-skin/formatter/html_files.rb
index 8f9d62f..3d3228e 100644
--- a/lib/cucumber-skin/formatter/html_files.rb
+++ b/lib/cucumber-skin/formatter/html_files.rb
@@ -1,17 +1,20 @@
require 'cucumber'
module CucumberSkin
module Formatter
class HtmlFiles
attr_accessor :options
def initialize(step_mother, io, options)
+ @gatherer = CucumberSkin::Gatherer::ResultsGatherer.new(step_mother, options)
@options = options
end
def visit_features(features)
- puts "Visiting features"
+ @gatherer.visit_features(features)
+
+ puts "Done visiting features from HtmlFiles formatter"
end
end
end
end
diff --git a/lib/cucumber-skin/gatherer/results_gatherer.rb b/lib/cucumber-skin/gatherer/results_gatherer.rb
index 1764ad7..979e1ca 100644
--- a/lib/cucumber-skin/gatherer/results_gatherer.rb
+++ b/lib/cucumber-skin/gatherer/results_gatherer.rb
@@ -1,8 +1,16 @@
require 'cucumber'
module CucumberSkin
module Gatherer
class ResultsGatherer < Cucumber::Ast::Visitor
+ def initialize(step_mother, options)
+ @options = options
+ @step_mother = step_mother
+ end
+
+ def visit_features(features)
+ puts "Done visiting features from results gatherer"
+ end
end
end
end
diff --git a/spec/cucumber-skin/gatherer/results_gatherer_spec.rb b/spec/cucumber-skin/gatherer/results_gatherer_spec.rb
index cf342ae..76a59d3 100644
--- a/spec/cucumber-skin/gatherer/results_gatherer_spec.rb
+++ b/spec/cucumber-skin/gatherer/results_gatherer_spec.rb
@@ -1,16 +1,16 @@
# spec/cucumber-skin/cucumber-skin/gatherer/results_gatherer_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
# No need to type CucumberSkin:: before each call
include CucumberSkin::Gatherer
describe ResultsGatherer do
it "should inherit from the cucumber ast visitor" do
- ResultsGatherer.new(:step_mother).should be_a(Cucumber::Ast::Visitor)
+ ResultsGatherer.new(:step_mother, {}).should be_a(Cucumber::Ast::Visitor)
end
end
# EOF
|
tomtt/cucumber-skin | b6c5d34ac87b91d99544f2cb15638e028875bf71 | Put gatherer classes in their own module | diff --git a/lib/cucumber-skin/gatherer/feature.rb b/lib/cucumber-skin/gatherer/feature.rb
index a6a008b..c42e646 100644
--- a/lib/cucumber-skin/gatherer/feature.rb
+++ b/lib/cucumber-skin/gatherer/feature.rb
@@ -1,11 +1,13 @@
module CucumberSkin
- class Feature
- attr_accessor :tags
- attr_accessor :scenarios
+ module Gatherer
+ class Feature
+ attr_accessor :tags
+ attr_accessor :scenarios
- def initialize
- @tags = TagSet.new(self)
- @scenarios = []
+ def initialize
+ @tags = TagSet.new(self)
+ @scenarios = []
+ end
end
end
end
diff --git a/lib/cucumber-skin/gatherer/results_gatherer.rb b/lib/cucumber-skin/gatherer/results_gatherer.rb
index 50a7d18..1764ad7 100644
--- a/lib/cucumber-skin/gatherer/results_gatherer.rb
+++ b/lib/cucumber-skin/gatherer/results_gatherer.rb
@@ -1,6 +1,8 @@
require 'cucumber'
module CucumberSkin
- class ResultsGatherer < Cucumber::Ast::Visitor
+ module Gatherer
+ class ResultsGatherer < Cucumber::Ast::Visitor
+ end
end
end
diff --git a/lib/cucumber-skin/gatherer/scenario.rb b/lib/cucumber-skin/gatherer/scenario.rb
index 63f6f3e..72d33d1 100644
--- a/lib/cucumber-skin/gatherer/scenario.rb
+++ b/lib/cucumber-skin/gatherer/scenario.rb
@@ -1,11 +1,13 @@
module CucumberSkin
- class Scenario
- attr_accessor :tags
- attr_accessor :steps
+ module Gatherer
+ class Scenario
+ attr_accessor :tags
+ attr_accessor :steps
- def initialize
- @tags = TagSet.new(self)
- @steps = []
+ def initialize
+ @tags = TagSet.new(self)
+ @steps = []
+ end
end
end
end
diff --git a/lib/cucumber-skin/gatherer/step.rb b/lib/cucumber-skin/gatherer/step.rb
index 9a93d93..42656a1 100644
--- a/lib/cucumber-skin/gatherer/step.rb
+++ b/lib/cucumber-skin/gatherer/step.rb
@@ -1,5 +1,7 @@
module CucumberSkin
- # class Step < Struct.new(:keyword, :step_match, :status, :source_indent, :background)
- class Step
+ module Gatherer
+ # class Step < Struct.new(:keyword, :step_match, :status, :source_indent, :background)
+ class Step
+ end
end
end
diff --git a/lib/cucumber-skin/gatherer/tag.rb b/lib/cucumber-skin/gatherer/tag.rb
index 0d15130..495dd54 100644
--- a/lib/cucumber-skin/gatherer/tag.rb
+++ b/lib/cucumber-skin/gatherer/tag.rb
@@ -1,33 +1,35 @@
require 'singleton'
module CucumberSkin
- class Tag
- @@tags = {}
+ module Gatherer
+ class Tag
+ @@tags = {}
- def self.instance(tag)
- string = tag.to_s
- @@tags[string] ||= Tag.send(:new, string)
- end
+ def self.instance(tag)
+ string = tag.to_s
+ @@tags[string] ||= Tag.send(:new, string)
+ end
- def to_s
- @string
- end
+ def to_s
+ @string
+ end
- def things_tagged
- @things_tagged.uniq
- end
+ def things_tagged
+ @things_tagged.uniq
+ end
- def tag(thing)
- @things_tagged << thing
- end
+ def tag(thing)
+ @things_tagged << thing
+ end
- private_class_method :new
+ private_class_method :new
- private
+ private
- def initialize(string)
- @string = string
- @things_tagged = []
+ def initialize(string)
+ @string = string
+ @things_tagged = []
+ end
end
end
end
diff --git a/lib/cucumber-skin/gatherer/tag_set.rb b/lib/cucumber-skin/gatherer/tag_set.rb
index a52c5fc..3124891 100644
--- a/lib/cucumber-skin/gatherer/tag_set.rb
+++ b/lib/cucumber-skin/gatherer/tag_set.rb
@@ -1,19 +1,22 @@
module CucumberSkin
- class TagSet < Array
- # I chose to inherit from an Array in stead of a Set because an array is
- # more transparant to code with (returns [] when empty for example)
- alias_method :original_append, :<<
+ module Gatherer
+ class TagSet < Array
+ # I chose to inherit from an Array in stead of a Set because an array is
+ # more transparant to code with (returns [] when empty for example)
+ alias_method :original_append, :<<
- def initialize(owner)
- @owner = owner
- end
+ def initialize(owner)
+ @owner = owner
+ end
- def <<(elem)
- tag = Tag.instance(elem)
- unless include?(tag)
- tag.tag(@owner)
- self.original_append(tag)
+ def <<(elem)
+ tag = Tag.instance(elem)
+ unless include?(tag)
+ tag.tag(@owner)
+ self.original_append(tag)
+ end
+ end
end
end
end
-end
+
diff --git a/spec/cucumber-skin/gatherer/feature_spec.rb b/spec/cucumber-skin/gatherer/feature_spec.rb
index cac35b4..0500483 100644
--- a/spec/cucumber-skin/gatherer/feature_spec.rb
+++ b/spec/cucumber-skin/gatherer/feature_spec.rb
@@ -1,90 +1,90 @@
# spec/cucumber-skin/cucumber-skin/gatherer/feature_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
# No need to type CucumberSkin:: before each call
-include CucumberSkin
+include CucumberSkin::Gatherer
describe Feature do
describe 'tags' do
it "should have an empty list of tags" do
Feature.new.tags.should == []
end
it "should contain a tag added to it" do
tag = Tag.instance('some tag')
feature = Feature.new
feature.tags << tag
feature.tags.should == [tag]
end
it "should contain all tags added to it" do
tag1 = Tag.instance('some tag')
tag2 = Tag.instance('some other tag')
tag3 = Tag.instance('last tag')
feature = Feature.new
feature.tags << tag1
feature.tags << tag3
feature.tags << tag2
feature.tags.should include(tag1, tag2, tag3)
end
it "can not contain a certain tag more than once" do
tag = Tag.instance('some tag')
feature = Feature.new
feature.tags << tag
feature.tags << tag
feature.should have(1).tags
end
# This works out of the box, but just put this here to be sure
it "can not contain a tag with an identical string more than once" do
tag1 = Tag.instance('some tag')
tag2 = 'some tag'
feature = Feature.new
feature.tags << tag1
feature.tags << tag2
feature.should have(1).tags
end
it "should allow you to add a string to its tags and convert it to a tag" do
feature = Feature.new
feature.tags << 'string_tag'
feature.tags.first.should be_instance_of(Tag)
end
it "should tag itself with the tag if a tag is added to it" do
feature = Feature.new
feature.tags << 'string_tag'
Tag.instance('string_tag').things_tagged.should include(feature)
end
end
describe 'scenarios' do
it "should have an empty list of scenarios" do
Feature.new.scenarios.should == []
end
it "should contain a scenario added to it" do
scenario = Scenario.new
feature = Feature.new
feature.scenarios << scenario
feature.scenarios.should == [scenario]
end
it "should contain all scenarios added to it" do
scenario1 = Scenario.new()
scenario2 = Scenario.new()
scenario3 = Scenario.new()
feature = Feature.new
feature.scenarios << scenario1
feature.scenarios << scenario3
feature.scenarios << scenario2
feature.scenarios.should include(scenario1, scenario2, scenario3)
end
end
end
# EOF
diff --git a/spec/cucumber-skin/gatherer/results_gatherer_spec.rb b/spec/cucumber-skin/gatherer/results_gatherer_spec.rb
index baf69a3..cf342ae 100644
--- a/spec/cucumber-skin/gatherer/results_gatherer_spec.rb
+++ b/spec/cucumber-skin/gatherer/results_gatherer_spec.rb
@@ -1,16 +1,16 @@
# spec/cucumber-skin/cucumber-skin/gatherer/results_gatherer_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
# No need to type CucumberSkin:: before each call
-include CucumberSkin
+include CucumberSkin::Gatherer
describe ResultsGatherer do
it "should inherit from the cucumber ast visitor" do
ResultsGatherer.new(:step_mother).should be_a(Cucumber::Ast::Visitor)
end
end
# EOF
diff --git a/spec/cucumber-skin/gatherer/scenario_spec.rb b/spec/cucumber-skin/gatherer/scenario_spec.rb
index df5241e..79a01a8 100644
--- a/spec/cucumber-skin/gatherer/scenario_spec.rb
+++ b/spec/cucumber-skin/gatherer/scenario_spec.rb
@@ -1,90 +1,90 @@
# spec/cucumber-skin/cucumber-skin/gatherer/scenario_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
# No need to type CucumberSkin:: before each call
-include CucumberSkin
+include CucumberSkin::Gatherer
describe Scenario do
describe 'tags' do
it "should have an empty list of tags" do
Scenario.new.tags.should == []
end
it "should contain a tag added to it" do
tag = Tag.instance('some tag')
scenario = Scenario.new
scenario.tags << tag
scenario.tags.should == [tag]
end
it "should contain all tags added to it" do
tag1 = Tag.instance('some tag')
tag2 = Tag.instance('some other tag')
tag3 = Tag.instance('last tag')
scenario = Scenario.new
scenario.tags << tag1
scenario.tags << tag3
scenario.tags << tag2
scenario.tags.should include(tag1, tag2, tag3)
end
it "can not contain a certain tag more than once" do
tag = Tag.instance('some tag')
scenario = Scenario.new
scenario.tags << tag
scenario.tags << tag
scenario.should have(1).tags
end
# This works out of the box, but just put this here to be sure
it "can not contain a tag with an identical string more than once" do
tag1 = Tag.instance('some tag')
tag2 = 'some tag'
scenario = Scenario.new
scenario.tags << tag1
scenario.tags << tag2
scenario.should have(1).tags
end
it "should allow you to add a string to its tags and convert it to a tag" do
scenario = Scenario.new
scenario.tags << 'string_tag'
scenario.tags.first.should be_instance_of(Tag)
end
it "should tag itself with the tag if a tag is added to it" do
scenario = Scenario.new
scenario.tags << 'string_tag'
Tag.instance('string_tag').things_tagged.should include(scenario)
end
end
describe 'steps' do
it "should have an empty list of steps" do
Scenario.new.steps.should == []
end
it "should contain a step added to it" do
step = Step.new
scenario = Scenario.new
scenario.steps << step
scenario.steps.should == [step]
end
it "should contain all steps added to it" do
step1 = Step.new()
step2 = Step.new()
step3 = Step.new()
scenario = Scenario.new
scenario.steps << step1
scenario.steps << step3
scenario.steps << step2
scenario.steps.should include(step1, step2, step3)
end
end
end
# EOF
diff --git a/spec/cucumber-skin/gatherer/step_spec.rb b/spec/cucumber-skin/gatherer/step_spec.rb
index 3e0b710..4f0c6a1 100644
--- a/spec/cucumber-skin/gatherer/step_spec.rb
+++ b/spec/cucumber-skin/gatherer/step_spec.rb
@@ -1,13 +1,13 @@
# spec/cucumber-skin/cucumber-skin/gatherer/step_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
# No need to type CucumberSkin:: before each call
-include CucumberSkin
+include CucumberSkin::Gatherer
describe Step do
end
# EOF
diff --git a/spec/cucumber-skin/gatherer/tag_set_spec.rb b/spec/cucumber-skin/gatherer/tag_set_spec.rb
index db178ea..310ce26 100644
--- a/spec/cucumber-skin/gatherer/tag_set_spec.rb
+++ b/spec/cucumber-skin/gatherer/tag_set_spec.rb
@@ -1,30 +1,30 @@
# spec/cucumber-skin/cucumber-skin/gatherer/tag_set_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
# No need to type CucumberSkin:: before each call
-include CucumberSkin
+include CucumberSkin::Gatherer
describe TagSet do
it "should initially be empty" do
TagSet.new(:some_owner).should == []
end
it "should contain tags created from the elements added to it" do
tag_set = TagSet.new(:some_owner)
tag_set << :one_thing
tag_set << :another_thing
tag_set.should include(Tag.instance(:one_thing), Tag.instance(:another_thing))
end
it "should tag its owner with the element added to it" do
tag = Tag.instance(:aardvark)
tag_set = TagSet.new(:some_owner)
tag.should_receive(:tag).with(:some_owner)
tag_set << :aardvark
end
end
# EOF
diff --git a/spec/cucumber-skin/gatherer/tag_spec.rb b/spec/cucumber-skin/gatherer/tag_spec.rb
index d438d6f..c684306 100644
--- a/spec/cucumber-skin/gatherer/tag_spec.rb
+++ b/spec/cucumber-skin/gatherer/tag_spec.rb
@@ -1,53 +1,53 @@
# spec/cucumber-skin/cucumber-skin/gatherer/tag_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
# No need to type CucumberSkin:: before each call
-include CucumberSkin
+include CucumberSkin::Gatherer
describe Tag do
it "should not have a public initialize method" do
lambda { Tag.new('whatever') }.should raise_error(NoMethodError)
end
it "should return a tag instance from a string" do
Tag.instance('some_tag1').should be_instance_of(Tag)
end
it "should return a tag instance from a string which converts to that string" do
Tag.instance('some_tag2').to_s.should === 'some_tag2'
end
it "should return identical tags for equal strings" do
Tag.instance('some_tag3').should be_equal(Tag.instance('some_tag3'))
end
it "should return identical tags when created from a tag or a string" do
tag = Tag.instance('some_tag4')
Tag.instance(tag).should be_equal(tag)
end
describe "things tagged" do
it "initially has nothing in the list of things it tagged" do
@tag = Tag.instance('some_tag10')
@tag.things_tagged.should be_empty
end
it "adds something that it tags to the list of things it tagged" do
@tag = Tag.instance('some_tag11')
@tag.tag(:thing)
@tag.things_tagged.should include(:thing)
end
it "has only one instance of a thing it tagged multiple times in the list of things it tagged" do
@tag = Tag.instance('some_tag12')
@tag.tag(:thing)
@tag.tag(:thing)
@tag.things_tagged.should have(1).item
end
end
end
# EOF
|
tomtt/cucumber-skin | a82d171d04ea85849f381a0f79d86d959c903161 | Some todos added to README | diff --git a/README.txt b/README.txt
index 4a1bdbb..c34840e 100644
--- a/README.txt
+++ b/README.txt
@@ -1,48 +1,53 @@
cucumber-skin
by Tom ten Thij
http://tomtenthij.co.uk
== DESCRIPTION:
A cucumber formatter that generates html files to present test results
== FEATURES/PROBLEMS:
* FIXME (list of features or problems)
+* TODO
+ * Parse options
+ * Output directory
+ * Configuration yml file
+ * Serialization
== SYNOPSIS:
FIXME (code sample of usage)
== REQUIREMENTS:
* FIXME (list of requirements)
== INSTALL:
* FIXME (sudo gem install, anything else)
== LICENSE:
(The MIT License)
Copyright (c) 2008 FIXME (different license?)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
tomtt/cucumber-skin | 615c2d2e522e523bf1d287ff60621a50b2fae6fe | Minimal code to be able to use CucumberSkin::Formatter::HtmlFiles as a formatter using cucumber-skin command (involves a hack specific for local machine for now) | diff --git a/bin/cucumber-skin b/bin/cucumber-skin
old mode 100644
new mode 100755
index bd51321..b3e4b89
--- a/bin/cucumber-skin
+++ b/bin/cucumber-skin
@@ -1,8 +1,15 @@
#!/usr/bin/env ruby
require File.expand_path(
File.join(File.dirname(__FILE__), %w[.. lib cucumber-skin]))
# Put your code here
+# cuke_dir = File.dirname(__FILE__ + '.rb') + '/../lib'
+cuke_dir = '/Users/tomtt/created/project/externals/cucumber/lib'
+$:.unshift(cuke_dir) unless $:.include?(cuke_dir)
+
+require 'cucumber/cli/main'
+Cucumber::Cli::Main.execute(ARGV.dup)
+
# EOF
diff --git a/lib/cucumber-skin/formatter/html_files.rb b/lib/cucumber-skin/formatter/html_files.rb
index 77a9b67..8f9d62f 100644
--- a/lib/cucumber-skin/formatter/html_files.rb
+++ b/lib/cucumber-skin/formatter/html_files.rb
@@ -1,7 +1,17 @@
+require 'cucumber'
module CucumberSkin
module Formatter
class HtmlFiles
+ attr_accessor :options
+
+ def initialize(step_mother, io, options)
+ @options = options
+ end
+
+ def visit_features(features)
+ puts "Visiting features"
+ end
end
end
end
diff --git a/spec/cucumber-skin/formatter/html_files_spec.rb b/spec/cucumber-skin/formatter/html_files_spec.rb
index aa0e324..a0ac6ff 100644
--- a/spec/cucumber-skin/formatter/html_files_spec.rb
+++ b/spec/cucumber-skin/formatter/html_files_spec.rb
@@ -1,13 +1,16 @@
# spec/cucumber-skin/cucumber-skin/formatter/html_files_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
-# No need to type CucumberSkin:: before each call
-include CucumberSkin
+# No need to type CucumberSkin::Formatter before each call
+include CucumberSkin::Formatter
describe HtmlFiles do
+ it "should implement visit_features" do
+ HtmlFiles.new(:step_mother, :io, {}).should respond_to(:visit_features)
+ end
end
# EOF
|
tomtt/cucumber-skin | bcb0906160274ed65de7818177bd8d99188e96f4 | HtmlFiles formatter | diff --git a/lib/cucumber-skin/formatter/html_files.rb b/lib/cucumber-skin/formatter/html_files.rb
new file mode 100644
index 0000000..77a9b67
--- /dev/null
+++ b/lib/cucumber-skin/formatter/html_files.rb
@@ -0,0 +1,7 @@
+
+module CucumberSkin
+ module Formatter
+ class HtmlFiles
+ end
+ end
+end
diff --git a/spec/cucumber-skin/formatter/html_files_spec.rb b/spec/cucumber-skin/formatter/html_files_spec.rb
new file mode 100644
index 0000000..aa0e324
--- /dev/null
+++ b/spec/cucumber-skin/formatter/html_files_spec.rb
@@ -0,0 +1,13 @@
+# spec/cucumber-skin/cucumber-skin/formatter/html_files_spec.rb
+# $Id$
+
+# Require the spec helper relative to this file
+require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
+
+# No need to type CucumberSkin:: before each call
+include CucumberSkin
+
+describe HtmlFiles do
+end
+
+# EOF
|
tomtt/cucumber-skin | b465a35baf22572b6d027e1584dd201dee654d01 | Ignore pkg/ (for now?) | diff --git a/.gitignore b/.gitignore
index 7b0cd9d..ba9c631 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
.DS_Store
*~
*.swp
*.log
.project
tmp
+pkg
|
tomtt/cucumber-skin | 46e5ee188526e94e8617c5927b4cf908ed0efbb9 | Changed version to 0.0.1 | diff --git a/History.txt b/History.txt
index 529b2b0..ee47a23 100644
--- a/History.txt
+++ b/History.txt
@@ -1,4 +1,4 @@
-== 1.0.0 / 2009-03-25
+== 0.0.1 / 2009-03-25
* 1 major enhancement
* Birthday!
diff --git a/lib/cucumber-skin.rb b/lib/cucumber-skin.rb
index 5a1d0f9..57218f3 100644
--- a/lib/cucumber-skin.rb
+++ b/lib/cucumber-skin.rb
@@ -1,49 +1,49 @@
module CucumberSkin
# :stopdoc:
- VERSION = '1.0.0'
+ VERSION = '0.0.1'
LIBPATH = ::File.expand_path(::File.dirname(__FILE__)) + ::File::SEPARATOR
PATH = ::File.dirname(LIBPATH) + ::File::SEPARATOR
# :startdoc:
# Returns the version string for the library.
#
def self.version
VERSION
end
# Returns the library path for the module. If any arguments are given,
# they will be joined to the end of the libray path using
# <tt>File.join</tt>.
#
def self.libpath( *args )
args.empty? ? LIBPATH : ::File.join(LIBPATH, args.flatten)
end
# Returns the lpath for the module. If any arguments are given,
# they will be joined to the end of the path using
# <tt>File.join</tt>.
#
def self.path( *args )
args.empty? ? PATH : ::File.join(PATH, args.flatten)
end
# Utility method used to require all files ending in .rb that lie in the
# directory below this file that has the same name as the filename passed
# in. Optionally, a specific _directory_ name can be passed in such that
# the _filename_ does not have to be equivalent to the directory.
#
def self.require_all_libs_relative_to( fname, dir = nil )
dir ||= ::File.basename(fname, '.*')
search_me = ::File.expand_path(
::File.join(::File.dirname(fname), dir, '**', '*.rb'))
Dir.glob(search_me).sort.each {|rb| require rb}
end
end # module CucumberSkin
CucumberSkin.require_all_libs_relative_to(__FILE__)
# EOF
|
tomtt/cucumber-skin | 25d5b70b92429da7f8217be9e2ae6b6500b40f12 | Start on results gatherer | diff --git a/lib/cucumber-skin/results_gatherer.rb b/lib/cucumber-skin/results_gatherer.rb
new file mode 100644
index 0000000..50a7d18
--- /dev/null
+++ b/lib/cucumber-skin/results_gatherer.rb
@@ -0,0 +1,6 @@
+require 'cucumber'
+
+module CucumberSkin
+ class ResultsGatherer < Cucumber::Ast::Visitor
+ end
+end
diff --git a/spec/cucumber-skin/results_gatherer_spec.rb b/spec/cucumber-skin/results_gatherer_spec.rb
new file mode 100644
index 0000000..c34ff76
--- /dev/null
+++ b/spec/cucumber-skin/results_gatherer_spec.rb
@@ -0,0 +1,16 @@
+# spec/cucumber-skin/cucumber-skin/results_gatherer_spec.rb
+# $Id$
+
+# Require the spec helper relative to this file
+require File.join(File.dirname(__FILE__), %w[ .. spec_helper])
+
+# No need to type CucumberSkin:: before each call
+include CucumberSkin
+
+describe ResultsGatherer do
+ it "should inherit from the cucumber ast visitor" do
+ ResultsGatherer.new(:step_mother).should be_a(Cucumber::Ast::Visitor)
+ end
+end
+
+# EOF
|
tomtt/cucumber-skin | ef3d23b9cc670297fff53e7b9ab420afeb3a1011 | Comment with wild stab at what a step might become | diff --git a/lib/cucumber-skin/step.rb b/lib/cucumber-skin/step.rb
index bb3ab56..9a93d93 100644
--- a/lib/cucumber-skin/step.rb
+++ b/lib/cucumber-skin/step.rb
@@ -1,4 +1,5 @@
module CucumberSkin
+ # class Step < Struct.new(:keyword, :step_match, :status, :source_indent, :background)
class Step
end
end
|
tomtt/cucumber-skin | f1656dfdac554c451c485e1fbe1254ba1a82a688 | A scenario has steps | diff --git a/lib/cucumber-skin/scenario.rb b/lib/cucumber-skin/scenario.rb
index 550f6c2..63f6f3e 100644
--- a/lib/cucumber-skin/scenario.rb
+++ b/lib/cucumber-skin/scenario.rb
@@ -1,10 +1,11 @@
module CucumberSkin
class Scenario
attr_accessor :tags
+ attr_accessor :steps
def initialize
@tags = TagSet.new(self)
- @scenarios = []
+ @steps = []
end
end
end
diff --git a/lib/cucumber-skin/step.rb b/lib/cucumber-skin/step.rb
new file mode 100644
index 0000000..bb3ab56
--- /dev/null
+++ b/lib/cucumber-skin/step.rb
@@ -0,0 +1,4 @@
+module CucumberSkin
+ class Step
+ end
+end
diff --git a/spec/cucumber-skin/scenario_spec.rb b/spec/cucumber-skin/scenario_spec.rb
index 05c9c7b..eb3e575 100644
--- a/spec/cucumber-skin/scenario_spec.rb
+++ b/spec/cucumber-skin/scenario_spec.rb
@@ -1,66 +1,90 @@
# spec/cucumber-skin/cucumber-skin/scenario_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. spec_helper])
# No need to type CucumberSkin:: before each call
include CucumberSkin
describe Scenario do
describe 'tags' do
it "should have an empty list of tags" do
Scenario.new.tags.should == []
end
it "should contain a tag added to it" do
tag = Tag.instance('some tag')
scenario = Scenario.new
scenario.tags << tag
scenario.tags.should == [tag]
end
it "should contain all tags added to it" do
tag1 = Tag.instance('some tag')
tag2 = Tag.instance('some other tag')
tag3 = Tag.instance('last tag')
scenario = Scenario.new
scenario.tags << tag1
scenario.tags << tag3
scenario.tags << tag2
scenario.tags.should include(tag1, tag2, tag3)
end
it "can not contain a certain tag more than once" do
tag = Tag.instance('some tag')
scenario = Scenario.new
scenario.tags << tag
scenario.tags << tag
scenario.should have(1).tags
end
# This works out of the box, but just put this here to be sure
it "can not contain a tag with an identical string more than once" do
tag1 = Tag.instance('some tag')
tag2 = 'some tag'
scenario = Scenario.new
scenario.tags << tag1
scenario.tags << tag2
scenario.should have(1).tags
end
it "should allow you to add a string to its tags and convert it to a tag" do
scenario = Scenario.new
scenario.tags << 'string_tag'
scenario.tags.first.should be_instance_of(Tag)
end
it "should tag itself with the tag if a tag is added to it" do
scenario = Scenario.new
scenario.tags << 'string_tag'
Tag.instance('string_tag').things_tagged.should include(scenario)
end
end
+
+ describe 'steps' do
+ it "should have an empty list of steps" do
+ Scenario.new.steps.should == []
+ end
+
+ it "should contain a step added to it" do
+ step = Step.new
+ scenario = Scenario.new
+ scenario.steps << step
+ scenario.steps.should == [step]
+ end
+
+ it "should contain all steps added to it" do
+ step1 = Step.new()
+ step2 = Step.new()
+ step3 = Step.new()
+ scenario = Scenario.new
+ scenario.steps << step1
+ scenario.steps << step3
+ scenario.steps << step2
+ scenario.steps.should include(step1, step2, step3)
+ end
+ end
end
# EOF
diff --git a/spec/cucumber-skin/step_spec.rb b/spec/cucumber-skin/step_spec.rb
new file mode 100644
index 0000000..44b7b4b
--- /dev/null
+++ b/spec/cucumber-skin/step_spec.rb
@@ -0,0 +1,13 @@
+# spec/cucumber-skin/cucumber-skin/step_spec.rb
+# $Id$
+
+# Require the spec helper relative to this file
+require File.join(File.dirname(__FILE__), %w[ .. spec_helper])
+
+# No need to type CucumberSkin:: before each call
+include CucumberSkin
+
+describe Step do
+end
+
+# EOF
|
tomtt/cucumber-skin | 8e1deee486575b991a0a5c33d0cf811fa36745a0 | Use TagSet for scenario | diff --git a/lib/cucumber-skin/scenario.rb b/lib/cucumber-skin/scenario.rb
index ffe1afd..550f6c2 100644
--- a/lib/cucumber-skin/scenario.rb
+++ b/lib/cucumber-skin/scenario.rb
@@ -1,4 +1,10 @@
module CucumberSkin
class Scenario
+ attr_accessor :tags
+
+ def initialize
+ @tags = TagSet.new(self)
+ @scenarios = []
+ end
end
end
diff --git a/spec/cucumber-skin/scenario_spec.rb b/spec/cucumber-skin/scenario_spec.rb
index 77c89e5..05c9c7b 100644
--- a/spec/cucumber-skin/scenario_spec.rb
+++ b/spec/cucumber-skin/scenario_spec.rb
@@ -1,13 +1,66 @@
# spec/cucumber-skin/cucumber-skin/scenario_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. spec_helper])
# No need to type CucumberSkin:: before each call
include CucumberSkin
describe Scenario do
+ describe 'tags' do
+ it "should have an empty list of tags" do
+ Scenario.new.tags.should == []
+ end
+
+ it "should contain a tag added to it" do
+ tag = Tag.instance('some tag')
+ scenario = Scenario.new
+ scenario.tags << tag
+ scenario.tags.should == [tag]
+ end
+
+ it "should contain all tags added to it" do
+ tag1 = Tag.instance('some tag')
+ tag2 = Tag.instance('some other tag')
+ tag3 = Tag.instance('last tag')
+ scenario = Scenario.new
+ scenario.tags << tag1
+ scenario.tags << tag3
+ scenario.tags << tag2
+ scenario.tags.should include(tag1, tag2, tag3)
+ end
+
+ it "can not contain a certain tag more than once" do
+ tag = Tag.instance('some tag')
+ scenario = Scenario.new
+ scenario.tags << tag
+ scenario.tags << tag
+ scenario.should have(1).tags
+ end
+
+ # This works out of the box, but just put this here to be sure
+ it "can not contain a tag with an identical string more than once" do
+ tag1 = Tag.instance('some tag')
+ tag2 = 'some tag'
+ scenario = Scenario.new
+ scenario.tags << tag1
+ scenario.tags << tag2
+ scenario.should have(1).tags
+ end
+
+ it "should allow you to add a string to its tags and convert it to a tag" do
+ scenario = Scenario.new
+ scenario.tags << 'string_tag'
+ scenario.tags.first.should be_instance_of(Tag)
+ end
+
+ it "should tag itself with the tag if a tag is added to it" do
+ scenario = Scenario.new
+ scenario.tags << 'string_tag'
+ Tag.instance('string_tag').things_tagged.should include(scenario)
+ end
+ end
end
# EOF
|
tomtt/cucumber-skin | c612f9d668f1d7ba1aba00c6a08b770640636630 | Extracted TagSet class | diff --git a/lib/cucumber-skin/feature.rb b/lib/cucumber-skin/feature.rb
index e24de3a..a6a008b 100644
--- a/lib/cucumber-skin/feature.rb
+++ b/lib/cucumber-skin/feature.rb
@@ -1,30 +1,11 @@
module CucumberSkin
- class FeatureTagSet < Array
- # I chose to inherit from an Array in stead of a Set because an array is
- # more transparant to code with (returns [] when empty for example)
- alias_method :original_append, :<<
-
- def initialize(feature)
- @feature = feature
- end
-
- def <<(elem)
- tag = Tag.instance(elem)
- unless include?(tag)
- tag.tag(@feature)
- self.original_append(tag)
- end
- end
- end
-
class Feature
attr_accessor :tags
attr_accessor :scenarios
def initialize
- @tags = FeatureTagSet.new(self)
+ @tags = TagSet.new(self)
@scenarios = []
end
-
end
end
diff --git a/lib/cucumber-skin/tag_set.rb b/lib/cucumber-skin/tag_set.rb
new file mode 100644
index 0000000..a52c5fc
--- /dev/null
+++ b/lib/cucumber-skin/tag_set.rb
@@ -0,0 +1,19 @@
+module CucumberSkin
+ class TagSet < Array
+ # I chose to inherit from an Array in stead of a Set because an array is
+ # more transparant to code with (returns [] when empty for example)
+ alias_method :original_append, :<<
+
+ def initialize(owner)
+ @owner = owner
+ end
+
+ def <<(elem)
+ tag = Tag.instance(elem)
+ unless include?(tag)
+ tag.tag(@owner)
+ self.original_append(tag)
+ end
+ end
+ end
+end
diff --git a/spec/cucumber-skin/tag_set_spec.rb b/spec/cucumber-skin/tag_set_spec.rb
new file mode 100644
index 0000000..1312c86
--- /dev/null
+++ b/spec/cucumber-skin/tag_set_spec.rb
@@ -0,0 +1,30 @@
+# spec/cucumber-skin/cucumber-skin/tag_set_spec.rb
+# $Id$
+
+# Require the spec helper relative to this file
+require File.join(File.dirname(__FILE__), %w[ .. spec_helper])
+
+# No need to type CucumberSkin:: before each call
+include CucumberSkin
+
+describe TagSet do
+ it "should initially be empty" do
+ TagSet.new(:some_owner).should == []
+ end
+
+ it "should contain tags created from the elements added to it" do
+ tag_set = TagSet.new(:some_owner)
+ tag_set << :one_thing
+ tag_set << :another_thing
+ tag_set.should include(Tag.instance(:one_thing), Tag.instance(:another_thing))
+ end
+
+ it "should tag its owner with the element added to it" do
+ tag = Tag.instance(:aardvark)
+ tag_set = TagSet.new(:some_owner)
+ tag.should_receive(:tag).with(:some_owner)
+ tag_set << :aardvark
+ end
+end
+
+# EOF
|
tomtt/cucumber-skin | 6e383e5da52b858bcfa64fca37b4cc68700a85f5 | A feature tags itself with the tag if a tag is added to it | diff --git a/lib/cucumber-skin/feature.rb b/lib/cucumber-skin/feature.rb
index bab40ce..e24de3a 100644
--- a/lib/cucumber-skin/feature.rb
+++ b/lib/cucumber-skin/feature.rb
@@ -1,30 +1,30 @@
module CucumberSkin
class FeatureTagSet < Array
# I chose to inherit from an Array in stead of a Set because an array is
# more transparant to code with (returns [] when empty for example)
alias_method :original_append, :<<
def initialize(feature)
@feature = feature
end
def <<(elem)
tag = Tag.instance(elem)
- tag.tag(@feature)
unless include?(tag)
+ tag.tag(@feature)
self.original_append(tag)
end
end
end
class Feature
attr_accessor :tags
attr_accessor :scenarios
def initialize
@tags = FeatureTagSet.new(self)
@scenarios = []
end
end
end
|
tomtt/cucumber-skin | 3f299fcc6db981c9dcf6dccd3afc5b89a006b109 | A feature tags itself with the tag if a tag is added to it | diff --git a/lib/cucumber-skin/feature.rb b/lib/cucumber-skin/feature.rb
index 1a02ff2..bab40ce 100644
--- a/lib/cucumber-skin/feature.rb
+++ b/lib/cucumber-skin/feature.rb
@@ -1,25 +1,30 @@
module CucumberSkin
- class TagSet < Array
+ class FeatureTagSet < Array
# I chose to inherit from an Array in stead of a Set because an array is
# more transparant to code with (returns [] when empty for example)
alias_method :original_append, :<<
+ def initialize(feature)
+ @feature = feature
+ end
+
def <<(elem)
tag = Tag.instance(elem)
+ tag.tag(@feature)
unless include?(tag)
self.original_append(tag)
end
end
end
class Feature
attr_accessor :tags
attr_accessor :scenarios
def initialize
- @tags = TagSet.new
+ @tags = FeatureTagSet.new(self)
@scenarios = []
end
end
end
diff --git a/spec/cucumber-skin/feature_spec.rb b/spec/cucumber-skin/feature_spec.rb
index 4ca4759..87f33f4 100644
--- a/spec/cucumber-skin/feature_spec.rb
+++ b/spec/cucumber-skin/feature_spec.rb
@@ -1,84 +1,90 @@
# spec/cucumber-skin/cucumber-skin/feature_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. spec_helper])
# No need to type CucumberSkin:: before each call
include CucumberSkin
describe Feature do
describe 'tags' do
it "should have an empty list of tags" do
Feature.new.tags.should == []
end
it "should contain a tag added to it" do
tag = Tag.instance('some tag')
feature = Feature.new
feature.tags << tag
feature.tags.should == [tag]
end
it "should contain all tags added to it" do
tag1 = Tag.instance('some tag')
tag2 = Tag.instance('some other tag')
tag3 = Tag.instance('last tag')
feature = Feature.new
feature.tags << tag1
feature.tags << tag3
feature.tags << tag2
feature.tags.should include(tag1, tag2, tag3)
end
it "can not contain a certain tag more than once" do
tag = Tag.instance('some tag')
feature = Feature.new
feature.tags << tag
feature.tags << tag
feature.should have(1).tags
end
# This works out of the box, but just put this here to be sure
it "can not contain a tag with an identical string more than once" do
tag1 = Tag.instance('some tag')
tag2 = 'some tag'
feature = Feature.new
feature.tags << tag1
feature.tags << tag2
feature.should have(1).tags
end
it "should allow you to add a string to its tags and convert it to a tag" do
feature = Feature.new
feature.tags << 'string_tag'
feature.tags.first.should be_instance_of(Tag)
end
+
+ it "should tag itself with the tag if a tag is added to it" do
+ feature = Feature.new
+ feature.tags << 'string_tag'
+ Tag.instance('string_tag').things_tagged.should include(feature)
+ end
end
describe 'scenarios' do
it "should have an empty list of scenarios" do
Feature.new.scenarios.should == []
end
it "should contain a scenario added to it" do
scenario = Scenario.new
feature = Feature.new
feature.scenarios << scenario
feature.scenarios.should == [scenario]
end
it "should contain all scenarios added to it" do
scenario1 = Scenario.new()
scenario2 = Scenario.new()
scenario3 = Scenario.new()
feature = Feature.new
feature.scenarios << scenario1
feature.scenarios << scenario3
feature.scenarios << scenario2
feature.scenarios.should include(scenario1, scenario2, scenario3)
end
end
end
# EOF
|
tomtt/cucumber-skin | e8958110603bc8f2b23cc5afe677deb43c747aac | Maintaining a list of things a tag tagged | diff --git a/lib/cucumber-skin/tag.rb b/lib/cucumber-skin/tag.rb
index 5017dbf..0d15130 100644
--- a/lib/cucumber-skin/tag.rb
+++ b/lib/cucumber-skin/tag.rb
@@ -1,24 +1,33 @@
require 'singleton'
module CucumberSkin
class Tag
@@tags = {}
def self.instance(tag)
string = tag.to_s
@@tags[string] ||= Tag.send(:new, string)
end
def to_s
@string
end
+ def things_tagged
+ @things_tagged.uniq
+ end
+
+ def tag(thing)
+ @things_tagged << thing
+ end
+
private_class_method :new
private
def initialize(string)
@string = string
+ @things_tagged = []
end
end
end
diff --git a/spec/cucumber-skin/tag_spec.rb b/spec/cucumber-skin/tag_spec.rb
index 0074319..1d9e6ae 100644
--- a/spec/cucumber-skin/tag_spec.rb
+++ b/spec/cucumber-skin/tag_spec.rb
@@ -1,33 +1,53 @@
# spec/cucumber-skin/cucumber-skin/tag_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. spec_helper])
# No need to type CucumberSkin:: before each call
include CucumberSkin
describe Tag do
it "should not have a public initialize method" do
lambda { Tag.new('whatever') }.should raise_error(NoMethodError)
end
it "should return a tag instance from a string" do
- Tag.instance('some_tag').should be_instance_of(Tag)
+ Tag.instance('some_tag1').should be_instance_of(Tag)
end
it "should return a tag instance from a string which converts to that string" do
- Tag.instance('some_tag').to_s.should === 'some_tag'
+ Tag.instance('some_tag2').to_s.should === 'some_tag2'
end
it "should return identical tags for equal strings" do
- Tag.instance('some_tag').should be_equal(Tag.instance('some_tag'))
+ Tag.instance('some_tag3').should be_equal(Tag.instance('some_tag3'))
end
it "should return identical tags when created from a tag or a string" do
- tag = Tag.instance('some_tag')
+ tag = Tag.instance('some_tag4')
Tag.instance(tag).should be_equal(tag)
end
+
+ describe "things tagged" do
+ it "initially has nothing in the list of things it tagged" do
+ @tag = Tag.instance('some_tag10')
+ @tag.things_tagged.should be_empty
+ end
+
+ it "adds something that it tags to the list of things it tagged" do
+ @tag = Tag.instance('some_tag11')
+ @tag.tag(:thing)
+ @tag.things_tagged.should include(:thing)
+ end
+
+ it "has only one instance of a thing it tagged multiple times in the list of things it tagged" do
+ @tag = Tag.instance('some_tag12')
+ @tag.tag(:thing)
+ @tag.tag(:thing)
+ @tag.things_tagged.should have(1).item
+ end
+ end
end
# EOF
|
tomtt/cucumber-skin | 2e4f7173cbc856857a9fd4a6396ca58a84cbae52 | Tag class ensures there is only one instance of a certain tag | diff --git a/lib/cucumber-skin/feature.rb b/lib/cucumber-skin/feature.rb
index 9dea6b6..1a02ff2 100644
--- a/lib/cucumber-skin/feature.rb
+++ b/lib/cucumber-skin/feature.rb
@@ -1,27 +1,25 @@
module CucumberSkin
class TagSet < Array
# I chose to inherit from an Array in stead of a Set because an array is
# more transparant to code with (returns [] when empty for example)
alias_method :original_append, :<<
def <<(elem)
- unless(Tag === elem)
- elem = Tag.new(elem)
- end
- unless include?(elem)
- self.original_append(elem)
+ tag = Tag.instance(elem)
+ unless include?(tag)
+ self.original_append(tag)
end
end
end
class Feature
attr_accessor :tags
attr_accessor :scenarios
def initialize
@tags = TagSet.new
@scenarios = []
end
end
end
diff --git a/lib/cucumber-skin/tag.rb b/lib/cucumber-skin/tag.rb
index a62249b..5017dbf 100644
--- a/lib/cucumber-skin/tag.rb
+++ b/lib/cucumber-skin/tag.rb
@@ -1,4 +1,24 @@
+require 'singleton'
+
module CucumberSkin
- class Tag < Struct.new(:label)
+ class Tag
+ @@tags = {}
+
+ def self.instance(tag)
+ string = tag.to_s
+ @@tags[string] ||= Tag.send(:new, string)
+ end
+
+ def to_s
+ @string
+ end
+
+ private_class_method :new
+
+ private
+
+ def initialize(string)
+ @string = string
+ end
end
end
diff --git a/spec/cucumber-skin/feature_spec.rb b/spec/cucumber-skin/feature_spec.rb
index 104453b..4ca4759 100644
--- a/spec/cucumber-skin/feature_spec.rb
+++ b/spec/cucumber-skin/feature_spec.rb
@@ -1,84 +1,84 @@
# spec/cucumber-skin/cucumber-skin/feature_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. spec_helper])
# No need to type CucumberSkin:: before each call
include CucumberSkin
describe Feature do
describe 'tags' do
it "should have an empty list of tags" do
Feature.new.tags.should == []
end
it "should contain a tag added to it" do
- tag = Tag.new('some tag')
+ tag = Tag.instance('some tag')
feature = Feature.new
feature.tags << tag
feature.tags.should == [tag]
end
it "should contain all tags added to it" do
- tag1 = Tag.new('some tag')
- tag2 = Tag.new('some other tag')
- tag3 = Tag.new('last tag')
+ tag1 = Tag.instance('some tag')
+ tag2 = Tag.instance('some other tag')
+ tag3 = Tag.instance('last tag')
feature = Feature.new
feature.tags << tag1
feature.tags << tag3
feature.tags << tag2
feature.tags.should include(tag1, tag2, tag3)
end
it "can not contain a certain tag more than once" do
- tag = Tag.new('some tag')
+ tag = Tag.instance('some tag')
feature = Feature.new
feature.tags << tag
feature.tags << tag
feature.should have(1).tags
end
# This works out of the box, but just put this here to be sure
it "can not contain a tag with an identical string more than once" do
- tag1 = Tag.new('some tag')
+ tag1 = Tag.instance('some tag')
tag2 = 'some tag'
feature = Feature.new
feature.tags << tag1
feature.tags << tag2
feature.should have(1).tags
end
it "should allow you to add a string to its tags and convert it to a tag" do
feature = Feature.new
feature.tags << 'string_tag'
feature.tags.first.should be_instance_of(Tag)
end
end
describe 'scenarios' do
it "should have an empty list of scenarios" do
Feature.new.scenarios.should == []
end
it "should contain a scenario added to it" do
scenario = Scenario.new
feature = Feature.new
feature.scenarios << scenario
feature.scenarios.should == [scenario]
end
it "should contain all scenarios added to it" do
scenario1 = Scenario.new()
scenario2 = Scenario.new()
scenario3 = Scenario.new()
feature = Feature.new
feature.scenarios << scenario1
feature.scenarios << scenario3
feature.scenarios << scenario2
feature.scenarios.should include(scenario1, scenario2, scenario3)
end
end
end
# EOF
diff --git a/spec/cucumber-skin/tag_spec.rb b/spec/cucumber-skin/tag_spec.rb
index 8cba425..0074319 100644
--- a/spec/cucumber-skin/tag_spec.rb
+++ b/spec/cucumber-skin/tag_spec.rb
@@ -1,23 +1,33 @@
# spec/cucumber-skin/cucumber-skin/tag_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. spec_helper])
# No need to type CucumberSkin:: before each call
include CucumberSkin
describe Tag do
- it "should have the string it was initialized with as its label" do
- Tag.new('some_tag').label.should == 'some_tag'
+ it "should not have a public initialize method" do
+ lambda { Tag.new('whatever') }.should raise_error(NoMethodError)
end
- # This works out of the box, but just put this here to be sure
- it "should be identical if its label is identical" do
- tag1 = Tag.new('some tag')
- tag2 = Tag.new('some tag')
- tag1.should == tag2
+ it "should return a tag instance from a string" do
+ Tag.instance('some_tag').should be_instance_of(Tag)
+ end
+
+ it "should return a tag instance from a string which converts to that string" do
+ Tag.instance('some_tag').to_s.should === 'some_tag'
+ end
+
+ it "should return identical tags for equal strings" do
+ Tag.instance('some_tag').should be_equal(Tag.instance('some_tag'))
+ end
+
+ it "should return identical tags when created from a tag or a string" do
+ tag = Tag.instance('some_tag')
+ Tag.instance(tag).should be_equal(tag)
end
end
# EOF
|
tomtt/cucumber-skin | 3046b38dec5ca1325c9b5719e8ef999acd29fc18 | Files to allow specs to run using the spec command | diff --git a/script/spec b/script/spec
new file mode 100755
index 0000000..c54cba1
--- /dev/null
+++ b/script/spec
@@ -0,0 +1,5 @@
+#!/usr/bin/env ruby
+$LOAD_PATH.unshift(File.expand_path(File.dirname(__FILE__) + "/../vendor/plugins/rspec/lib"))
+require 'rubygems'
+require 'spec'
+exit ::Spec::Runner::CommandLine.run(::Spec::Runner::OptionParser.parse(ARGV, STDERR, STDOUT))
diff --git a/spec/spec.opts b/spec/spec.opts
new file mode 100644
index 0000000..bff8faf
--- /dev/null
+++ b/spec/spec.opts
@@ -0,0 +1,4 @@
+--colour
+--format p
+--loadby mtime
+--reverse
diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb
index 9f1208c..5b34334 100644
--- a/spec/spec_helper.rb
+++ b/spec/spec_helper.rb
@@ -1,18 +1,17 @@
+require 'ruby-debug'
require File.expand_path(
File.join(File.dirname(__FILE__), %w[.. lib cucumber-skin]))
Spec::Runner.configure do |config|
# == Mock Framework
#
# RSpec uses it's own mocking framework by default. If you prefer to
# use mocha, flexmock or RR, uncomment the appropriate line:
#
# config.mock_with :mocha
# config.mock_with :flexmock
# config.mock_with :rr
end
-require 'ruby-debug'
-
# EOF
|
tomtt/cucumber-skin | 326e2ad19471d49c45070bf2f4b2dbacead7a803 | Basic ability to add scenarios to features | diff --git a/lib/cucumber-skin/feature.rb b/lib/cucumber-skin/feature.rb
index 44ddef9..9dea6b6 100644
--- a/lib/cucumber-skin/feature.rb
+++ b/lib/cucumber-skin/feature.rb
@@ -1,25 +1,27 @@
module CucumberSkin
class TagSet < Array
# I chose to inherit from an Array in stead of a Set because an array is
# more transparant to code with (returns [] when empty for example)
alias_method :original_append, :<<
def <<(elem)
unless(Tag === elem)
elem = Tag.new(elem)
end
unless include?(elem)
self.original_append(elem)
end
end
end
class Feature
attr_accessor :tags
+ attr_accessor :scenarios
def initialize
@tags = TagSet.new
+ @scenarios = []
end
end
end
diff --git a/lib/cucumber-skin/scenario.rb b/lib/cucumber-skin/scenario.rb
new file mode 100644
index 0000000..ffe1afd
--- /dev/null
+++ b/lib/cucumber-skin/scenario.rb
@@ -0,0 +1,4 @@
+module CucumberSkin
+ class Scenario
+ end
+end
diff --git a/spec/cucumber-skin/feature/feature_spec.rb b/spec/cucumber-skin/feature/feature_spec.rb
index dabb65c..c41d8a8 100644
--- a/spec/cucumber-skin/feature/feature_spec.rb
+++ b/spec/cucumber-skin/feature/feature_spec.rb
@@ -1,60 +1,84 @@
# spec/cucumber-skin/cucumber-skin/feature_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
# No need to type CucumberSkin:: before each call
include CucumberSkin
describe Feature do
describe 'tags' do
it "should have an empty list of tags" do
Feature.new.tags.should == []
end
it "should contain a tag added to it" do
tag = Tag.new('some tag')
feature = Feature.new
feature.tags << tag
feature.tags.should == [tag]
end
it "should contain all tags added to it" do
tag1 = Tag.new('some tag')
tag2 = Tag.new('some other tag')
tag3 = Tag.new('last tag')
feature = Feature.new
feature.tags << tag1
feature.tags << tag3
feature.tags << tag2
feature.tags.should include(tag1, tag2, tag3)
end
it "can not contain a certain tag more than once" do
tag = Tag.new('some tag')
feature = Feature.new
feature.tags << tag
feature.tags << tag
feature.should have(1).tags
end
# This works out of the box, but just put this here to be sure
it "can not contain a tag with an identical string more than once" do
tag1 = Tag.new('some tag')
tag2 = 'some tag'
feature = Feature.new
feature.tags << tag1
feature.tags << tag2
feature.should have(1).tags
end
it "should allow you to add a string to its tags and convert it to a tag" do
feature = Feature.new
feature.tags << 'string_tag'
feature.tags.first.should be_instance_of(Tag)
end
end
+
+ describe 'scenarios' do
+ it "should have an empty list of scenarios" do
+ Feature.new.scenarios.should == []
+ end
+
+ it "should contain a scenario added to it" do
+ scenario = Scenario.new
+ feature = Feature.new
+ feature.scenarios << scenario
+ feature.scenarios.should == [scenario]
+ end
+
+ it "should contain all scenarios added to it" do
+ scenario1 = Scenario.new()
+ scenario2 = Scenario.new()
+ scenario3 = Scenario.new()
+ feature = Feature.new
+ feature.scenarios << scenario1
+ feature.scenarios << scenario3
+ feature.scenarios << scenario2
+ feature.scenarios.should include(scenario1, scenario2, scenario3)
+ end
+ end
end
# EOF
diff --git a/spec/cucumber-skin/feature/scenario_spec.rb b/spec/cucumber-skin/feature/scenario_spec.rb
new file mode 100644
index 0000000..6c3a034
--- /dev/null
+++ b/spec/cucumber-skin/feature/scenario_spec.rb
@@ -0,0 +1,13 @@
+# spec/cucumber-skin/cucumber-skin/scenario_spec.rb
+# $Id$
+
+# Require the spec helper relative to this file
+require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
+
+# No need to type CucumberSkin:: before each call
+include CucumberSkin
+
+describe Scenario do
+end
+
+# EOF
|
tomtt/cucumber-skin | 199c01156a0af5c06bde7b3766372dc465cafbe5 | Some tested feature and tag methods | diff --git a/lib/cucumber-skin/feature.rb b/lib/cucumber-skin/feature.rb
index 37832ac..44ddef9 100644
--- a/lib/cucumber-skin/feature.rb
+++ b/lib/cucumber-skin/feature.rb
@@ -1,6 +1,25 @@
-puts 'Required feature'
+module CucumberSkin
+ class TagSet < Array
+ # I chose to inherit from an Array in stead of a Set because an array is
+ # more transparant to code with (returns [] when empty for example)
+ alias_method :original_append, :<<
+
+ def <<(elem)
+ unless(Tag === elem)
+ elem = Tag.new(elem)
+ end
+ unless include?(elem)
+ self.original_append(elem)
+ end
+ end
+ end
-module Cucumber
class Feature
+ attr_accessor :tags
+
+ def initialize
+ @tags = TagSet.new
+ end
+
end
end
diff --git a/lib/cucumber-skin/tag.rb b/lib/cucumber-skin/tag.rb
new file mode 100644
index 0000000..a62249b
--- /dev/null
+++ b/lib/cucumber-skin/tag.rb
@@ -0,0 +1,4 @@
+module CucumberSkin
+ class Tag < Struct.new(:label)
+ end
+end
diff --git a/spec/cucumber-skin/feature/feature_spec.rb b/spec/cucumber-skin/feature/feature_spec.rb
index 4a40bf3..dabb65c 100644
--- a/spec/cucumber-skin/feature/feature_spec.rb
+++ b/spec/cucumber-skin/feature/feature_spec.rb
@@ -1,14 +1,60 @@
-# spec/cucumber-skin/feature/feature_spec.rb
+# spec/cucumber-skin/cucumber-skin/feature_spec.rb
# $Id$
# Require the spec helper relative to this file
require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
-# No need to type Cucumber:: before each call
-include Cucumber
+# No need to type CucumberSkin:: before each call
+include CucumberSkin
describe Feature do
- # All of our specs for Feature will go in here
+ describe 'tags' do
+ it "should have an empty list of tags" do
+ Feature.new.tags.should == []
+ end
+
+ it "should contain a tag added to it" do
+ tag = Tag.new('some tag')
+ feature = Feature.new
+ feature.tags << tag
+ feature.tags.should == [tag]
+ end
+
+ it "should contain all tags added to it" do
+ tag1 = Tag.new('some tag')
+ tag2 = Tag.new('some other tag')
+ tag3 = Tag.new('last tag')
+ feature = Feature.new
+ feature.tags << tag1
+ feature.tags << tag3
+ feature.tags << tag2
+ feature.tags.should include(tag1, tag2, tag3)
+ end
+
+ it "can not contain a certain tag more than once" do
+ tag = Tag.new('some tag')
+ feature = Feature.new
+ feature.tags << tag
+ feature.tags << tag
+ feature.should have(1).tags
+ end
+
+ # This works out of the box, but just put this here to be sure
+ it "can not contain a tag with an identical string more than once" do
+ tag1 = Tag.new('some tag')
+ tag2 = 'some tag'
+ feature = Feature.new
+ feature.tags << tag1
+ feature.tags << tag2
+ feature.should have(1).tags
+ end
+
+ it "should allow you to add a string to its tags and convert it to a tag" do
+ feature = Feature.new
+ feature.tags << 'string_tag'
+ feature.tags.first.should be_instance_of(Tag)
+ end
+ end
end
# EOF
diff --git a/spec/cucumber-skin/feature/tag_spec.rb b/spec/cucumber-skin/feature/tag_spec.rb
new file mode 100644
index 0000000..6a5f98c
--- /dev/null
+++ b/spec/cucumber-skin/feature/tag_spec.rb
@@ -0,0 +1,23 @@
+# spec/cucumber-skin/cucumber-skin/tag_spec.rb
+# $Id$
+
+# Require the spec helper relative to this file
+require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
+
+# No need to type CucumberSkin:: before each call
+include CucumberSkin
+
+describe Tag do
+ it "should have the string it was initialized with as its label" do
+ Tag.new('some_tag').label.should == 'some_tag'
+ end
+
+ # This works out of the box, but just put this here to be sure
+ it "should be identical if its label is identical" do
+ tag1 = Tag.new('some tag')
+ tag2 = Tag.new('some tag')
+ tag1.should == tag2
+ end
+end
+
+# EOF
diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb
index 51e2d94..9f1208c 100644
--- a/spec/spec_helper.rb
+++ b/spec/spec_helper.rb
@@ -1,16 +1,18 @@
require File.expand_path(
File.join(File.dirname(__FILE__), %w[.. lib cucumber-skin]))
Spec::Runner.configure do |config|
# == Mock Framework
#
# RSpec uses it's own mocking framework by default. If you prefer to
# use mocha, flexmock or RR, uncomment the appropriate line:
#
# config.mock_with :mocha
# config.mock_with :flexmock
# config.mock_with :rr
end
+require 'ruby-debug'
+
# EOF
|
tomtt/cucumber-skin | e7110d890ec42f9f44b090e9d19f3741fad7aa5c | Make specs works | diff --git a/lib/cucumber-skin/feature.rb b/lib/cucumber-skin/feature.rb
new file mode 100644
index 0000000..37832ac
--- /dev/null
+++ b/lib/cucumber-skin/feature.rb
@@ -0,0 +1,6 @@
+puts 'Required feature'
+
+module Cucumber
+ class Feature
+ end
+end
diff --git a/spec/cucumber-skin/feature/feature_spec.rb b/spec/cucumber-skin/feature/feature_spec.rb
new file mode 100644
index 0000000..4a40bf3
--- /dev/null
+++ b/spec/cucumber-skin/feature/feature_spec.rb
@@ -0,0 +1,14 @@
+# spec/cucumber-skin/feature/feature_spec.rb
+# $Id$
+
+# Require the spec helper relative to this file
+require File.join(File.dirname(__FILE__), %w[ .. .. spec_helper])
+
+# No need to type Cucumber:: before each call
+include Cucumber
+
+describe Feature do
+ # All of our specs for Feature will go in here
+end
+
+# EOF
|
tomtt/cucumber-skin | 4aa156304d8d6b3e44b5b9dbfb3c7ba118455a05 | Initial mr bones gem structure | diff --git a/.bnsignore b/.bnsignore
new file mode 100644
index 0000000..c8a51f2
--- /dev/null
+++ b/.bnsignore
@@ -0,0 +1,16 @@
+# The list of files that should be ignored by Mr Bones.
+# Lines that start with '#' are comments.
+#
+# A .gitignore file can be used instead by setting it as the ignore
+# file in your Rakefile:
+#
+# PROJ.ignore_file = '.gitignore'
+#
+# For a project with a C extension, the following would be a good set of
+# exclude patterns (uncomment them if you want to use them):
+# *.[oa]
+# *~
+announcement.txt
+coverage
+doc
+pkg
diff --git a/History.txt b/History.txt
new file mode 100644
index 0000000..529b2b0
--- /dev/null
+++ b/History.txt
@@ -0,0 +1,4 @@
+== 1.0.0 / 2009-03-25
+
+* 1 major enhancement
+ * Birthday!
diff --git a/README.txt b/README.txt
new file mode 100644
index 0000000..4a1bdbb
--- /dev/null
+++ b/README.txt
@@ -0,0 +1,48 @@
+cucumber-skin
+ by Tom ten Thij
+ http://tomtenthij.co.uk
+
+== DESCRIPTION:
+
+A cucumber formatter that generates html files to present test results
+
+== FEATURES/PROBLEMS:
+
+* FIXME (list of features or problems)
+
+== SYNOPSIS:
+
+ FIXME (code sample of usage)
+
+== REQUIREMENTS:
+
+* FIXME (list of requirements)
+
+== INSTALL:
+
+* FIXME (sudo gem install, anything else)
+
+== LICENSE:
+
+(The MIT License)
+
+Copyright (c) 2008 FIXME (different license?)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+'Software'), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Rakefile b/Rakefile
new file mode 100644
index 0000000..5d0734b
--- /dev/null
+++ b/Rakefile
@@ -0,0 +1,30 @@
+# Look in the tasks/setup.rb file for the various options that can be
+# configured in this Rakefile. The .rake files in the tasks directory
+# are where the options are used.
+
+begin
+ require 'bones'
+ Bones.setup
+rescue LoadError
+ begin
+ load 'tasks/setup.rb'
+ rescue LoadError
+ raise RuntimeError, '### please install the "bones" gem ###'
+ end
+end
+
+ensure_in_path 'lib'
+require 'cucumber-skin'
+
+task :default => 'spec:run'
+
+PROJ.name = 'cucumber-skin'
+PROJ.authors = 'Tom ten Thij'
+PROJ.email = '[email protected]'
+PROJ.url = 'http://github.com/tomtt/cucumber-skin/'
+PROJ.version = CucumberSkin::VERSION
+PROJ.rubyforge.name = 'cucumber-skin'
+
+PROJ.spec.opts << '--color'
+
+# EOF
diff --git a/bin/cucumber-skin b/bin/cucumber-skin
new file mode 100644
index 0000000..bd51321
--- /dev/null
+++ b/bin/cucumber-skin
@@ -0,0 +1,8 @@
+#!/usr/bin/env ruby
+
+require File.expand_path(
+ File.join(File.dirname(__FILE__), %w[.. lib cucumber-skin]))
+
+# Put your code here
+
+# EOF
diff --git a/lib/cucumber-skin.rb b/lib/cucumber-skin.rb
new file mode 100644
index 0000000..5a1d0f9
--- /dev/null
+++ b/lib/cucumber-skin.rb
@@ -0,0 +1,49 @@
+
+module CucumberSkin
+
+ # :stopdoc:
+ VERSION = '1.0.0'
+ LIBPATH = ::File.expand_path(::File.dirname(__FILE__)) + ::File::SEPARATOR
+ PATH = ::File.dirname(LIBPATH) + ::File::SEPARATOR
+ # :startdoc:
+
+ # Returns the version string for the library.
+ #
+ def self.version
+ VERSION
+ end
+
+ # Returns the library path for the module. If any arguments are given,
+ # they will be joined to the end of the libray path using
+ # <tt>File.join</tt>.
+ #
+ def self.libpath( *args )
+ args.empty? ? LIBPATH : ::File.join(LIBPATH, args.flatten)
+ end
+
+ # Returns the lpath for the module. If any arguments are given,
+ # they will be joined to the end of the path using
+ # <tt>File.join</tt>.
+ #
+ def self.path( *args )
+ args.empty? ? PATH : ::File.join(PATH, args.flatten)
+ end
+
+ # Utility method used to require all files ending in .rb that lie in the
+ # directory below this file that has the same name as the filename passed
+ # in. Optionally, a specific _directory_ name can be passed in such that
+ # the _filename_ does not have to be equivalent to the directory.
+ #
+ def self.require_all_libs_relative_to( fname, dir = nil )
+ dir ||= ::File.basename(fname, '.*')
+ search_me = ::File.expand_path(
+ ::File.join(::File.dirname(fname), dir, '**', '*.rb'))
+
+ Dir.glob(search_me).sort.each {|rb| require rb}
+ end
+
+end # module CucumberSkin
+
+CucumberSkin.require_all_libs_relative_to(__FILE__)
+
+# EOF
diff --git a/spec/cucumber-skin_spec.rb b/spec/cucumber-skin_spec.rb
new file mode 100644
index 0000000..06c46a6
--- /dev/null
+++ b/spec/cucumber-skin_spec.rb
@@ -0,0 +1,7 @@
+
+require File.join(File.dirname(__FILE__), %w[spec_helper])
+
+describe CucumberSkin do
+end
+
+# EOF
diff --git a/spec/spec_helper.rb b/spec/spec_helper.rb
new file mode 100644
index 0000000..51e2d94
--- /dev/null
+++ b/spec/spec_helper.rb
@@ -0,0 +1,16 @@
+
+require File.expand_path(
+ File.join(File.dirname(__FILE__), %w[.. lib cucumber-skin]))
+
+Spec::Runner.configure do |config|
+ # == Mock Framework
+ #
+ # RSpec uses it's own mocking framework by default. If you prefer to
+ # use mocha, flexmock or RR, uncomment the appropriate line:
+ #
+ # config.mock_with :mocha
+ # config.mock_with :flexmock
+ # config.mock_with :rr
+end
+
+# EOF
diff --git a/test/test_cucumber-skin.rb b/test/test_cucumber-skin.rb
new file mode 100644
index 0000000..e69de29
|
tomtt/cucumber-skin | 8aec8f0150f11ce361742bbe9324bb1b08fd84ce | Start of cucumber-skin repos | diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..7b0cd9d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+.DS_Store
+*~
+*.swp
+*.log
+.project
+tmp
|
dojo4/ey-cloud-recipes | 7b554b8f454aa00ae31678a400db611e3e8c2a79 | switching to default mongodb noauth | diff --git a/cookbooks/mongodb/files/default/mongodb-master.conf b/cookbooks/mongodb/files/default/mongodb-master.conf
index 1d5c680..1d00c12 100644
--- a/cookbooks/mongodb/files/default/mongodb-master.conf
+++ b/cookbooks/mongodb/files/default/mongodb-master.conf
@@ -1,12 +1,12 @@
# Mongodb essentials
MONGODB_EXEC="/usr/bin/mongod"
MONGODB_DATA="/db/mongodb/master-data"
MONGODB_LOG="/var/log/mongodb/mongodb.log"
MONGODB_USER="mongodb"
MONGODB_PID_FILE="/var/run/mongodb/mongodb.pid"
MONGODB_IP="0.0.0.0"
MONGODB_PORT="27017"
# Set extra options here, such as disabling the admin web server
-MONGODB_OPTIONS="--auth --master" # add -vvvvv for verbose logging
+#MONGODB_OPTIONS="--auth --master" # add -vvvvv for verbose logging
|
dojo4/ey-cloud-recipes | 1c220fee49684a4492b288f7633ca1f103d13c9c | un-commented out mongodb user creation | diff --git a/cookbooks/mongodb/recipes/default.rb b/cookbooks/mongodb/recipes/default.rb
index 9882e51..ba220d9 100644
--- a/cookbooks/mongodb/recipes/default.rb
+++ b/cookbooks/mongodb/recipes/default.rb
@@ -1,99 +1,99 @@
#
# Cookbook Name:: mongodb
# Recipe:: default
#
# NOTE: Be sure to edit files/default/mongodb-slave.conf
# If you plan on using replication with a database slave
if ['db_master','db_slave','solo'].include?(node[:instance_role])
package "dev-db/mongodb-bin" do
action :install
end
directory '/db/mongodb/master-data' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/db/mongodb/slave-data' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/var/log/mongodb' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/var/run/mongodb' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
remote_file "/etc/logrotate.d/mongodb" do
owner "root"
group "root"
mode 0755
source "mongodb.logrotate"
backup false
action :create
end
remote_file "/etc/conf.d/mongodb" do
owner "root"
group "root"
mode 0755
source "mongodb-master.conf" if node[:instance_role] == 'solo'
source "mongodb-master.conf" if node[:instance_role] == 'db_master'
source "mongodb-slave.conf" if node[:instance_role] == 'db_slave'
backup false
action :create
end
execute "enable-mongodb" do
command "rc-update add mongodb default"
action :run
end
execute "start-mongodb" do
command "/etc/init.d/mongodb restart"
action :run
not_if "/etc/init.d/mongodb status | grep started"
end
node[:applications].each do |app_name,data|
user = node[:users].first
db_name = "#{app_name}_#{node[:environment][:framework_env]}"
- # execute "create-mongodb-root-user" do
- # command "/usr/bin/mongo admin --eval 'db.addUser(\"root\",\"#{user[:password]}\")'"
- # action :run
- # not_if "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\")' | grep ^1$"
- # end
-
- # execute "create-mongodb-replication-user" do
- # command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"local\").addUser(\"repl\",\"#{user[:password]}\")'"
- # action :run
- # not_if "/usr/bin/mongo local --eval 'db.auth(\"repl\",\"#{user[:password]}\")' | grep ^1$"
- # end
+ execute "create-mongodb-root-user" do
+ command "/usr/bin/mongo admin --eval 'db.addUser(\"root\",\"#{user[:password]}\")'"
+ action :run
+ not_if "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\")' | grep ^1$"
+ end
+
+ execute "create-mongodb-replication-user" do
+ command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"local\").addUser(\"repl\",\"#{user[:password]}\")'"
+ action :run
+ not_if "/usr/bin/mongo local --eval 'db.auth(\"repl\",\"#{user[:password]}\")' | grep ^1$"
+ end
- # execute "create-mongodb-application-users" do
- # command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"#{db_name}\").addUser(\"#{user[:username]}\",\"#{user[:password]}\")'"
- # action :run
- # not_if "/usr/bin/mongo #{db_name} --eval 'db.auth(\"#{user[:username]}\",\"#{user[:password]}\")' | grep ^1$"
- # end
+ execute "create-mongodb-application-users" do
+ command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"#{db_name}\").addUser(\"#{user[:username]}\",\"#{user[:password]}\")'"
+ action :run
+ not_if "/usr/bin/mongo #{db_name} --eval 'db.auth(\"#{user[:username]}\",\"#{user[:password]}\")' | grep ^1$"
+ end
end
end
|
dojo4/ey-cloud-recipes | 93f4522fde98690bc7e0efc486b5a3306a9b0150 | commented out mongodb user creation | diff --git a/cookbooks/mongodb/recipes/default.rb b/cookbooks/mongodb/recipes/default.rb
index 7b3bbb6..9882e51 100644
--- a/cookbooks/mongodb/recipes/default.rb
+++ b/cookbooks/mongodb/recipes/default.rb
@@ -1,99 +1,99 @@
#
# Cookbook Name:: mongodb
# Recipe:: default
#
# NOTE: Be sure to edit files/default/mongodb-slave.conf
# If you plan on using replication with a database slave
if ['db_master','db_slave','solo'].include?(node[:instance_role])
package "dev-db/mongodb-bin" do
action :install
end
directory '/db/mongodb/master-data' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/db/mongodb/slave-data' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/var/log/mongodb' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/var/run/mongodb' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
remote_file "/etc/logrotate.d/mongodb" do
owner "root"
group "root"
mode 0755
source "mongodb.logrotate"
backup false
action :create
end
remote_file "/etc/conf.d/mongodb" do
owner "root"
group "root"
mode 0755
source "mongodb-master.conf" if node[:instance_role] == 'solo'
source "mongodb-master.conf" if node[:instance_role] == 'db_master'
source "mongodb-slave.conf" if node[:instance_role] == 'db_slave'
backup false
action :create
end
execute "enable-mongodb" do
command "rc-update add mongodb default"
action :run
end
execute "start-mongodb" do
command "/etc/init.d/mongodb restart"
action :run
not_if "/etc/init.d/mongodb status | grep started"
end
node[:applications].each do |app_name,data|
user = node[:users].first
db_name = "#{app_name}_#{node[:environment][:framework_env]}"
- execute "create-mongodb-root-user" do
+ # execute "create-mongodb-root-user" do
# command "/usr/bin/mongo admin --eval 'db.addUser(\"root\",\"#{user[:password]}\")'"
# action :run
# not_if "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\")' | grep ^1$"
- end
+ # end
- execute "create-mongodb-replication-user" do
+ # execute "create-mongodb-replication-user" do
# command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"local\").addUser(\"repl\",\"#{user[:password]}\")'"
# action :run
# not_if "/usr/bin/mongo local --eval 'db.auth(\"repl\",\"#{user[:password]}\")' | grep ^1$"
- end
+ # end
- execute "create-mongodb-application-users" do
+ # execute "create-mongodb-application-users" do
# command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"#{db_name}\").addUser(\"#{user[:username]}\",\"#{user[:password]}\")'"
# action :run
# not_if "/usr/bin/mongo #{db_name} --eval 'db.auth(\"#{user[:username]}\",\"#{user[:password]}\")' | grep ^1$"
- end
+ # end
end
end
|
dojo4/ey-cloud-recipes | 565fd441149b8f654a5c71c82406c6589c373d4d | commented out mongodb user creation | diff --git a/cookbooks/mongodb/recipes/default.rb b/cookbooks/mongodb/recipes/default.rb
index f5ce14a..7b3bbb6 100644
--- a/cookbooks/mongodb/recipes/default.rb
+++ b/cookbooks/mongodb/recipes/default.rb
@@ -1,99 +1,99 @@
#
# Cookbook Name:: mongodb
# Recipe:: default
#
# NOTE: Be sure to edit files/default/mongodb-slave.conf
# If you plan on using replication with a database slave
if ['db_master','db_slave','solo'].include?(node[:instance_role])
package "dev-db/mongodb-bin" do
action :install
end
directory '/db/mongodb/master-data' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/db/mongodb/slave-data' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/var/log/mongodb' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/var/run/mongodb' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
remote_file "/etc/logrotate.d/mongodb" do
owner "root"
group "root"
mode 0755
source "mongodb.logrotate"
backup false
action :create
end
remote_file "/etc/conf.d/mongodb" do
owner "root"
group "root"
mode 0755
source "mongodb-master.conf" if node[:instance_role] == 'solo'
source "mongodb-master.conf" if node[:instance_role] == 'db_master'
source "mongodb-slave.conf" if node[:instance_role] == 'db_slave'
backup false
action :create
end
execute "enable-mongodb" do
command "rc-update add mongodb default"
action :run
end
execute "start-mongodb" do
command "/etc/init.d/mongodb restart"
action :run
not_if "/etc/init.d/mongodb status | grep started"
end
node[:applications].each do |app_name,data|
user = node[:users].first
db_name = "#{app_name}_#{node[:environment][:framework_env]}"
execute "create-mongodb-root-user" do
- command "/usr/bin/mongo admin --eval 'db.addUser(\"root\",\"#{user[:password]}\")'"
- action :run
- not_if "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\")' | grep ^1$"
+ # command "/usr/bin/mongo admin --eval 'db.addUser(\"root\",\"#{user[:password]}\")'"
+ # action :run
+ # not_if "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\")' | grep ^1$"
end
execute "create-mongodb-replication-user" do
- command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"local\").addUser(\"repl\",\"#{user[:password]}\")'"
- action :run
- not_if "/usr/bin/mongo local --eval 'db.auth(\"repl\",\"#{user[:password]}\")' | grep ^1$"
+ # command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"local\").addUser(\"repl\",\"#{user[:password]}\")'"
+ # action :run
+ # not_if "/usr/bin/mongo local --eval 'db.auth(\"repl\",\"#{user[:password]}\")' | grep ^1$"
end
execute "create-mongodb-application-users" do
- command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"#{db_name}\").addUser(\"#{user[:username]}\",\"#{user[:password]}\")'"
- action :run
- not_if "/usr/bin/mongo #{db_name} --eval 'db.auth(\"#{user[:username]}\",\"#{user[:password]}\")' | grep ^1$"
+ # command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"#{db_name}\").addUser(\"#{user[:username]}\",\"#{user[:password]}\")'"
+ # action :run
+ # not_if "/usr/bin/mongo #{db_name} --eval 'db.auth(\"#{user[:username]}\",\"#{user[:password]}\")' | grep ^1$"
end
end
end
|
dojo4/ey-cloud-recipes | 7b6d499bf43b9b74dfaa072df4375b31aad6b175 | modified mongo recipe to run on solo node configurationOB | diff --git a/cookbooks/mongodb/recipes/default.rb b/cookbooks/mongodb/recipes/default.rb
index 8377a1f..f5ce14a 100644
--- a/cookbooks/mongodb/recipes/default.rb
+++ b/cookbooks/mongodb/recipes/default.rb
@@ -1,98 +1,99 @@
#
# Cookbook Name:: mongodb
# Recipe:: default
#
# NOTE: Be sure to edit files/default/mongodb-slave.conf
# If you plan on using replication with a database slave
if ['db_master','db_slave','solo'].include?(node[:instance_role])
package "dev-db/mongodb-bin" do
action :install
end
directory '/db/mongodb/master-data' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/db/mongodb/slave-data' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/var/log/mongodb' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/var/run/mongodb' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
remote_file "/etc/logrotate.d/mongodb" do
owner "root"
group "root"
mode 0755
source "mongodb.logrotate"
backup false
action :create
end
remote_file "/etc/conf.d/mongodb" do
owner "root"
group "root"
mode 0755
+ source "mongodb-master.conf" if node[:instance_role] == 'solo'
source "mongodb-master.conf" if node[:instance_role] == 'db_master'
source "mongodb-slave.conf" if node[:instance_role] == 'db_slave'
backup false
action :create
end
execute "enable-mongodb" do
command "rc-update add mongodb default"
action :run
end
execute "start-mongodb" do
command "/etc/init.d/mongodb restart"
action :run
not_if "/etc/init.d/mongodb status | grep started"
end
node[:applications].each do |app_name,data|
user = node[:users].first
db_name = "#{app_name}_#{node[:environment][:framework_env]}"
execute "create-mongodb-root-user" do
command "/usr/bin/mongo admin --eval 'db.addUser(\"root\",\"#{user[:password]}\")'"
action :run
not_if "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\")' | grep ^1$"
end
execute "create-mongodb-replication-user" do
command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"local\").addUser(\"repl\",\"#{user[:password]}\")'"
action :run
not_if "/usr/bin/mongo local --eval 'db.auth(\"repl\",\"#{user[:password]}\")' | grep ^1$"
end
execute "create-mongodb-application-users" do
command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"#{db_name}\").addUser(\"#{user[:username]}\",\"#{user[:password]}\")'"
action :run
not_if "/usr/bin/mongo #{db_name} --eval 'db.auth(\"#{user[:username]}\",\"#{user[:password]}\")' | grep ^1$"
end
end
end
|
dojo4/ey-cloud-recipes | d51c8b8c01e52b85995a52668f66ed2c9f125192 | modified mongo recipe to run on solo node configuration | diff --git a/cookbooks/mongodb/recipes/default.rb b/cookbooks/mongodb/recipes/default.rb
index 6d98c1c..8377a1f 100644
--- a/cookbooks/mongodb/recipes/default.rb
+++ b/cookbooks/mongodb/recipes/default.rb
@@ -1,98 +1,98 @@
#
# Cookbook Name:: mongodb
# Recipe:: default
#
# NOTE: Be sure to edit files/default/mongodb-slave.conf
# If you plan on using replication with a database slave
-if ['db_master','db_slave'].include?(node[:instance_role])
+if ['db_master','db_slave','solo'].include?(node[:instance_role])
package "dev-db/mongodb-bin" do
action :install
end
directory '/db/mongodb/master-data' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/db/mongodb/slave-data' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/var/log/mongodb' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
directory '/var/run/mongodb' do
owner 'mongodb'
group 'mongodb'
mode '0755'
action :create
recursive true
end
remote_file "/etc/logrotate.d/mongodb" do
owner "root"
group "root"
mode 0755
source "mongodb.logrotate"
backup false
action :create
end
remote_file "/etc/conf.d/mongodb" do
owner "root"
group "root"
mode 0755
source "mongodb-master.conf" if node[:instance_role] == 'db_master'
source "mongodb-slave.conf" if node[:instance_role] == 'db_slave'
backup false
action :create
end
execute "enable-mongodb" do
command "rc-update add mongodb default"
action :run
end
execute "start-mongodb" do
command "/etc/init.d/mongodb restart"
action :run
not_if "/etc/init.d/mongodb status | grep started"
end
node[:applications].each do |app_name,data|
user = node[:users].first
db_name = "#{app_name}_#{node[:environment][:framework_env]}"
execute "create-mongodb-root-user" do
command "/usr/bin/mongo admin --eval 'db.addUser(\"root\",\"#{user[:password]}\")'"
action :run
not_if "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\")' | grep ^1$"
end
execute "create-mongodb-replication-user" do
command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"local\").addUser(\"repl\",\"#{user[:password]}\")'"
action :run
not_if "/usr/bin/mongo local --eval 'db.auth(\"repl\",\"#{user[:password]}\")' | grep ^1$"
end
execute "create-mongodb-application-users" do
command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"#{db_name}\").addUser(\"#{user[:username]}\",\"#{user[:password]}\")'"
action :run
not_if "/usr/bin/mongo #{db_name} --eval 'db.auth(\"#{user[:username]}\",\"#{user[:password]}\")' | grep ^1$"
end
end
-end
\ No newline at end of file
+end
|
dojo4/ey-cloud-recipes | f40286ce2d66886631f609b70ea8c6ad7c503088 | switching mongodb recipe to bratta implementation | diff --git a/cookbooks/mongodb/files/default/mongodb b/cookbooks/mongodb/files/default/mongodb
deleted file mode 100644
index 88036e3..0000000
--- a/cookbooks/mongodb/files/default/mongodb
+++ /dev/null
@@ -1,17 +0,0 @@
-depend() {
- use net
-}
-
-start() {
- ebegin "Starting mongodb"
- /usr/local/mongodb/bin/mongod --master --port 27017 --dbpath /data/masterdb/ > /dev/null 2>&1 &
- /usr/local/mongodb/bin/mongod --slave --port 27018 --dbpath /data/slavedb/ > /dev/null 2>&1 &
- eend $?
-}
-
-stop() {
- ebegin "Stopping mongodb"
- start-stop-daemon --stop --quiet --exec /usr/local/mongodb/bin/mongod
- eend $?
-}
-
diff --git a/cookbooks/mongodb/files/default/mongodb-master.conf b/cookbooks/mongodb/files/default/mongodb-master.conf
new file mode 100644
index 0000000..1d5c680
--- /dev/null
+++ b/cookbooks/mongodb/files/default/mongodb-master.conf
@@ -0,0 +1,12 @@
+# Mongodb essentials
+MONGODB_EXEC="/usr/bin/mongod"
+MONGODB_DATA="/db/mongodb/master-data"
+MONGODB_LOG="/var/log/mongodb/mongodb.log"
+MONGODB_USER="mongodb"
+MONGODB_PID_FILE="/var/run/mongodb/mongodb.pid"
+
+MONGODB_IP="0.0.0.0"
+MONGODB_PORT="27017"
+
+# Set extra options here, such as disabling the admin web server
+MONGODB_OPTIONS="--auth --master" # add -vvvvv for verbose logging
diff --git a/cookbooks/mongodb/files/default/mongodb-slave.conf b/cookbooks/mongodb/files/default/mongodb-slave.conf
new file mode 100644
index 0000000..730ba53
--- /dev/null
+++ b/cookbooks/mongodb/files/default/mongodb-slave.conf
@@ -0,0 +1,12 @@
+# Mongodb essentials
+MONGODB_EXEC="/usr/bin/mongod"
+MONGODB_DATA="/db/mongodb/slave-data"
+MONGODB_LOG="/var/log/mongodb/mongodb.log"
+MONGODB_USER="mongodb"
+MONGODB_PID_FILE="/var/run/mongodb/mongodb.pid"
+
+MONGODB_IP="0.0.0.0"
+MONGODB_PORT="27017"
+
+# Set extra options here, such as disabling the admin web server
+MONGODB_OPTIONS="--auth --slave --source=10.210.191.117" # add -vvvvv for verbose logging
diff --git a/cookbooks/mongodb/files/default/mongodb.logrotate b/cookbooks/mongodb/files/default/mongodb.logrotate
new file mode 100644
index 0000000..235ac09
--- /dev/null
+++ b/cookbooks/mongodb/files/default/mongodb.logrotate
@@ -0,0 +1,13 @@
+/var/log/mongodb/mongodb.log {
+ monthly
+ rotate 10
+ size 5M
+ compress
+ delaycompress
+ notifempty
+ sharedscripts
+ missingok
+ postrotate
+ /bin/kill -USR1 `cat /var/run/mongodb/mongodb.pid`
+ endscript
+}
\ No newline at end of file
diff --git a/cookbooks/mongodb/recipes/default.rb b/cookbooks/mongodb/recipes/default.rb
index 756518c..6d98c1c 100644
--- a/cookbooks/mongodb/recipes/default.rb
+++ b/cookbooks/mongodb/recipes/default.rb
@@ -1,60 +1,98 @@
#
# Cookbook Name:: mongodb
# Recipe:: default
-
-
-directory "/data/master" do
- owner node[:owner_name]
- group node[:owner_name]
- mode 0755
- recursive true
- not_if { File.directory?('/data/master') }
-end
-
-# The recipe is not using a slave yet but it will create the directory
-# so that it is there for the future
-directory "/data/slave" do
- owner node[:owner_name]
- group node[:owner_name]
- mode 0755
- recursive true
- not_if { File.directory?('/data/slave') }
-end
-
-execute "install-mongodb" do
- command %Q{
-curl -O http://downloads.mongodb.org/linux/mongodb-linux-i686-latest.tgz &&
-mkdir /usr/local/mongodb &&
-tar -xvvf mongodb-linux-i686-latest.tgz -C /usr/local/mongodb --strip 1 &&
-rm mongodb-linux-i686-latest.tgz
-}
- not_if { File.directory?('/usr/local/mongodb') }
-end
-
-execute "add-to-path" do
- command %Q{
-echo 'export PATH=$PATH:/usr/local/mongodb/bin' >> /etc/profile
-}
- not_if "grep 'export PATH=$PATH:/usr/local/mongodb/bin' /etc/profile"
-end
-
-remote_file "/etc/init.d/mongodb" do
- source "mongodb"
- owner "root"
- group "root"
- mode 0755
-end
-
-execute "add-mongodb-to-default-run-level" do
- command %Q{
-rc-update add mongodb default
-}
- not_if "rc-status | grep mongodb"
-end
-
-execute "ensure-mongodb-is-running" do
- command %Q{
-/etc/init.d/mongodb start
-}
- not_if "pgrep mongod"
-end
+#
+
+# NOTE: Be sure to edit files/default/mongodb-slave.conf
+# If you plan on using replication with a database slave
+
+if ['db_master','db_slave'].include?(node[:instance_role])
+ package "dev-db/mongodb-bin" do
+ action :install
+ end
+
+ directory '/db/mongodb/master-data' do
+ owner 'mongodb'
+ group 'mongodb'
+ mode '0755'
+ action :create
+ recursive true
+ end
+
+ directory '/db/mongodb/slave-data' do
+ owner 'mongodb'
+ group 'mongodb'
+ mode '0755'
+ action :create
+ recursive true
+ end
+
+ directory '/var/log/mongodb' do
+ owner 'mongodb'
+ group 'mongodb'
+ mode '0755'
+ action :create
+ recursive true
+ end
+
+ directory '/var/run/mongodb' do
+ owner 'mongodb'
+ group 'mongodb'
+ mode '0755'
+ action :create
+ recursive true
+ end
+
+ remote_file "/etc/logrotate.d/mongodb" do
+ owner "root"
+ group "root"
+ mode 0755
+ source "mongodb.logrotate"
+ backup false
+ action :create
+ end
+
+ remote_file "/etc/conf.d/mongodb" do
+ owner "root"
+ group "root"
+ mode 0755
+ source "mongodb-master.conf" if node[:instance_role] == 'db_master'
+ source "mongodb-slave.conf" if node[:instance_role] == 'db_slave'
+ backup false
+ action :create
+ end
+
+ execute "enable-mongodb" do
+ command "rc-update add mongodb default"
+ action :run
+ end
+
+ execute "start-mongodb" do
+ command "/etc/init.d/mongodb restart"
+ action :run
+ not_if "/etc/init.d/mongodb status | grep started"
+ end
+
+ node[:applications].each do |app_name,data|
+ user = node[:users].first
+ db_name = "#{app_name}_#{node[:environment][:framework_env]}"
+
+ execute "create-mongodb-root-user" do
+ command "/usr/bin/mongo admin --eval 'db.addUser(\"root\",\"#{user[:password]}\")'"
+ action :run
+ not_if "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\")' | grep ^1$"
+ end
+
+ execute "create-mongodb-replication-user" do
+ command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"local\").addUser(\"repl\",\"#{user[:password]}\")'"
+ action :run
+ not_if "/usr/bin/mongo local --eval 'db.auth(\"repl\",\"#{user[:password]}\")' | grep ^1$"
+ end
+
+ execute "create-mongodb-application-users" do
+ command "/usr/bin/mongo admin --eval 'db.auth(\"root\",\"#{user[:password]}\"); db.getMongo().getDB(\"#{db_name}\").addUser(\"#{user[:username]}\",\"#{user[:password]}\")'"
+ action :run
+ not_if "/usr/bin/mongo #{db_name} --eval 'db.auth(\"#{user[:username]}\",\"#{user[:password]}\")' | grep ^1$"
+ end
+ end
+end
\ No newline at end of file
|
dojo4/ey-cloud-recipes | 6f8e445438eb331c5db23a4740da96c39b944edb | initial add of mongodb recipe | diff --git a/cookbooks/main/recipes/default.rb b/cookbooks/main/recipes/default.rb
index b0d3b22..48e44ee 100644
--- a/cookbooks/main/recipes/default.rb
+++ b/cookbooks/main/recipes/default.rb
@@ -1,31 +1,33 @@
execute "testing" do
command %Q{
echo "i ran at #{Time.now}" >> /root/cheftime
}
end
-require_recipe 'postgres'
+require_recipe 'mongodb'
+
+#require_recipe 'postgres'
# uncomment if you want to run couchdb recipe
# require_recipe "couchdb"
# uncomment to turn use the MBARI ruby patches for decreased memory usage and better thread/continuationi performance
# require_recipe "mbari-ruby"
# uncomment to turn on thinking sphinx
# require_recipe "thinking_sphinx"
# uncomment to turn on ultrasphinx
# require_recipe "ultrasphinx"
#uncomment to turn on memcached
# require_recipe "memcached"
#uncomment to run the authorized_keys recipe
#require_recipe "authorized_keys"
#uncomment to run the eybackup_slave recipe
#require_recipe "eybackup_slave"
#uncomment to run the ssmtp recipe
#require_recipe "ssmtp"
diff --git a/cookbooks/mongodb/files/default/mongodb b/cookbooks/mongodb/files/default/mongodb
new file mode 100644
index 0000000..88036e3
--- /dev/null
+++ b/cookbooks/mongodb/files/default/mongodb
@@ -0,0 +1,17 @@
+depend() {
+ use net
+}
+
+start() {
+ ebegin "Starting mongodb"
+ /usr/local/mongodb/bin/mongod --master --port 27017 --dbpath /data/masterdb/ > /dev/null 2>&1 &
+ /usr/local/mongodb/bin/mongod --slave --port 27018 --dbpath /data/slavedb/ > /dev/null 2>&1 &
+ eend $?
+}
+
+stop() {
+ ebegin "Stopping mongodb"
+ start-stop-daemon --stop --quiet --exec /usr/local/mongodb/bin/mongod
+ eend $?
+}
+
diff --git a/cookbooks/mongodb/recipes/default.rb b/cookbooks/mongodb/recipes/default.rb
new file mode 100644
index 0000000..2cfa88d
--- /dev/null
+++ b/cookbooks/mongodb/recipes/default.rb
@@ -0,0 +1,60 @@
+#
+# Cookbook Name:: mongodb
+# Recipe:: default
+
+
+directory "/data/master" do
+ owner node[:owner_name]
+ group node[:owner_name]
+ mode 0755
+ recursive true
+ not_if { File.directory?('/data/master') }
+end
+
+# The recipe is not using a slave yet but it will create the directory
+# so that it is there for the future
+directory "/data/slave" do
+ owner node[:owner_name]
+ group node[:owner_name]
+ mode 0755
+ recursive true
+ not_if { File.directory?('/data/slave') }
+end
+
+execute "install-mongodb" do
+ command %Q{
+curl -O http://downloads.mongodb.org/linux/mongodb-linux-x86_64-1.0.0.tgz &&
+tar zxvf mongodb-linux-x86_64-1.0.0.tgz &&
+mv mongodb-linux-x86_64-1.0.0 /usr/local/mongodb &&
+rm mongodb-linux-x86_64-1.0.0.tgz
+}
+ not_if { File.directory?('/usr/local/mongodb') }
+end
+
+execute "add-to-path" do
+ command %Q{
+echo 'export PATH=$PATH:/usr/local/mongodb/bin' >> /etc/profile
+}
+ not_if "grep 'export PATH=$PATH:/usr/local/mongodb/bin' /etc/profile"
+end
+
+remote_file "/etc/init.d/mongodb" do
+ source "mongodb"
+ owner "root"
+ group "root"
+ mode 0755
+end
+
+execute "add-mongodb-to-default-run-level" do
+ command %Q{
+rc-update add mongodb default
+}
+ not_if "rc-status | grep mongodb"
+end
+
+execute "ensure-mongodb-is-running" do
+ command %Q{
+/etc/init.d/mongodb start
+}
+ not_if "pgrep mongod"
+end
|
dojo4/ey-cloud-recipes | caacb2ac9ad2a2fadc1e1840c7c86dd6d357d96c | a bunch of slightly more sane defaults for postgres instances on amazon | diff --git a/cookbooks/postgres/files/default/postgresql.conf b/cookbooks/postgres/files/default/postgresql.conf
index 69cc9cc..26efab4 100644
--- a/cookbooks/postgres/files/default/postgresql.conf
+++ b/cookbooks/postgres/files/default/postgresql.conf
@@ -1,496 +1,500 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, or use "pg_ctl reload". Some
# parameters, which are marked below, require a server shutdown and restart to
# take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some paramters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: kB = kilobytes MB = megabytes GB = gigabytes
# Time units: ms = milliseconds s = seconds min = minutes h = hours d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
#data_directory = 'ConfigDir' # use data in another directory
# (change requires restart)
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
# (change requires restart)
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = '(none)' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = '*'
#listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost', '*' = all
# (change requires restart)
#port = 5432 # (change requires restart)
max_connections = 100 # (change requires restart)
# Note: Increasing max_connections costs ~400 bytes of shared memory per
# connection slot, plus lock space (see max_locks_per_transaction). You might
# also need to raise shared_buffers to support more connections.
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directory = '' # (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - Security and Authentication -
#authentication_timeout = 1min # 1s-600s
#ssl = off # (change requires restart)
#ssl_ciphers = 'ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers
# (change requires restart)
#password_encryption = on
#db_user_namespace = off
# Kerberos and GSSAPI
#krb_server_keyfile = '' # (change requires restart)
#krb_srvname = 'postgres' # (change requires restart, Kerberos only)
#krb_server_hostname = '' # empty string matches any keytab entry
# (change requires restart, Kerberos only)
#krb_caseins_users = off # (change requires restart)
#krb_realm = '' # (change requires restart)
# - TCP Keepalives -
# see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 24MB # min 128kB or max_connections*16kB
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 5 # can be 0 or more
# (change requires restart)
# Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory
# per transaction slot, plus lock space (see max_locks_per_transaction).
#work_mem = 1MB # min 64kB
+work_mem = 32MB # min 64kB
#maintenance_work_mem = 16MB # min 1MB
#max_stack_depth = 2MB # min 100kB
# - Free Space Map -
-max_fsm_pages = 153600 # min max_fsm_relations*16, 6 bytes each
+max_fsm_pages = 153600 # min max_fsm_relations*16, 6 bytes each
# (change requires restart)
-#max_fsm_relations = 1000 # min 100, ~70 bytes each
+max_fsm_relations = 10000 # min 100, ~70 bytes each
# (change requires restart)
# - Kernel Resource Usage -
#max_files_per_process = 1000 # min 25
# (change requires restart)
#shared_preload_libraries = '' # (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-1000 milliseconds
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multipler on buffers scanned/round
#------------------------------------------------------------------------------
# WRITE AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#fsync = on # turns forced synchronization on or off
#synchronous_commit = on # immediate fsync at commit
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_buffers = 64kB # min 32kB
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_segments = 3 # in logfile segments, min 1, 16MB each
+checkpoint_segments = 10 # in logfile segments, min 1, 16MB each
#checkpoint_timeout = 5min # range 30s-1h
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
#checkpoint_warning = 30s # 0 is off
# - Archiving -
#archive_mode = off # allows archiving to be done
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
#archive_timeout = 0 # force a logfile segment switch after this
# time; 0 is off
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#effective_cache_size = 128MB
+effective_cache_size = 1280MB
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
# - Other Planner Options -
#default_statistics_target = 10 # range 1-1000
+default_statistics_target = 100 # range 1-1000
#constraint_exclusion = off
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#------------------------------------------------------------------------------
# ERROR REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
logging_collector = on # Enable capturing of stderr and csvlog
# These are only used if logging_collector is on:
#log_directory = 'pg_log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_truncate_on_rotation = off # If on, an existing log file of the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
log_rotation_age = 7d # Automatic rotation of logfiles will
# happen after that time. 0 to disable.
log_rotation_size = 128MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 to disable.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
# - When to Log -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#log_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_error_verbosity = default # terse, default, or verbose messages
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this time.
#silent_mode = off # DO NOT USE without syslog or
# logging_collector
# (change requires restart)
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = off
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_hostname = off
#log_line_prefix = '' # special values:
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %i = command tag
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_statement = 'none' # none, ddl, mod, all
#log_temp_files = -1 # log temporary files equal or larger
# than specified size;
# -1 disables, 0 logs all temp files
#log_timezone = unknown # actually, defaults to TZ environment
# setting
#------------------------------------------------------------------------------
# RUNTIME STATISTICS
#------------------------------------------------------------------------------
# - Query/Index Statistics Collector -
#track_activities = on
#track_counts = on
#update_process_title = on
# - Statistics Monitoring -
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM PARAMETERS
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least that time.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 20 # default vacuum cost delay for
# autovacuum, -1 means use
# vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#search_path = '"$user",public' # schema names
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#session_replication_role = 'origin'
#statement_timeout = 0 # 0 is disabled
#vacuum_freeze_min_age = 100000000
#xmlbinary = 'base64'
#xmloption = 'content'
# - Locale and Formatting -
datestyle = 'iso, mdy'
#timezone = unknown # actually, defaults to TZ environment
# setting
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 0 # min -15, max 2
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C' # locale for system error message
# strings
lc_monetary = 'C' # locale for monetary formatting
lc_numeric = 'C' # locale for number formatting
lc_time = 'C' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Other Defaults -
#explain_pretty_print = on
#dynamic_library_path = '$libdir'
#local_preload_libraries = ''
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
# Note: Each lock table slot uses ~270 bytes of shared memory, and there are
# max_locks_per_transaction * (max_connections + max_prepared_transactions)
# lock table slots.
#------------------------------------------------------------------------------
# VERSION/PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#add_missing_from = off
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#default_with_oids = off
#escape_string_warning = on
#regex_flavor = advanced # advanced, extended, or basic
#sql_inheritance = on
#standard_conforming_strings = off
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
#custom_variable_classes = '' # list of custom variable class names
|
dojo4/ey-cloud-recipes | 35c74c1ccb11795739ee6ec0a6ff5e47c85cdd8d | more sane defaults and logging for postgres db | diff --git a/cookbooks/postgres/files/default/postgresql.conf b/cookbooks/postgres/files/default/postgresql.conf
index 27e463e..69cc9cc 100644
--- a/cookbooks/postgres/files/default/postgresql.conf
+++ b/cookbooks/postgres/files/default/postgresql.conf
@@ -1,495 +1,496 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, or use "pg_ctl reload". Some
# parameters, which are marked below, require a server shutdown and restart to
# take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some paramters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: kB = kilobytes MB = megabytes GB = gigabytes
# Time units: ms = milliseconds s = seconds min = minutes h = hours d = days
#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
#data_directory = 'ConfigDir' # use data in another directory
# (change requires restart)
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
# (change requires restart)
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = '(none)' # write an extra PID file
# (change requires restart)
#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------
# - Connection Settings -
listen_addresses = '*'
#listen_addresses = 'localhost' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost', '*' = all
# (change requires restart)
#port = 5432 # (change requires restart)
max_connections = 100 # (change requires restart)
# Note: Increasing max_connections costs ~400 bytes of shared memory per
# connection slot, plus lock space (see max_locks_per_transaction). You might
# also need to raise shared_buffers to support more connections.
#superuser_reserved_connections = 3 # (change requires restart)
#unix_socket_directory = '' # (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)
# - Security and Authentication -
#authentication_timeout = 1min # 1s-600s
#ssl = off # (change requires restart)
#ssl_ciphers = 'ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers
# (change requires restart)
#password_encryption = on
#db_user_namespace = off
# Kerberos and GSSAPI
#krb_server_keyfile = '' # (change requires restart)
#krb_srvname = 'postgres' # (change requires restart, Kerberos only)
#krb_server_hostname = '' # empty string matches any keytab entry
# (change requires restart, Kerberos only)
#krb_caseins_users = off # (change requires restart)
#krb_realm = '' # (change requires restart)
# - TCP Keepalives -
# see "man 7 tcp" for details
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------
# - Memory -
shared_buffers = 24MB # min 128kB or max_connections*16kB
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 5 # can be 0 or more
# (change requires restart)
# Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory
# per transaction slot, plus lock space (see max_locks_per_transaction).
#work_mem = 1MB # min 64kB
#maintenance_work_mem = 16MB # min 1MB
#max_stack_depth = 2MB # min 100kB
# - Free Space Map -
max_fsm_pages = 153600 # min max_fsm_relations*16, 6 bytes each
# (change requires restart)
#max_fsm_relations = 1000 # min 100, ~70 bytes each
# (change requires restart)
# - Kernel Resource Usage -
#max_files_per_process = 1000 # min 25
# (change requires restart)
#shared_preload_libraries = '' # (change requires restart)
# - Cost-Based Vacuum Delay -
#vacuum_cost_delay = 0 # 0-1000 milliseconds
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits
# - Background Writer -
#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multipler on buffers scanned/round
#------------------------------------------------------------------------------
# WRITE AHEAD LOG
#------------------------------------------------------------------------------
# - Settings -
#fsync = on # turns forced synchronization on or off
#synchronous_commit = on # immediate fsync at commit
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_buffers = 64kB # min 32kB
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000
# - Checkpoints -
#checkpoint_segments = 3 # in logfile segments, min 1, 16MB each
#checkpoint_timeout = 5min # range 30s-1h
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
#checkpoint_warning = 30s # 0 is off
# - Archiving -
#archive_mode = off # allows archiving to be done
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
#archive_timeout = 0 # force a logfile segment switch after this
# time; 0 is off
#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------
# - Planner Method Configuration -
#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
# - Planner Cost Constants -
#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#effective_cache_size = 128MB
# - Genetic Query Optimizer -
#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
# - Other Planner Options -
#default_statistics_target = 10 # range 1-1000
#constraint_exclusion = off
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#------------------------------------------------------------------------------
# ERROR REPORTING AND LOGGING
#------------------------------------------------------------------------------
# - Where to Log -
#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.
# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)
+logging_collector = on # Enable capturing of stderr and csvlog
# These are only used if logging_collector is on:
#log_directory = 'pg_log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_truncate_on_rotation = off # If on, an existing log file of the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
-#log_rotation_age = 1d # Automatic rotation of logfiles will
+log_rotation_age = 7d # Automatic rotation of logfiles will
# happen after that time. 0 to disable.
-#log_rotation_size = 10MB # Automatic rotation of logfiles will
+log_rotation_size = 128MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 to disable.
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
# - When to Log -
#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error
#log_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic
#log_error_verbosity = default # terse, default, or verbose messages
#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this time.
#silent_mode = off # DO NOT USE without syslog or
# logging_collector
# (change requires restart)
# - What to Log -
#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = off
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_hostname = off
#log_line_prefix = '' # special values:
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %i = command tag
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_statement = 'none' # none, ddl, mod, all
#log_temp_files = -1 # log temporary files equal or larger
# than specified size;
# -1 disables, 0 logs all temp files
#log_timezone = unknown # actually, defaults to TZ environment
# setting
#------------------------------------------------------------------------------
# RUNTIME STATISTICS
#------------------------------------------------------------------------------
# - Query/Index Statistics Collector -
#track_activities = on
#track_counts = on
#update_process_title = on
# - Statistics Monitoring -
#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off
#------------------------------------------------------------------------------
# AUTOVACUUM PARAMETERS
#------------------------------------------------------------------------------
#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least that time.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 20 # default vacuum cost delay for
# autovacuum, -1 means use
# vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit
#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------
# - Statement Behavior -
#search_path = '"$user",public' # schema names
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#session_replication_role = 'origin'
#statement_timeout = 0 # 0 is disabled
#vacuum_freeze_min_age = 100000000
#xmlbinary = 'base64'
#xmloption = 'content'
# - Locale and Formatting -
datestyle = 'iso, mdy'
#timezone = unknown # actually, defaults to TZ environment
# setting
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 0 # min -15, max 2
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'C' # locale for system error message
# strings
lc_monetary = 'C' # locale for monetary formatting
lc_numeric = 'C' # locale for number formatting
lc_time = 'C' # locale for time formatting
# default configuration for text search
default_text_search_config = 'pg_catalog.english'
# - Other Defaults -
#explain_pretty_print = on
#dynamic_library_path = '$libdir'
#local_preload_libraries = ''
#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------
#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
# Note: Each lock table slot uses ~270 bytes of shared memory, and there are
# max_locks_per_transaction * (max_connections + max_prepared_transactions)
# lock table slots.
#------------------------------------------------------------------------------
# VERSION/PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------
# - Previous PostgreSQL Versions -
#add_missing_from = off
#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#default_with_oids = off
#escape_string_warning = on
#regex_flavor = advanced # advanced, extended, or basic
#sql_inheritance = on
#standard_conforming_strings = off
#synchronize_seqscans = on
# - Other Platforms and Clients -
#transform_null_equals = off
#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------
#custom_variable_classes = '' # list of custom variable class names
diff --git a/cookbooks/postgres/recipes/default.rb b/cookbooks/postgres/recipes/default.rb
index 2d7506f..46ea26f 100644
--- a/cookbooks/postgres/recipes/default.rb
+++ b/cookbooks/postgres/recipes/default.rb
@@ -1,162 +1,162 @@
require 'pp'
#
# Cookbook Name:: postgres
# Recipe:: default
#
#
if node[:instance_role] == 'db_master'
postgres_root = '/var/lib/postgresql'
postgres_version = '8.3'
directory '/db/postgresql' do
owner 'postgres'
group 'postgres'
mode '0755'
action :create
recursive true
end
directory '/var/lib/postgresql' do
action :delete
recursive true
end
link "setup-postgresq-db-my-symlink" do
to '/db/postgresql'
target_file postgres_root
end
execute "init-postgres" do
- command "initdb -D #{postgres_root}/#{postgres_version}/data"
+ command "initdb -D #{postgres_root}/#{postgres_version}/data --encoding=UTF8 --locale=en_US.UTF-8"
action :run
user 'postgres'
only_if "[ ! -d #{postgres_root}/#{postgres_version}/data ]"
end
remote_file "/var/lib/postgresql/8.3/data/postgresql.conf" do
source "postgresql.conf"
owner "postgres"
group "root"
mode 0600
end
template "/var/lib/postgresql/8.3/data/pg_hba.conf" do
owner 'postgres'
group 'root'
mode 0600
source "pg_hba.conf.erb"
variables({
:dbuser => node[:users].first[:username],
:dbpass => node[:users].first[:password]
})
end
execute "enable-postgres" do
command "rc-update add postgresql-#{postgres_version} default"
action :run
end
execute "restart-postgres" do
command "/etc/init.d/postgresql-#{postgres_version} restart"
action :run
not_if "/etc/init.d/postgresql-8.3 status | grep -q start"
end
gem_package "pg" do
action :install
end
template "/etc/.postgresql.backups.yml" do
owner 'root'
group 'root'
mode 0600
source "postgresql.backups.yml.erb"
variables({
:dbuser => node[:users].first[:username],
:dbpass => node[:users].first[:password],
:keep => node[:backup_window] || 14,
:id => node[:aws_secret_id],
:key => node[:aws_secret_key],
:env => node[:environment][:name]
})
end
#set backup interval
cron_hour = if node[:backup_interval].to_s == '24'
"1" # 0100 Pacific, per support's request
# NB: Instances run in the Pacific (Los Angeles) timezone
elsif node[:backup_interval]
"*/#{node[:backup_interval]}"
else
"1"
end
cron "eybackup" do
action :delete
end
cron "eybackup postgresql" do
minute '10'
hour cron_hour
day '*'
month '*'
weekday '*'
command "eybackup -e postgresql"
not_if { node[:backup_window].to_s == '0' }
end
end
node[:applications].each do |app_name,data|
user = node[:users].first
db_name = "#{app_name}_#{node[:environment][:framework_env]}"
if node[:instance_role] == 'db_master'
execute "create-db-user-#{user[:username]}" do
command "psql -c '\\du' | grep -q '#{user[:username]}' || psql -c \"create user #{user[:username]} with encrypted password \'#{user[:password]}\'\""
action :run
user 'postgres'
end
execute "create-db-#{db_name}" do
command "psql -c '\\l' | grep -q '#{db_name}' || createdb #{db_name}"
action :run
user 'postgres'
end
execute "grant-perms-on-#{db_name}-to-#{user[:username]}" do
command "/usr/bin/psql -c 'grant all on database #{db_name} to #{user[:username]}'"
action :run
user 'postgres'
end
execute "alter-public-schema-owner-on-#{db_name}-to-#{user[:username]}" do
command "/usr/bin/psql #{db_name} -c 'ALTER SCHEMA public OWNER TO #{user[:username]}'"
action :run
user 'postgres'
end
end
directory "/data/#{app_name}/shared/config/" do
owner user[:username]
group user[:username]
mode '0755'
action :create
recursive true
end
[ "", "keep." ].each do |prefix|
template "/data/#{app_name}/shared/config/#{prefix}database.yml" do
source "database.yml.erb"
owner user[:username]
group user[:username]
mode 0744
variables({
:username => user[:username],
:app_name => app_name,
:db_pass => user[:password]
})
not_if do File.exists?("/data/#{app_name}/shared/config/#{prefix}database.yml") end
end
end
end
|
dojo4/ey-cloud-recipes | 641ebfc3693f34c5dd609370d45ec7a7cfd5d33c | add a pg_hba.conf for network access too | diff --git a/cookbooks/postgres/recipes/default.rb b/cookbooks/postgres/recipes/default.rb
index 80fb89f..2d7506f 100644
--- a/cookbooks/postgres/recipes/default.rb
+++ b/cookbooks/postgres/recipes/default.rb
@@ -1,151 +1,162 @@
require 'pp'
#
# Cookbook Name:: postgres
# Recipe:: default
#
#
if node[:instance_role] == 'db_master'
postgres_root = '/var/lib/postgresql'
postgres_version = '8.3'
directory '/db/postgresql' do
owner 'postgres'
group 'postgres'
mode '0755'
action :create
recursive true
end
directory '/var/lib/postgresql' do
action :delete
recursive true
end
link "setup-postgresq-db-my-symlink" do
to '/db/postgresql'
target_file postgres_root
end
execute "init-postgres" do
command "initdb -D #{postgres_root}/#{postgres_version}/data"
action :run
user 'postgres'
only_if "[ ! -d #{postgres_root}/#{postgres_version}/data ]"
end
remote_file "/var/lib/postgresql/8.3/data/postgresql.conf" do
source "postgresql.conf"
owner "postgres"
group "root"
mode 0600
end
+ template "/var/lib/postgresql/8.3/data/pg_hba.conf" do
+ owner 'postgres'
+ group 'root'
+ mode 0600
+ source "pg_hba.conf.erb"
+ variables({
+ :dbuser => node[:users].first[:username],
+ :dbpass => node[:users].first[:password]
+ })
+ end
+
execute "enable-postgres" do
command "rc-update add postgresql-#{postgres_version} default"
action :run
end
execute "restart-postgres" do
command "/etc/init.d/postgresql-#{postgres_version} restart"
action :run
not_if "/etc/init.d/postgresql-8.3 status | grep -q start"
end
gem_package "pg" do
action :install
end
template "/etc/.postgresql.backups.yml" do
owner 'root'
group 'root'
mode 0600
source "postgresql.backups.yml.erb"
variables({
:dbuser => node[:users].first[:username],
:dbpass => node[:users].first[:password],
:keep => node[:backup_window] || 14,
:id => node[:aws_secret_id],
:key => node[:aws_secret_key],
:env => node[:environment][:name]
})
end
#set backup interval
cron_hour = if node[:backup_interval].to_s == '24'
"1" # 0100 Pacific, per support's request
# NB: Instances run in the Pacific (Los Angeles) timezone
elsif node[:backup_interval]
"*/#{node[:backup_interval]}"
else
"1"
end
cron "eybackup" do
action :delete
end
cron "eybackup postgresql" do
minute '10'
hour cron_hour
day '*'
month '*'
weekday '*'
command "eybackup -e postgresql"
not_if { node[:backup_window].to_s == '0' }
end
end
node[:applications].each do |app_name,data|
user = node[:users].first
db_name = "#{app_name}_#{node[:environment][:framework_env]}"
if node[:instance_role] == 'db_master'
execute "create-db-user-#{user[:username]}" do
command "psql -c '\\du' | grep -q '#{user[:username]}' || psql -c \"create user #{user[:username]} with encrypted password \'#{user[:password]}\'\""
action :run
user 'postgres'
end
execute "create-db-#{db_name}" do
command "psql -c '\\l' | grep -q '#{db_name}' || createdb #{db_name}"
action :run
user 'postgres'
end
execute "grant-perms-on-#{db_name}-to-#{user[:username]}" do
command "/usr/bin/psql -c 'grant all on database #{db_name} to #{user[:username]}'"
action :run
user 'postgres'
end
execute "alter-public-schema-owner-on-#{db_name}-to-#{user[:username]}" do
command "/usr/bin/psql #{db_name} -c 'ALTER SCHEMA public OWNER TO #{user[:username]}'"
action :run
user 'postgres'
end
end
directory "/data/#{app_name}/shared/config/" do
owner user[:username]
group user[:username]
mode '0755'
action :create
recursive true
end
[ "", "keep." ].each do |prefix|
template "/data/#{app_name}/shared/config/#{prefix}database.yml" do
source "database.yml.erb"
owner user[:username]
group user[:username]
mode 0744
variables({
:username => user[:username],
:app_name => app_name,
:db_pass => user[:password]
})
not_if do File.exists?("/data/#{app_name}/shared/config/#{prefix}database.yml") end
end
end
end
diff --git a/cookbooks/postgres/templates/default/pg_hba.conf.erb b/cookbooks/postgres/templates/default/pg_hba.conf.erb
new file mode 100644
index 0000000..8a2a8a0
--- /dev/null
+++ b/cookbooks/postgres/templates/default/pg_hba.conf.erb
@@ -0,0 +1,77 @@
+# PostgreSQL Client Authentication Configuration File
+# ===================================================
+#
+# Refer to the "Client Authentication" section in the
+# PostgreSQL documentation for a complete description
+# of this file. A short synopsis follows.
+#
+# This file controls: which hosts are allowed to connect, how clients
+# are authenticated, which PostgreSQL user names they can use, which
+# databases they can access. Records take one of these forms:
+#
+# local DATABASE USER METHOD [OPTION]
+# host DATABASE USER CIDR-ADDRESS METHOD [OPTION]
+# hostssl DATABASE USER CIDR-ADDRESS METHOD [OPTION]
+# hostnossl DATABASE USER CIDR-ADDRESS METHOD [OPTION]
+#
+# (The uppercase items must be replaced by actual values.)
+#
+# The first field is the connection type: "local" is a Unix-domain socket,
+# "host" is either a plain or SSL-encrypted TCP/IP socket, "hostssl" is an
+# SSL-encrypted TCP/IP socket, and "hostnossl" is a plain TCP/IP socket.
+#
+# DATABASE can be "all", "sameuser", "samerole", a database name, or
+# a comma-separated list thereof.
+#
+# USER can be "all", a user name, a group name prefixed with "+", or
+# a comma-separated list thereof. In both the DATABASE and USER fields
+# you can also write a file name prefixed with "@" to include names from
+# a separate file.
+#
+# CIDR-ADDRESS specifies the set of hosts the record matches.
+# It is made up of an IP address and a CIDR mask that is an integer
+# (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that specifies
+# the number of significant bits in the mask. Alternatively, you can write
+# an IP address and netmask in separate columns to specify the set of hosts.
+#
+# METHOD can be "trust", "reject", "md5", "crypt", "password", "gss", "sspi",
+# "krb5", "ident", "pam" or "ldap". Note that "password" sends passwords
+# in clear text; "md5" is preferred since it sends encrypted passwords.
+#
+# OPTION is the ident map or the name of the PAM service, depending on METHOD.
+#
+# Database and user names containing spaces, commas, quotes and other special
+# characters must be quoted. Quoting one of the keywords "all", "sameuser" or
+# "samerole" makes the name lose its special character, and just match a
+# database or username with that name.
+#
+# This file is read on server startup and when the postmaster receives
+# a SIGHUP signal. If you edit the file on a running system, you have
+# to SIGHUP the postmaster for the changes to take effect. You can use
+# "pg_ctl reload" to do that.
+
+# Put your actual configuration here
+# ----------------------------------
+#
+# If you want to allow non-local connections, you need to add more
+# "host" records. In that case you will also need to make PostgreSQL listen
+# on a non-local interface via the listen_addresses configuration parameter,
+# or via the -i or -h command line switches.
+#
+
+# CAUTION: Configuring the system for local "trust" authentication allows
+# any local user to connect as any PostgreSQL user, including the database
+# superuser. If you do not trust all your local users, use another
+# authentication method.
+
+
+# TYPE DATABASE USER CIDR-ADDRESS METHOD
+
+# "local" is for Unix domain socket connections only
+local all all trust
+# IPv4 local connections:
+host all all 127.0.0.1/32 trust
+# IPv6 local connections:
+host all all ::1/128 trust
+#
+host all <%= @dbuser %> 10.0.0.0/8 md5
|
dojo4/ey-cloud-recipes | 3f1f5f014b97d3a5202338c076e75223de22b22d | only write keep files if they don't exist already | diff --git a/cookbooks/postgres/recipes/default.rb b/cookbooks/postgres/recipes/default.rb
index cc9e980..80fb89f 100644
--- a/cookbooks/postgres/recipes/default.rb
+++ b/cookbooks/postgres/recipes/default.rb
@@ -1,150 +1,151 @@
require 'pp'
#
# Cookbook Name:: postgres
# Recipe:: default
#
#
if node[:instance_role] == 'db_master'
postgres_root = '/var/lib/postgresql'
postgres_version = '8.3'
directory '/db/postgresql' do
owner 'postgres'
group 'postgres'
mode '0755'
action :create
recursive true
end
directory '/var/lib/postgresql' do
action :delete
recursive true
end
link "setup-postgresq-db-my-symlink" do
to '/db/postgresql'
target_file postgres_root
end
execute "init-postgres" do
command "initdb -D #{postgres_root}/#{postgres_version}/data"
action :run
user 'postgres'
only_if "[ ! -d #{postgres_root}/#{postgres_version}/data ]"
end
remote_file "/var/lib/postgresql/8.3/data/postgresql.conf" do
source "postgresql.conf"
owner "postgres"
group "root"
mode 0600
end
execute "enable-postgres" do
command "rc-update add postgresql-#{postgres_version} default"
action :run
end
execute "restart-postgres" do
command "/etc/init.d/postgresql-#{postgres_version} restart"
action :run
not_if "/etc/init.d/postgresql-8.3 status | grep -q start"
end
gem_package "pg" do
action :install
end
template "/etc/.postgresql.backups.yml" do
owner 'root'
group 'root'
mode 0600
source "postgresql.backups.yml.erb"
variables({
:dbuser => node[:users].first[:username],
:dbpass => node[:users].first[:password],
:keep => node[:backup_window] || 14,
:id => node[:aws_secret_id],
:key => node[:aws_secret_key],
:env => node[:environment][:name]
})
end
#set backup interval
cron_hour = if node[:backup_interval].to_s == '24'
"1" # 0100 Pacific, per support's request
# NB: Instances run in the Pacific (Los Angeles) timezone
elsif node[:backup_interval]
"*/#{node[:backup_interval]}"
else
"1"
end
cron "eybackup" do
action :delete
end
cron "eybackup postgresql" do
minute '10'
hour cron_hour
day '*'
month '*'
weekday '*'
command "eybackup -e postgresql"
not_if { node[:backup_window].to_s == '0' }
end
end
node[:applications].each do |app_name,data|
user = node[:users].first
db_name = "#{app_name}_#{node[:environment][:framework_env]}"
if node[:instance_role] == 'db_master'
execute "create-db-user-#{user[:username]}" do
command "psql -c '\\du' | grep -q '#{user[:username]}' || psql -c \"create user #{user[:username]} with encrypted password \'#{user[:password]}\'\""
action :run
user 'postgres'
end
execute "create-db-#{db_name}" do
command "psql -c '\\l' | grep -q '#{db_name}' || createdb #{db_name}"
action :run
user 'postgres'
end
execute "grant-perms-on-#{db_name}-to-#{user[:username]}" do
command "/usr/bin/psql -c 'grant all on database #{db_name} to #{user[:username]}'"
action :run
user 'postgres'
end
execute "alter-public-schema-owner-on-#{db_name}-to-#{user[:username]}" do
command "/usr/bin/psql #{db_name} -c 'ALTER SCHEMA public OWNER TO #{user[:username]}'"
action :run
user 'postgres'
end
end
directory "/data/#{app_name}/shared/config/" do
owner user[:username]
group user[:username]
mode '0755'
action :create
recursive true
end
[ "", "keep." ].each do |prefix|
template "/data/#{app_name}/shared/config/#{prefix}database.yml" do
source "database.yml.erb"
owner user[:username]
group user[:username]
mode 0744
variables({
:username => user[:username],
:app_name => app_name,
:db_pass => user[:password]
})
+ not_if do File.exists?("/data/#{app_name}/shared/config/#{prefix}database.yml") end
end
end
end
|
dojo4/ey-cloud-recipes | d82c04dafe7525b66e86eae64e5f1c1b7197a061 | add support for running a cluster | diff --git a/cookbooks/main/recipes/default.rb b/cookbooks/main/recipes/default.rb
index bc15188..b0d3b22 100644
--- a/cookbooks/main/recipes/default.rb
+++ b/cookbooks/main/recipes/default.rb
@@ -1,29 +1,31 @@
execute "testing" do
command %Q{
echo "i ran at #{Time.now}" >> /root/cheftime
}
end
+require_recipe 'postgres'
+
# uncomment if you want to run couchdb recipe
# require_recipe "couchdb"
# uncomment to turn use the MBARI ruby patches for decreased memory usage and better thread/continuationi performance
# require_recipe "mbari-ruby"
# uncomment to turn on thinking sphinx
# require_recipe "thinking_sphinx"
# uncomment to turn on ultrasphinx
# require_recipe "ultrasphinx"
#uncomment to turn on memcached
# require_recipe "memcached"
#uncomment to run the authorized_keys recipe
#require_recipe "authorized_keys"
#uncomment to run the eybackup_slave recipe
#require_recipe "eybackup_slave"
#uncomment to run the ssmtp recipe
#require_recipe "ssmtp"
diff --git a/cookbooks/postgres/files/default/postgresql.conf b/cookbooks/postgres/files/default/postgresql.conf
new file mode 100644
index 0000000..27e463e
--- /dev/null
+++ b/cookbooks/postgres/files/default/postgresql.conf
@@ -0,0 +1,495 @@
+# -----------------------------
+# PostgreSQL configuration file
+# -----------------------------
+#
+# This file consists of lines of the form:
+#
+# name = value
+#
+# (The "=" is optional.) Whitespace may be used. Comments are introduced with
+# "#" anywhere on a line. The complete list of parameter names and allowed
+# values can be found in the PostgreSQL documentation.
+#
+# The commented-out settings shown in this file represent the default values.
+# Re-commenting a setting is NOT sufficient to revert it to the default value;
+# you need to reload the server.
+#
+# This file is read on server startup and when the server receives a SIGHUP
+# signal. If you edit the file on a running system, you have to SIGHUP the
+# server for the changes to take effect, or use "pg_ctl reload". Some
+# parameters, which are marked below, require a server shutdown and restart to
+# take effect.
+#
+# Any parameter can also be given as a command-line option to the server, e.g.,
+# "postgres -c log_connections=on". Some paramters can be changed at run time
+# with the "SET" SQL command.
+#
+# Memory units: kB = kilobytes MB = megabytes GB = gigabytes
+# Time units: ms = milliseconds s = seconds min = minutes h = hours d = days
+
+
+#------------------------------------------------------------------------------
+# FILE LOCATIONS
+#------------------------------------------------------------------------------
+
+# The default values of these variables are driven from the -D command-line
+# option or PGDATA environment variable, represented here as ConfigDir.
+
+#data_directory = 'ConfigDir' # use data in another directory
+ # (change requires restart)
+#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
+ # (change requires restart)
+#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
+ # (change requires restart)
+
+# If external_pid_file is not explicitly set, no extra PID file is written.
+#external_pid_file = '(none)' # write an extra PID file
+ # (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONNECTIONS AND AUTHENTICATION
+#------------------------------------------------------------------------------
+
+# - Connection Settings -
+
+listen_addresses = '*'
+
+#listen_addresses = 'localhost' # what IP address(es) to listen on;
+ # comma-separated list of addresses;
+ # defaults to 'localhost', '*' = all
+ # (change requires restart)
+#port = 5432 # (change requires restart)
+max_connections = 100 # (change requires restart)
+# Note: Increasing max_connections costs ~400 bytes of shared memory per
+# connection slot, plus lock space (see max_locks_per_transaction). You might
+# also need to raise shared_buffers to support more connections.
+#superuser_reserved_connections = 3 # (change requires restart)
+#unix_socket_directory = '' # (change requires restart)
+#unix_socket_group = '' # (change requires restart)
+#unix_socket_permissions = 0777 # begin with 0 to use octal notation
+ # (change requires restart)
+#bonjour_name = '' # defaults to the computer name
+ # (change requires restart)
+
+# - Security and Authentication -
+
+#authentication_timeout = 1min # 1s-600s
+#ssl = off # (change requires restart)
+#ssl_ciphers = 'ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers
+ # (change requires restart)
+#password_encryption = on
+#db_user_namespace = off
+
+# Kerberos and GSSAPI
+#krb_server_keyfile = '' # (change requires restart)
+#krb_srvname = 'postgres' # (change requires restart, Kerberos only)
+#krb_server_hostname = '' # empty string matches any keytab entry
+ # (change requires restart, Kerberos only)
+#krb_caseins_users = off # (change requires restart)
+#krb_realm = '' # (change requires restart)
+
+# - TCP Keepalives -
+# see "man 7 tcp" for details
+
+#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
+ # 0 selects the system default
+#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
+ # 0 selects the system default
+#tcp_keepalives_count = 0 # TCP_KEEPCNT;
+ # 0 selects the system default
+
+
+#------------------------------------------------------------------------------
+# RESOURCE USAGE (except WAL)
+#------------------------------------------------------------------------------
+
+# - Memory -
+
+shared_buffers = 24MB # min 128kB or max_connections*16kB
+ # (change requires restart)
+#temp_buffers = 8MB # min 800kB
+#max_prepared_transactions = 5 # can be 0 or more
+ # (change requires restart)
+# Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory
+# per transaction slot, plus lock space (see max_locks_per_transaction).
+#work_mem = 1MB # min 64kB
+#maintenance_work_mem = 16MB # min 1MB
+#max_stack_depth = 2MB # min 100kB
+
+# - Free Space Map -
+
+max_fsm_pages = 153600 # min max_fsm_relations*16, 6 bytes each
+ # (change requires restart)
+#max_fsm_relations = 1000 # min 100, ~70 bytes each
+ # (change requires restart)
+
+# - Kernel Resource Usage -
+
+#max_files_per_process = 1000 # min 25
+ # (change requires restart)
+#shared_preload_libraries = '' # (change requires restart)
+
+# - Cost-Based Vacuum Delay -
+
+#vacuum_cost_delay = 0 # 0-1000 milliseconds
+#vacuum_cost_page_hit = 1 # 0-10000 credits
+#vacuum_cost_page_miss = 10 # 0-10000 credits
+#vacuum_cost_page_dirty = 20 # 0-10000 credits
+#vacuum_cost_limit = 200 # 1-10000 credits
+
+# - Background Writer -
+
+#bgwriter_delay = 200ms # 10-10000ms between rounds
+#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
+#bgwriter_lru_multiplier = 2.0 # 0-10.0 multipler on buffers scanned/round
+
+
+#------------------------------------------------------------------------------
+# WRITE AHEAD LOG
+#------------------------------------------------------------------------------
+
+# - Settings -
+
+#fsync = on # turns forced synchronization on or off
+#synchronous_commit = on # immediate fsync at commit
+#wal_sync_method = fsync # the default is the first option
+ # supported by the operating system:
+ # open_datasync
+ # fdatasync
+ # fsync
+ # fsync_writethrough
+ # open_sync
+#full_page_writes = on # recover from partial page writes
+#wal_buffers = 64kB # min 32kB
+ # (change requires restart)
+#wal_writer_delay = 200ms # 1-10000 milliseconds
+
+#commit_delay = 0 # range 0-100000, in microseconds
+#commit_siblings = 5 # range 1-1000
+
+# - Checkpoints -
+
+#checkpoint_segments = 3 # in logfile segments, min 1, 16MB each
+#checkpoint_timeout = 5min # range 30s-1h
+#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
+#checkpoint_warning = 30s # 0 is off
+
+# - Archiving -
+
+#archive_mode = off # allows archiving to be done
+ # (change requires restart)
+#archive_command = '' # command to use to archive a logfile segment
+#archive_timeout = 0 # force a logfile segment switch after this
+ # time; 0 is off
+
+
+#------------------------------------------------------------------------------
+# QUERY TUNING
+#------------------------------------------------------------------------------
+
+# - Planner Method Configuration -
+
+#enable_bitmapscan = on
+#enable_hashagg = on
+#enable_hashjoin = on
+#enable_indexscan = on
+#enable_mergejoin = on
+#enable_nestloop = on
+#enable_seqscan = on
+#enable_sort = on
+#enable_tidscan = on
+
+# - Planner Cost Constants -
+
+#seq_page_cost = 1.0 # measured on an arbitrary scale
+#random_page_cost = 4.0 # same scale as above
+#cpu_tuple_cost = 0.01 # same scale as above
+#cpu_index_tuple_cost = 0.005 # same scale as above
+#cpu_operator_cost = 0.0025 # same scale as above
+#effective_cache_size = 128MB
+
+# - Genetic Query Optimizer -
+
+#geqo = on
+#geqo_threshold = 12
+#geqo_effort = 5 # range 1-10
+#geqo_pool_size = 0 # selects default based on effort
+#geqo_generations = 0 # selects default based on effort
+#geqo_selection_bias = 2.0 # range 1.5-2.0
+
+# - Other Planner Options -
+
+#default_statistics_target = 10 # range 1-1000
+#constraint_exclusion = off
+#from_collapse_limit = 8
+#join_collapse_limit = 8 # 1 disables collapsing of explicit
+ # JOIN clauses
+
+
+#------------------------------------------------------------------------------
+# ERROR REPORTING AND LOGGING
+#------------------------------------------------------------------------------
+
+# - Where to Log -
+
+#log_destination = 'stderr' # Valid values are combinations of
+ # stderr, csvlog, syslog and eventlog,
+ # depending on platform. csvlog
+ # requires logging_collector to be on.
+
+# This is used when logging to stderr:
+#logging_collector = off # Enable capturing of stderr and csvlog
+ # into log files. Required to be on for
+ # csvlogs.
+ # (change requires restart)
+
+# These are only used if logging_collector is on:
+#log_directory = 'pg_log' # directory where log files are written,
+ # can be absolute or relative to PGDATA
+#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
+ # can include strftime() escapes
+#log_truncate_on_rotation = off # If on, an existing log file of the
+ # same name as the new log file will be
+ # truncated rather than appended to.
+ # But such truncation only occurs on
+ # time-driven rotation, not on restarts
+ # or size-driven rotation. Default is
+ # off, meaning append to existing files
+ # in all cases.
+#log_rotation_age = 1d # Automatic rotation of logfiles will
+ # happen after that time. 0 to disable.
+#log_rotation_size = 10MB # Automatic rotation of logfiles will
+ # happen after that much log output.
+ # 0 to disable.
+
+# These are relevant when logging to syslog:
+#syslog_facility = 'LOCAL0'
+#syslog_ident = 'postgres'
+
+
+# - When to Log -
+
+#client_min_messages = notice # values in order of decreasing detail:
+ # debug5
+ # debug4
+ # debug3
+ # debug2
+ # debug1
+ # log
+ # notice
+ # warning
+ # error
+
+#log_min_messages = notice # values in order of decreasing detail:
+ # debug5
+ # debug4
+ # debug3
+ # debug2
+ # debug1
+ # info
+ # notice
+ # warning
+ # error
+ # log
+ # fatal
+ # panic
+
+#log_error_verbosity = default # terse, default, or verbose messages
+
+#log_min_error_statement = error # values in order of decreasing detail:
+ # debug5
+ # debug4
+ # debug3
+ # debug2
+ # debug1
+ # info
+ # notice
+ # warning
+ # error
+ # log
+ # fatal
+ # panic (effectively off)
+
+#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
+ # and their durations, > 0 logs only
+ # statements running at least this time.
+
+#silent_mode = off # DO NOT USE without syslog or
+ # logging_collector
+ # (change requires restart)
+
+# - What to Log -
+
+#debug_print_parse = off
+#debug_print_rewritten = off
+#debug_print_plan = off
+#debug_pretty_print = off
+#log_checkpoints = off
+#log_connections = off
+#log_disconnections = off
+#log_duration = off
+#log_hostname = off
+#log_line_prefix = '' # special values:
+ # %u = user name
+ # %d = database name
+ # %r = remote host and port
+ # %h = remote host
+ # %p = process ID
+ # %t = timestamp without milliseconds
+ # %m = timestamp with milliseconds
+ # %i = command tag
+ # %c = session ID
+ # %l = session line number
+ # %s = session start timestamp
+ # %v = virtual transaction ID
+ # %x = transaction ID (0 if none)
+ # %q = stop here in non-session
+ # processes
+ # %% = '%'
+ # e.g. '<%u%%%d> '
+#log_lock_waits = off # log lock waits >= deadlock_timeout
+#log_statement = 'none' # none, ddl, mod, all
+#log_temp_files = -1 # log temporary files equal or larger
+ # than specified size;
+ # -1 disables, 0 logs all temp files
+#log_timezone = unknown # actually, defaults to TZ environment
+ # setting
+
+
+#------------------------------------------------------------------------------
+# RUNTIME STATISTICS
+#------------------------------------------------------------------------------
+
+# - Query/Index Statistics Collector -
+
+#track_activities = on
+#track_counts = on
+#update_process_title = on
+
+
+# - Statistics Monitoring -
+
+#log_parser_stats = off
+#log_planner_stats = off
+#log_executor_stats = off
+#log_statement_stats = off
+
+
+#------------------------------------------------------------------------------
+# AUTOVACUUM PARAMETERS
+#------------------------------------------------------------------------------
+
+#autovacuum = on # Enable autovacuum subprocess? 'on'
+ # requires track_counts to also be on.
+#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
+ # their durations, > 0 logs only
+ # actions running at least that time.
+#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
+#autovacuum_naptime = 1min # time between autovacuum runs
+#autovacuum_vacuum_threshold = 50 # min number of row updates before
+ # vacuum
+#autovacuum_analyze_threshold = 50 # min number of row updates before
+ # analyze
+#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
+#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
+#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
+ # (change requires restart)
+#autovacuum_vacuum_cost_delay = 20 # default vacuum cost delay for
+ # autovacuum, -1 means use
+ # vacuum_cost_delay
+#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
+ # autovacuum, -1 means use
+ # vacuum_cost_limit
+
+
+#------------------------------------------------------------------------------
+# CLIENT CONNECTION DEFAULTS
+#------------------------------------------------------------------------------
+
+# - Statement Behavior -
+
+#search_path = '"$user",public' # schema names
+#default_tablespace = '' # a tablespace name, '' uses the default
+#temp_tablespaces = '' # a list of tablespace names, '' uses
+ # only default tablespace
+#check_function_bodies = on
+#default_transaction_isolation = 'read committed'
+#default_transaction_read_only = off
+#session_replication_role = 'origin'
+#statement_timeout = 0 # 0 is disabled
+#vacuum_freeze_min_age = 100000000
+#xmlbinary = 'base64'
+#xmloption = 'content'
+
+# - Locale and Formatting -
+
+datestyle = 'iso, mdy'
+#timezone = unknown # actually, defaults to TZ environment
+ # setting
+#timezone_abbreviations = 'Default' # Select the set of available time zone
+ # abbreviations. Currently, there are
+ # Default
+ # Australia
+ # India
+ # You can create your own file in
+ # share/timezonesets/.
+#extra_float_digits = 0 # min -15, max 2
+#client_encoding = sql_ascii # actually, defaults to database
+ # encoding
+
+# These settings are initialized by initdb, but they can be changed.
+lc_messages = 'C' # locale for system error message
+ # strings
+lc_monetary = 'C' # locale for monetary formatting
+lc_numeric = 'C' # locale for number formatting
+lc_time = 'C' # locale for time formatting
+
+# default configuration for text search
+default_text_search_config = 'pg_catalog.english'
+
+# - Other Defaults -
+
+#explain_pretty_print = on
+#dynamic_library_path = '$libdir'
+#local_preload_libraries = ''
+
+
+#------------------------------------------------------------------------------
+# LOCK MANAGEMENT
+#------------------------------------------------------------------------------
+
+#deadlock_timeout = 1s
+#max_locks_per_transaction = 64 # min 10
+ # (change requires restart)
+# Note: Each lock table slot uses ~270 bytes of shared memory, and there are
+# max_locks_per_transaction * (max_connections + max_prepared_transactions)
+# lock table slots.
+
+
+#------------------------------------------------------------------------------
+# VERSION/PLATFORM COMPATIBILITY
+#------------------------------------------------------------------------------
+
+# - Previous PostgreSQL Versions -
+
+#add_missing_from = off
+#array_nulls = on
+#backslash_quote = safe_encoding # on, off, or safe_encoding
+#default_with_oids = off
+#escape_string_warning = on
+#regex_flavor = advanced # advanced, extended, or basic
+#sql_inheritance = on
+#standard_conforming_strings = off
+#synchronize_seqscans = on
+
+# - Other Platforms and Clients -
+
+#transform_null_equals = off
+
+
+#------------------------------------------------------------------------------
+# CUSTOMIZED OPTIONS
+#------------------------------------------------------------------------------
+
+#custom_variable_classes = '' # list of custom variable class names
diff --git a/cookbooks/postgres/recipes/default.rb b/cookbooks/postgres/recipes/default.rb
index 690c8da..cc9e980 100644
--- a/cookbooks/postgres/recipes/default.rb
+++ b/cookbooks/postgres/recipes/default.rb
@@ -1,141 +1,150 @@
require 'pp'
#
# Cookbook Name:: postgres
# Recipe:: default
#
-postgres_root = '/var/lib/postgresql'
-postgres_version = '8.3'
-
-directory '/db/postgresql' do
- owner 'postgres'
- group 'postgres'
- mode '0755'
- action :create
- recursive true
-end
+#
+if node[:instance_role] == 'db_master'
+ postgres_root = '/var/lib/postgresql'
+ postgres_version = '8.3'
+
+ directory '/db/postgresql' do
+ owner 'postgres'
+ group 'postgres'
+ mode '0755'
+ action :create
+ recursive true
+ end
-directory '/var/lib/postgresql' do
- action :delete
- recursive true
-end
+ directory '/var/lib/postgresql' do
+ action :delete
+ recursive true
+ end
-link "setup-postgresq-db-my-symlink" do
- to '/db/postgresql'
- target_file postgres_root
-end
+ link "setup-postgresq-db-my-symlink" do
+ to '/db/postgresql'
+ target_file postgres_root
+ end
-execute "init-postgres" do
- command "initdb -D #{postgres_root}/#{postgres_version}/data"
- action :run
- user 'postgres'
- only_if "[ ! -d #{postgres_root}/#{postgres_version}/data ]"
-end
+ execute "init-postgres" do
+ command "initdb -D #{postgres_root}/#{postgres_version}/data"
+ action :run
+ user 'postgres'
+ only_if "[ ! -d #{postgres_root}/#{postgres_version}/data ]"
+ end
-execute "enable-postgres" do
- command "rc-update add postgresql-#{postgres_version} default"
- action :run
-end
+ remote_file "/var/lib/postgresql/8.3/data/postgresql.conf" do
+ source "postgresql.conf"
+ owner "postgres"
+ group "root"
+ mode 0600
+ end
-execute "restart-postgres" do
- command "/etc/init.d/postgresql-#{postgres_version} restart"
- action :run
- not_if "/etc/init.d/postgresql-8.3 status | grep -q start"
-end
+ execute "enable-postgres" do
+ command "rc-update add postgresql-#{postgres_version} default"
+ action :run
+ end
-gem_package "pg" do
- action :install
-end
+ execute "restart-postgres" do
+ command "/etc/init.d/postgresql-#{postgres_version} restart"
+ action :run
+ not_if "/etc/init.d/postgresql-8.3 status | grep -q start"
+ end
-template "/etc/.postgresql.backups.yml" do
- owner 'root'
- group 'root'
- mode 0600
- source "postgresql.backups.yml.erb"
- variables({
- :dbuser => node[:users].first[:username],
- :dbpass => node[:users].first[:password],
- :keep => node[:backup_window] || 14,
- :id => node[:aws_secret_id],
- :key => node[:aws_secret_key],
- :env => node[:environment][:name]
- })
-end
+ gem_package "pg" do
+ action :install
+ end
+
+ template "/etc/.postgresql.backups.yml" do
+ owner 'root'
+ group 'root'
+ mode 0600
+ source "postgresql.backups.yml.erb"
+ variables({
+ :dbuser => node[:users].first[:username],
+ :dbpass => node[:users].first[:password],
+ :keep => node[:backup_window] || 14,
+ :id => node[:aws_secret_id],
+ :key => node[:aws_secret_key],
+ :env => node[:environment][:name]
+ })
+ end
-#set backup interval
-cron_hour = if node[:backup_interval].to_s == '24'
- "1" # 0100 Pacific, per support's request
- # NB: Instances run in the Pacific (Los Angeles) timezone
- elsif node[:backup_interval]
- "*/#{node[:backup_interval]}"
- else
- "1"
- end
+ #set backup interval
+ cron_hour = if node[:backup_interval].to_s == '24'
+ "1" # 0100 Pacific, per support's request
+ # NB: Instances run in the Pacific (Los Angeles) timezone
+ elsif node[:backup_interval]
+ "*/#{node[:backup_interval]}"
+ else
+ "1"
+ end
-cron "eybackup" do
- action :delete
-end
+ cron "eybackup" do
+ action :delete
+ end
-cron "eybackup postgresql" do
- minute '10'
- hour cron_hour
- day '*'
- month '*'
- weekday '*'
- command "eybackup -e postgresql"
- not_if { node[:backup_window].to_s == '0' }
+ cron "eybackup postgresql" do
+ minute '10'
+ hour cron_hour
+ day '*'
+ month '*'
+ weekday '*'
+ command "eybackup -e postgresql"
+ not_if { node[:backup_window].to_s == '0' }
+ end
end
node[:applications].each do |app_name,data|
user = node[:users].first
db_name = "#{app_name}_#{node[:environment][:framework_env]}"
- execute "create-db-user-#{user[:username]}" do
- command "psql -c '\\du' | grep -q '#{user[:username]}' || psql -c \"create user #{user[:username]} with encrypted password \'#{user[:password]}\'\""
- action :run
- user 'postgres'
- end
-
- execute "create-db-#{db_name}" do
- command "psql -c '\\l' | grep -q '#{db_name}' || createdb #{db_name}"
- action :run
- user 'postgres'
- end
-
- execute "grant-perms-on-#{db_name}-to-#{user[:username]}" do
- command "/usr/bin/psql -c 'grant all on database #{db_name} to #{user[:username]}'"
- action :run
- user 'postgres'
- end
-
- execute "alter-public-schema-owner-on-#{db_name}-to-#{user[:username]}" do
- command "/usr/bin/psql #{db_name} -c 'ALTER SCHEMA public OWNER TO #{user[:username]}'"
- action :run
- user 'postgres'
+ if node[:instance_role] == 'db_master'
+ execute "create-db-user-#{user[:username]}" do
+ command "psql -c '\\du' | grep -q '#{user[:username]}' || psql -c \"create user #{user[:username]} with encrypted password \'#{user[:password]}\'\""
+ action :run
+ user 'postgres'
+ end
+
+ execute "create-db-#{db_name}" do
+ command "psql -c '\\l' | grep -q '#{db_name}' || createdb #{db_name}"
+ action :run
+ user 'postgres'
+ end
+
+ execute "grant-perms-on-#{db_name}-to-#{user[:username]}" do
+ command "/usr/bin/psql -c 'grant all on database #{db_name} to #{user[:username]}'"
+ action :run
+ user 'postgres'
+ end
+
+ execute "alter-public-schema-owner-on-#{db_name}-to-#{user[:username]}" do
+ command "/usr/bin/psql #{db_name} -c 'ALTER SCHEMA public OWNER TO #{user[:username]}'"
+ action :run
+ user 'postgres'
+ end
end
- template "/data/#{app_name}/shared/config/keep.database.yml" do
- source "database.yml.erb"
+ directory "/data/#{app_name}/shared/config/" do
owner user[:username]
group user[:username]
- mode 0744
- variables({
- :username => user[:username],
- :app_name => app_name,
- :db_pass => user[:password]
- })
+ mode '0755'
+ action :create
+ recursive true
end
- template "/data/#{app_name}/shared/config/database.yml" do
- source "database.yml.erb"
- owner user[:username]
- group user[:username]
- mode 0744
- variables({
+ [ "", "keep." ].each do |prefix|
+ template "/data/#{app_name}/shared/config/#{prefix}database.yml" do
+ source "database.yml.erb"
+ owner user[:username]
+ group user[:username]
+ mode 0744
+ variables({
:username => user[:username],
:app_name => app_name,
:db_pass => user[:password]
- })
+ })
+ end
end
-
end
|
dojo4/ey-cloud-recipes | bf4b64ef94352f3b370db48022eb0d1b5523f982 | Bugfix for postgres recipes | diff --git a/cookbooks/postgres/recipes/default.rb b/cookbooks/postgres/recipes/default.rb
index 856b29a..690c8da 100644
--- a/cookbooks/postgres/recipes/default.rb
+++ b/cookbooks/postgres/recipes/default.rb
@@ -1,143 +1,141 @@
require 'pp'
#
# Cookbook Name:: postgres
# Recipe:: default
#
postgres_root = '/var/lib/postgresql'
postgres_version = '8.3'
directory '/db/postgresql' do
owner 'postgres'
group 'postgres'
mode '0755'
action :create
recursive true
end
directory '/var/lib/postgresql' do
action :delete
recursive true
end
link "setup-postgresq-db-my-symlink" do
to '/db/postgresql'
target_file postgres_root
end
execute "init-postgres" do
command "initdb -D #{postgres_root}/#{postgres_version}/data"
action :run
user 'postgres'
only_if "[ ! -d #{postgres_root}/#{postgres_version}/data ]"
end
execute "enable-postgres" do
command "rc-update add postgresql-#{postgres_version} default"
action :run
end
execute "restart-postgres" do
command "/etc/init.d/postgresql-#{postgres_version} restart"
action :run
not_if "/etc/init.d/postgresql-8.3 status | grep -q start"
end
gem_package "pg" do
action :install
end
template "/etc/.postgresql.backups.yml" do
owner 'root'
group 'root'
mode 0600
source "postgresql.backups.yml.erb"
variables({
:dbuser => node[:users].first[:username],
:dbpass => node[:users].first[:password],
:keep => node[:backup_window] || 14,
:id => node[:aws_secret_id],
:key => node[:aws_secret_key],
:env => node[:environment][:name]
})
end
#set backup interval
cron_hour = if node[:backup_interval].to_s == '24'
"1" # 0100 Pacific, per support's request
# NB: Instances run in the Pacific (Los Angeles) timezone
elsif node[:backup_interval]
"*/#{node[:backup_interval]}"
else
"1"
end
cron "eybackup" do
action :delete
end
cron "eybackup postgresql" do
minute '10'
hour cron_hour
day '*'
month '*'
weekday '*'
command "eybackup -e postgresql"
not_if { node[:backup_window].to_s == '0' }
end
node[:applications].each do |app_name,data|
user = node[:users].first
db_name = "#{app_name}_#{node[:environment][:framework_env]}"
execute "create-db-user-#{user[:username]}" do
- command "psql -c \"create user #{user[:username]} with encrypted password \'#{user[:password]}\'\""
+ command "psql -c '\\du' | grep -q '#{user[:username]}' || psql -c \"create user #{user[:username]} with encrypted password \'#{user[:password]}\'\""
action :run
user 'postgres'
- not_if "psql -c '\\du' | grep -q '#{user[:username]}'"
end
execute "create-db-#{db_name}" do
- command "createdb #{db_name}"
+ command "psql -c '\\l' | grep -q '#{db_name}' || createdb #{db_name}"
action :run
user 'postgres'
- not_if "psql -c '\\l' | grep -q '#{db_name}'"
end
execute "grant-perms-on-#{db_name}-to-#{user[:username]}" do
command "/usr/bin/psql -c 'grant all on database #{db_name} to #{user[:username]}'"
action :run
user 'postgres'
end
execute "alter-public-schema-owner-on-#{db_name}-to-#{user[:username]}" do
command "/usr/bin/psql #{db_name} -c 'ALTER SCHEMA public OWNER TO #{user[:username]}'"
action :run
user 'postgres'
end
template "/data/#{app_name}/shared/config/keep.database.yml" do
source "database.yml.erb"
owner user[:username]
group user[:username]
mode 0744
variables({
:username => user[:username],
:app_name => app_name,
:db_pass => user[:password]
})
end
template "/data/#{app_name}/shared/config/database.yml" do
source "database.yml.erb"
owner user[:username]
group user[:username]
mode 0744
variables({
:username => user[:username],
:app_name => app_name,
:db_pass => user[:password]
})
end
end
|
dojo4/ey-cloud-recipes | 9f281c518ad752ada5e5085027e6207b0c89373e | More cleanup of postgres recipes | diff --git a/cookbooks/postgres/recipes/default.rb b/cookbooks/postgres/recipes/default.rb
index 93dd190..856b29a 100644
--- a/cookbooks/postgres/recipes/default.rb
+++ b/cookbooks/postgres/recipes/default.rb
@@ -1,142 +1,143 @@
require 'pp'
#
# Cookbook Name:: postgres
# Recipe:: default
#
postgres_root = '/var/lib/postgresql'
postgres_version = '8.3'
directory '/db/postgresql' do
owner 'postgres'
group 'postgres'
mode '0755'
action :create
recursive true
end
+directory '/var/lib/postgresql' do
+ action :delete
+ recursive true
+end
+
link "setup-postgresq-db-my-symlink" do
to '/db/postgresql'
target_file postgres_root
end
-execute "setup-postgresql-db-symlink" do
- command "rm -rf /var/lib/postgresql; ln -s /data/postgresql /var/lib/postgresql"
- action :run
- only_if "if [ ! -L #{postgres_root} ]; then exit 0; fi; exit 1;"
-end
-
execute "init-postgres" do
command "initdb -D #{postgres_root}/#{postgres_version}/data"
action :run
user 'postgres'
- only_if "if [ ! -d #{postgres_root}/#{postgres_version}/data ]; then exit 0; fi; exit 1;"
+ only_if "[ ! -d #{postgres_root}/#{postgres_version}/data ]"
end
execute "enable-postgres" do
command "rc-update add postgresql-#{postgres_version} default"
action :run
end
execute "restart-postgres" do
command "/etc/init.d/postgresql-#{postgres_version} restart"
action :run
not_if "/etc/init.d/postgresql-8.3 status | grep -q start"
end
gem_package "pg" do
action :install
end
template "/etc/.postgresql.backups.yml" do
owner 'root'
group 'root'
mode 0600
source "postgresql.backups.yml.erb"
variables({
:dbuser => node[:users].first[:username],
:dbpass => node[:users].first[:password],
:keep => node[:backup_window] || 14,
:id => node[:aws_secret_id],
:key => node[:aws_secret_key],
:env => node[:environment][:name]
})
end
#set backup interval
cron_hour = if node[:backup_interval].to_s == '24'
"1" # 0100 Pacific, per support's request
# NB: Instances run in the Pacific (Los Angeles) timezone
elsif node[:backup_interval]
"*/#{node[:backup_interval]}"
else
"1"
end
cron "eybackup" do
action :delete
end
cron "eybackup postgresql" do
minute '10'
hour cron_hour
day '*'
month '*'
weekday '*'
command "eybackup -e postgresql"
not_if { node[:backup_window].to_s == '0' }
end
node[:applications].each do |app_name,data|
user = node[:users].first
db_name = "#{app_name}_#{node[:environment][:framework_env]}"
execute "create-db-user-#{user[:username]}" do
- command "`psql -c '\\du' | grep -q '#{user[:username]}'`; if [ $? -eq 1 ]; then\n psql -c \"create user #{user[:username]} with encrypted password \'#{user[:password]}\'\"\nfi"
+ command "psql -c \"create user #{user[:username]} with encrypted password \'#{user[:password]}\'\""
action :run
user 'postgres'
+ not_if "psql -c '\\du' | grep -q '#{user[:username]}'"
end
execute "create-db-#{db_name}" do
- command "`psql -c '\\l' | grep -q '#{db_name}'`; if [ $? -eq 1 ]; then\n createdb #{db_name}\nfi"
+ command "createdb #{db_name}"
action :run
user 'postgres'
+ not_if "psql -c '\\l' | grep -q '#{db_name}'"
end
execute "grant-perms-on-#{db_name}-to-#{user[:username]}" do
command "/usr/bin/psql -c 'grant all on database #{db_name} to #{user[:username]}'"
action :run
user 'postgres'
end
execute "alter-public-schema-owner-on-#{db_name}-to-#{user[:username]}" do
command "/usr/bin/psql #{db_name} -c 'ALTER SCHEMA public OWNER TO #{user[:username]}'"
action :run
user 'postgres'
end
template "/data/#{app_name}/shared/config/keep.database.yml" do
source "database.yml.erb"
owner user[:username]
group user[:username]
mode 0744
variables({
:username => user[:username],
:app_name => app_name,
:db_pass => user[:password]
})
end
template "/data/#{app_name}/shared/config/database.yml" do
source "database.yml.erb"
owner user[:username]
group user[:username]
mode 0744
variables({
:username => user[:username],
:app_name => app_name,
:db_pass => user[:password]
})
end
end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.